forked from TrueCloudLab/frostfs-s3-gw

Compare commits: master ... poc/frostf (160 commits)
Commit SHA1s:
3d8fd96094 9d19fb487e f8c6b89204 27e837f551 e28c445a50 4bb0872191 8242c328a1 93fc1e7801
c20194cc9d 80237822d1 c4af1dc4ad b8c93ed391 51e591877b a4c612614a 12cf29aed2 16840f1256
066b9a0250 54e1c333a1 69227b4845 c66c09765d 9120e97ac5 2fc328a6d2 b5fce5c8d2 41a128b1aa
6617adc22b 631d9d83b6 adec93af54 8898c2ec08 8efcc957ea 6b728fef87 6b1f365e65 fcf1c45ad2
6a9d3261a7 012ece40bb c750c87a61 94a42fa273 40d7f844e3 52b89d3497 6b109eee92 18878b66d3
46eae4a356 fe897ec588 52931663e1 1a09041cd1 631b7f67b4 8ca2998297 bcf5a85aab ad81b599dd
361d10cc78 62e6b49254 80c4982bd4 73ed3f7782 6e3595e35b 57add29643 b59aa06637 d62aa7b979
751a9be7cc e58ea40463 14ef9ff091 fc90981c03 83cdfbee78 37f2f468fe e30a452cdb 7be70243f7
7f708b3a2d d531b13866 f921bc8af5 be03c5178f 499f4c6495 2cbe3b9a27 c588d485fa 3927223bb0
eba85b50b6 043447600e 0cd353707a f74ab12f91 dea7b39805 9df8695463 614d703726 23593eee3d
dfc4476afd 84358f6742 7a380fa46c 0590f84d68 751d66bde0 462589fc0c 8fcaf76f41 19c89b38e6
0bcda6ea37 9dabaf6ecd 840d457cb9 c2fae4c199 9e0c39dcc2 e4b1d07185 3b5fcc3c8a c75add64ec
acb6e8cbca 4e1fd9589b bd898ad59e 43bee561cf 4a6e3a19ce b445f7bbf9 868edfdb31 34bbbcf1ed
fae03c2b50 81e860481d e97fea30ea cfc94e39ef ce9294685c a0f0d792b8 43e336e155 9f186d9aba
11f30a037b 136a186c14 24390fdec8 1fdbfb0dab 1406f57bba 3fbad97ae4 fa5889a0f5 e24bc3f2ce
bf47978cfe b366e75366 e487ee5b7d 69d8779daf 9da77667f3 f200dd310e ee8cce662b ca8791a5fd
2ab6f004f1 3da2d40fa8 1cbc7f323f c154f934e4 644524e8a5 ee6118c3d7 9e72fe1662 70ec5a0a5b
4a8c382491 a5c89b78bc fb99b26209 01afa1cae4 6c68e21777 a025f2e9c5 bd3164c57f fb90c0f52c
f2f90e260e a0937126cb 655889a1a2 ef556bd8ac 5104683f68 8151753eeb 2282c32822 43685e03d9
cf18158da4 5c62010331 0af06c3bd9 680c0dbe3d 596381c382 64e7356acc 32bf915502 813aa2f173
189 changed files with 15279 additions and 5586 deletions
@@ -1,7 +1,7 @@
-FROM golang:1.19 as builder
+FROM golang:1.21 as builder
 
 ARG BUILD=now
-ARG REPO=github.com/TrueCloudLab/frostfs-s3-gw
+ARG REPO=git.frostfs.info/TrueCloudLab/frostfs-s3-gw
 ARG VERSION=dev
 
 WORKDIR /src
.forgejo/workflows/builds.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
+on: [pull_request]
+
+jobs:
+  builds:
+    name: Builds
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        go_versions: [ '1.20', '1.21' ]
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '${{ matrix.go_versions }}'
+
+      - name: Build binary
+        run: make
.forgejo/workflows/dco.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
+on: [pull_request]
+
+jobs:
+  dco:
+    name: DCO
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Setup Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.21'
+
+      - name: Run commit format checker
+        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v1
+        with:
+          from: 3fbad97a
.forgejo/workflows/tests.yml (new file, 41 lines)
@@ -0,0 +1,41 @@
+on: [pull_request]
+
+jobs:
+  lint:
+    name: Lint
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.21'
+          cache: true
+
+      - name: Install linters
+        run: make lint-install
+
+      - name: Run linters
+        run: make lint
+
+  tests:
+    name: Tests
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        go_versions: [ '1.20', '1.21' ]
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '${{ matrix.go_versions }}'
+
+      - name: Update Go modules
+        run: make dep
+
+      - name: Run tests
+        run: make test
.forgejo/workflows/vulncheck.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
+on: [pull_request]
+
+jobs:
+  vulncheck:
+    name: Vulncheck
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Setup Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.21'
+
+      - name: Install govulncheck
+        run: go install golang.org/x/vuln/cmd/govulncheck@latest
+
+      - name: Run govulncheck
+        run: govulncheck ./...
.github/CODEOWNERS (vendored; 2 changes)
@@ -1 +1 @@
-* @alexvanin @KirillovDenis
+* @alexvanin @dkirillov
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 45 lines)
@@ -0,0 +1,45 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: community, triage, bug
+assignees: ''
+
+---
+
+<!--- Provide a general summary of the issue in the Title above -->
+
+## Expected Behavior
+<!--- If you're describing a bug, tell us what should happen -->
+<!--- If you're suggesting a change/improvement, tell us how it should work -->
+
+## Current Behavior
+<!--- If describing a bug, tell us what happens instead of the expected behavior -->
+<!--- If suggesting a change/improvement, explain the difference from current behavior -->
+
+## Possible Solution
+<!--- Not obligatory -->
+<!--- If no reason/fix/additions for the bug can be suggested, -->
+<!--- uncomment the following phrase: -->
+
+<!--- No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->
+
+## Steps to Reproduce (for bugs)
+<!--- Provide a link to a live example, or an unambiguous set of steps to -->
+<!--- reproduce this bug. -->
+
+1.
+
+## Context
+<!--- How has this issue affected you? What are you trying to accomplish? -->
+<!--- Providing context helps us come up with a solution that is most useful in the real world -->
+
+## Regression
+<!-- Is this issue a regression? (Yes / No) -->
+<!-- If Yes, optionally please include version or commit id or PR# that caused this regression, if you have these details. -->
+
+## Your Environment
+<!--- Include as many relevant details about the environment you experienced the bug in -->
+* Version used:
+* Server setup and configuration:
+* Operating System and version (`uname -a`):
.github/ISSUE_TEMPLATE/config.yml (vendored, new file, 1 line)
@@ -0,0 +1 @@
+blank_issues_enabled: false
.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: community, triage
+assignees: ''
+
+---
+
+## Is your feature request related to a problem? Please describe.
+<!--- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
+
+## Describe the solution you'd like
+<!--- A clear and concise description of what you want to happen. -->
+
+## Describe alternatives you've considered
+<!--- A clear and concise description of any alternative solutions or features you've considered. -->
+
+## Additional context
+<!--- Add any other context or screenshots about the feature request here. -->
.github/workflows/builds.yml (vendored, deleted; 73 lines)
@@ -1,73 +0,0 @@
-name: Builds
-
-on:
-  pull_request:
-    branches:
-      - master
-      - 'support/*'
-    types: [ opened, synchronize ]
-    paths-ignore:
-      - '**/*.md'
-
-jobs:
-  build_cli:
-    name: Build CLI
-    runs-on: ubuntu-20.04
-
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-
-      - name: Setup Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.19
-
-      - name: Restore Go modules from cache
-        uses: actions/cache@v2
-        with:
-          path: /home/runner/go/pkg/mod
-          key: deps-${{ hashFiles('go.sum') }}
-
-      - name: Get tree-service client
-        run: make sync-tree
-
-      - name: Update Go modules
-        run: make dep
-
-      - name: Build CLI
-        run: make
-
-      - name: Check version
-        run: if [[ $(make version) == *"dirty"* ]]; then exit 1; fi
-
-  build_image:
-    needs: build_cli
-    name: Build Docker image
-    runs-on: ubuntu-20.04
-
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-
-      - name: Set up Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.19
-
-      - name: Restore Go modules from cache
-        uses: actions/cache@v2
-        with:
-          path: /home/runner/go/pkg/mod
-          key: deps-${{ hashFiles('go.sum') }}
-
-      - name: Get tree-service client
-        run: make sync-tree
-
-      - name: Update Go modules
-        run: make dep
-
-      - name: Build Docker image
-        run: make image
.github/workflows/codeql-analysis.yml (vendored, deleted; 67 lines)
@@ -1,67 +0,0 @@
-# For most projects, this workflow file will not need changing; you simply need
-# to commit it to your repository.
-#
-# You may wish to alter this file to override the set of languages analyzed,
-# or to provide custom queries or build logic.
-#
-# ******** NOTE ********
-# We have attempted to detect the languages in your repository. Please check
-# the `language` matrix defined below to confirm you have the correct set of
-# supported CodeQL languages.
-#
-name: "CodeQL"
-
-on:
-  push:
-    branches: [ master, 'support/*' ]
-  pull_request:
-    # The branches below must be a subset of the branches above
-    branches: [ master, 'support/*' ]
-  schedule:
-    - cron: '35 8 * * 1'
-
-jobs:
-  analyze:
-    name: Analyze
-    runs-on: ubuntu-latest
-
-    strategy:
-      fail-fast: false
-      matrix:
-        language: [ 'go' ]
-        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
-        # Learn more:
-        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
-
-    steps:
-    - name: Checkout repository
-      uses: actions/checkout@v2
-
-    # Initializes the CodeQL tools for scanning.
-    - name: Initialize CodeQL
-      uses: github/codeql-action/init@v2
-      with:
-        languages: ${{ matrix.language }}
-        # If you wish to specify custom queries, you can do so here or in a config file.
-        # By default, queries listed here will override any specified in a config file.
-        # Prefix the list here with "+" to use these queries and those in the config file.
-        # queries: ./path/to/local/query, your-org/your-repo/queries@main
-
-    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
-    # If this step fails, then you should remove it and run the build manually (see below)
-    - name: Autobuild
-      uses: github/codeql-action/autobuild@v2
-
-    # ℹ️ Command-line programs to run using the OS shell.
-    # 📚 https://git.io/JvXDl
-
-    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
-    #    and modify them (or add more) to build your code if your project
-    #    uses a compiled language
-
-    #- run: |
-    #   make bootstrap
-    #   make release
-
-    - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v2
.github/workflows/dco.yml (vendored, deleted; 22 lines)
@@ -1,22 +0,0 @@
-name: DCO check
-
-on:
-  pull_request:
-    branches:
-      - master
-      - 'support/*'
-
-jobs:
-  commits_check_job:
-    runs-on: ubuntu-latest
-    name: Commits Check
-    steps:
-    - name: Get PR Commits
-      id: 'get-pr-commits'
-      uses: tim-actions/get-pr-commits@master
-      with:
-        token: ${{ secrets.GITHUB_TOKEN }}
-    - name: DCO Check
-      uses: tim-actions/dco@master
-      with:
-        commits: ${{ steps.get-pr-commits.outputs.commits }}
.github/workflows/tests.yml (vendored, deleted; 96 lines)
@@ -1,96 +0,0 @@
-name: Tests
-
-on:
-  pull_request:
-    branches:
-      - master
-      - 'support/*'
-    types: [opened, synchronize]
-    paths-ignore:
-      - '**/*.md'
-
-jobs:
-  lint:
-    name: Lint
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v2
-
-      - name: Get tree-service client
-        run: make sync-tree
-
-      - name: golangci-lint
-        uses: golangci/golangci-lint-action@v2
-        with:
-          version: latest
-
-  cover:
-    name: Coverage
-    runs-on: ubuntu-20.04
-
-    env:
-      CGO_ENABLED: 1
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-
-      - name: Set up Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.19
-
-      - name: Restore Go modules from cache
-        uses: actions/cache@v2
-        with:
-          path: /home/runner/go/pkg/mod
-          key: deps-${{ hashFiles('go.sum') }}
-
-      - name: Get tree-service client
-        run: make sync-tree
-
-      - name: Update Go modules
-        run: make dep
-
-      - name: Test and write coverage profile
-        run: make cover
-
-      - name: Upload coverage results to Codecov
-        uses: codecov/codecov-action@v1
-        with:
-          fail_ci_if_error: false
-          path_to_write_report: ./coverage.txt
-          verbose: true
-
-  tests:
-    name: Tests
-    runs-on: ubuntu-20.04
-    strategy:
-      matrix:
-        go_versions: [ '1.18.x', '1.19.x' ]
-      fail-fast: false
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-
-      - name: Set up Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: '${{ matrix.go_versions }}'
-
-      - name: Restore Go modules from cache
-        uses: actions/cache@v2
-        with:
-          path: /home/runner/go/pkg/mod
-          key: deps-${{ hashFiles('go.sum') }}
-
-      - name: Get tree-service client
-        run: make sync-tree
-
-      - name: Update Go modules
-        run: make dep
-
-      - name: Run tests
-        run: make test
.gitignore (vendored; 4 changes)
@@ -2,9 +2,6 @@
 .idea
 .vscode
 
-# Tree service
-internal/frostfs/services/tree/
-
 # Vendoring
 vendor
 
@@ -28,4 +25,3 @@ debian/files
 debian/*.log
 debian/*.substvars
 debian/frostfs-s3-gw/
-
.gitlint (new file, 11 lines)
@@ -0,0 +1,11 @@
+[general]
+fail-without-commits=True
+regex-style-search=True
+contrib=CC1
+
+[title-match-regex]
+regex=^\[\#[0-9Xx]+\]\s
+
+[ignore-by-title]
+regex=^Release(.*)
+ignore=title-match-regex
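The `[title-match-regex]` rule added here requires commit titles to start with an issue reference such as `[#123]` (or an `[#XX]` placeholder), while `[ignore-by-title]` exempts release commits. A minimal Go sketch of the same two checks, reusing the patterns from the config (the sample titles are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Patterns from the .gitlint config above; the `\#` escape in the
	// original is redundant in Go's RE2 syntax, so plain `#` is used.
	titleRe := regexp.MustCompile(`^\[#[0-9Xx]+\]\s`)
	releaseRe := regexp.MustCompile(`^Release(.*)`)

	for _, title := range []string{ // hypothetical commit titles
		"[#178] Fix goroutine leak on put object error",
		"[#XX] Update CHANGELOG",
		"Release v0.27.0",
		"fix stuff",
	} {
		ok := titleRe.MatchString(title) || releaseRe.MatchString(title)
		fmt.Printf("%-48s pass=%v\n", title, ok)
	}
}
```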
@@ -4,7 +4,7 @@
 # options for analysis running
 run:
   # timeout for analysis, e.g. 30s, 5m, default is 1m
-  timeout: 5m
+  timeout: 15m
 
   # include test files or not, default is true
   tests: true
@@ -24,6 +24,16 @@ linters-settings:
   govet:
     # report about shadowed variables
     check-shadowing: false
+  custom:
+    truecloudlab-linters:
+      path: bin/external_linters.so
+      original-url: git.frostfs.info/TrueCloudLab/linters.git
+      settings:
+        noliteral:
+          enable: true
+          target-methods: ["Fatal"]
+          disable-packages: ["codes", "tc"]
+          constants-package: "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 
 linters:
   enable:
@@ -45,6 +55,7 @@ linters:
     - gofmt
    - whitespace
     - goimports
+    - truecloudlab-linters
  disable-all: true
   fast: false
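Judging by its settings, the custom `noliteral` linter appears to flag string literals passed directly to the configured logging methods, steering messages into the shared `internal/logs` constants package instead. The sketch below shows the pattern this implies; the rule's exact semantics and the constant name are assumptions, not taken from the source:

```go
package main

import "go.uber.org/zap"

// Presumably what internal/logs centralizes; this constant name is hypothetical.
const MsgFailedToSignRequest = "failed to sign request"

func main() {
	logger, _ := zap.NewProduction()

	// A noliteral-style rule would flag a raw literal in a target method
	// ("Fatal" per the config above):
	//
	//	logger.Fatal("failed to sign request")
	//
	// and accept a message sourced from the constants package:
	logger.Fatal(MsgFailedToSignRequest)
}
```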
.pre-commit-config.yaml (new file, 52 lines)
@@ -0,0 +1,52 @@
+ci:
+  autofix_prs: false
+
+repos:
+  - repo: https://github.com/jorisroovers/gitlint
+    rev: v0.19.1
+    hooks:
+      - id: gitlint
+        stages: [commit-msg]
+      - id: gitlint-ci
+
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: check-added-large-files
+      - id: check-case-conflict
+      - id: check-executables-have-shebangs
+      - id: check-shebang-scripts-are-executable
+      - id: check-merge-conflict
+      - id: check-json
+      - id: check-xml
+      - id: check-yaml
+      - id: trailing-whitespace
+        args: [--markdown-linebreak-ext=md]
+      - id: end-of-file-fixer
+        exclude: ".key$"
+
+  - repo: https://github.com/shellcheck-py/shellcheck-py
+    rev: v0.9.0.2
+    hooks:
+      - id: shellcheck
+
+  - repo: local
+    hooks:
+      - id: make-lint-install
+        name: install linters
+        entry: make lint-install
+        language: system
+        pass_filenames: false
+
+      - id: make-lint
+        name: run linters
+        entry: make lint
+        language: system
+        pass_filenames: false
+
+      - id: go-unit-tests
+        name: go unit tests
+        entry: make test
+        pass_filenames: false
+        types: [go]
+        language: system
CHANGELOG.md (489 lines changed)
@@ -5,424 +5,91 @@ This document outlines major changes between releases.

## [Unreleased]

### Fixed
- Get empty bucket CORS from frostfs (TrueCloudLab#36)
- Handle negative `Content-Length` on put (#125)
- Use `DisableURIPathEscaping` to presign urls (#125)
- Use specific s3 errors instead of `InternalError` where possible (#143)
- `grpc` schemas in tree configuration (#166)
- Return appropriate 404 code when an object is missing from storage but still present in the gate cache (#158)
- Replace part on re-upload when using multipart upload (#176)
- Fix goroutine leak on put object error (#178)
- Fix parsing signed headers in presigned urls (#182)
- Fix url escaping (#188)
- Use correct keys in `list-multipart-uploads` response (#185)

### Added
- Return container name in `head-bucket` response (TrueCloudLab#18)
- Billing metrics (TrueCloudLab#5)
- Multiple configs support (TrueCloudLab#21)
- Bucket name resolving policy (TrueCloudLab#25)
- Add `trace_id` value into log record when tracing is enabled (#142)
- Add basic error types and exit codes to `frostfs-s3-authmate` (#152)
- Add a metric with addresses of nodes of the same and highest priority that are currently healthy (#51)
- Support dump metrics descriptions (#80)
- Add `copies_numbers` section to `placement_policy` in config file and support vectors of copies numbers (#70, #101)
- Support impersonate bearer token (#81, #105)
- Reload default and custom copies numbers on SIGHUP (#104)
- Tracing support (#84, #140)
- Return bearer token in `s3-authmate obtain-secret` result (#132)
- Support multiple version credentials using GSet (#135)
- Implement chunk uploading (#106)
- Add new `kludge.bypass_content_encoding_check_in_chunks` config param (#146)
- Add new `frostfs.client_cut` config param (#192)

### Changed
- Update prometheus to v1.15.0 (#94)
- Update go version to go1.19 (#118)
- Remove object from tree and reset its cache on object deletion when it is already removed from storage (#78)
- Finish rebranding (#2)
- Timeout errors have code 504 now (#103)
- Use request scope logger (#111)
- Add `s3-authmate update-secret` command (#131)
- Use default registerer for app metrics (#155)
- Use chi router instead of archived gorilla/mux (#149)
- Complete multipart upload no longer makes an unnecessary copy, roughly halving total multipart upload time (#63)
- Use gate key to form object owner (#175)
- Apply placement policies and copies if there is at least one valid value (#168)

### Removed
- Drop `tree.service` param (now endpoints from `peers` section are used) (#133)

## [0.27.0] - Karpinsky - 2023-07-12

This is the first FrostFS S3 Gateway release named after
[Karpinsky glacier](https://en.wikipedia.org/wiki/Karpinsky_Glacier).

### Fixed
- Using multiple servers requires only one healthy (#12)
- Renew token before it expires (#20)
- Add generated deb builder files to .gitignore, and fix typo (#28)
- Get empty bucket CORS from frostfs (#36)
- Don't count pool error on client abort (#35)
- Handle request cancelling (#69)
- Clean up List and Name caches when object is missing in Tree service (#57)
- Don't create unnecessary delete-markers (#83)
- `Too many pings` error (#145)

### Added
- Billing metrics (#5, #26, #29)
- Return container name in `head-bucket` response (#18)
- Multiple configs support (#21)
- Bucket name resolving policy (#25)
- Support string `Action` and `Resource` fields in `bucketPolicy.Statement` (#32)
- Add new `kludge.use_default_xmlns_for_complete_multipart` config param (#40)
- Return `X-Owner-Id` in `head-bucket` response (#79)
- Support multiple tree service endpoints (#74, #110, #114)

### Changed
- Repository rebranding (#1)
- Update neo-go to v0.101.0 (#14)
- Update viper to v1.15.0 (#14)
- Using multiple servers requires only one healthy (TrueCloudLab#12)
- Update go version to go1.18 (TrueCloudLab#16)
- Return error on invalid LocationConstraint (TrueCloudLab#23)
- Place billing metrics to separate url path (TrueCloudLab#26)
- Add generated deb builder files to .gitignore, and fix typo (TrueCloudLab#28)

## [0.26.0] - 2022-12-28

### Added
- Use client time as `now` in some requests (#726)
- Reload policies on SIGHUP (#747)
- Authmate flags for pool timeouts (#760)
- Multiple server listeners (#742)

### Changed
- Placement policy configuration (#568)
- Improved debug logging of CID and OID values (#754)

### Removed
- Deprecated linters (#755)

### Updating from v0.25.1
New config parameters were added, and the old `defaul_policy` parameter was changed.
```yaml
placement_policy:
  default: "REP 3"
  region_mapping: /path/to/container/policies.json
```

Make sure you update the config accordingly:
If you configure application using environment variables change:
* `S3_GW_DEFAULT_POLICY` -> `S3_GW_PLACEMENT_POLICY_DEFAULT_POLICY`
* `S3_GW_LISTEN_ADDRESS` -> `S3_GW_SERVER_0_ADDRESS`
* `S3_GW_TLS_CERT_FILE` -> `S3_GW_SERVER_0_TLS_CERT_FILE` (and set `S3_GW_SERVER_0_TLS_ENABLED=true`)
* `S3_GW_TLS_KEY_FILE` -> `S3_GW_SERVER_0_TLS_KEY_FILE` (and set `S3_GW_SERVER_0_TLS_ENABLED=true`)

If you configure application using `.yaml` file change:
* `defaul_policy` -> `placement_policy.default`
* `listen_address` -> `server.0.address`
* `tls.cert_file` -> `server.0.tls.cert_file` (and set `server.0.tls.enabled: true`)
* `tls.key_file` -> `server.0.tls.key_file` (and set `server.0.tls.enabled: true`)

## [0.25.1] - 2022-10-30

### Fixed
- Empty bucket policy (#740)
- Big object removal (#749)
- Checksum panic (#741)

### Added
- Debian packaging (#737)
- Timeout for individual operations in streaming RPC (#750)

## [0.25.0] - 2022-10-31

### Fixed
- Legal hold object lock enabling (#709)
- Errors at object locking (#719)
- Unrestricted access to not owned objects via cache (#713)
- Check tree service health (#699)
- Bucket names in listing (#733)

### Added
- Config reloading on SIGHUP (#702, #715, #716)
- Stop pool dial on SIGINT (#712)

### Changed
- GitHub actions update (#710)
- Makefile help (#725)
- Optimized object tags setting (#669)
- Improved logging (#728)
- Unified unit test names (#617)
- Improved docs (#732)

### Removed
- Unused cache methods (#650)

### Updating from v0.24.0
New config parameters were added. Make sure the default parameters are appropriate for you.

```yaml
cache:
  accesscontrol:
    lifetime: 1m
    size: 100000
```

## [0.24.0] - 2022-09-14

### Added
- Exposure of pool metrics (#615, #680)
- Configuration of `set_copies_number` (#634, #637)
- Configuration of list of allowed `AccessKeyID` prefixes (#674)
- Tagging directive for `CopyObject` (#666, #683)
- Customer encryption (#595)
- `CopiesNumber` configuration (#634, #637)

### Changed
- Improved wallet configuration via `.yaml` config and environment variables (#607)
- Update go version for build to 1.19 (#694, #705)
- Update version calculation (#653, #697)
- Optimized lock creation (#692)
- Update way to configure `listen_domains` (#667)
- Use `FilePath` instead of `FileName` for object keys (#657)
- Optimize listing (#625, #616)

### Removed
- Drop any object search logic (#545)

### Fixed
- Responses to `GetObject` and `HeadObject`: removed redundant `VersionID` (#577, #682)
- Replacement of object tagging in case of overwriting of an object (#645)
- Using tags cache with empty `versionId` (#643)
- Fix panic on go1.19 (#678)
- Fix panic on invalid versioning status (#660)
- Fix panic on missing decrypt reader (#704)
- Using multipart uploads with `/` in name (#671)
- Don't update settings cache when request fails (#661)
- Fix handling `X-Amz-Copy-Source` header (#672)
- ACL related problems (#676, #606)
- Using `ContinuationToken` for "directories" (#684)
- Fix `connection was closed` error (#656)
- Fix listing for nested objects (#624)
- Fix anon requests to tree service (#504, #505)

### Updating from v0.23.0
Make sure your configuration is valid:

If you configure application using environment variables change:
* `S3_GW_WALLET` -> `S3_GW_WALLET_PATH`
* `S3_GW_ADDRESS` -> `S3_GW_WALLET_ADDRESS`
* `S3_GW_LISTEN_DOMAINS_N` -> `S3_GW_LISTEN_DOMAINS` (use it as array variable)

If you configure application using `.yaml` file change:
* `wallet` -> `wallet.path`
* `address` -> `wallet.address`
* `listen_domains.n` -> `listen_domains` (use it as array param)

## [0.23.0] - 2022-08-01

### Fixed
- System metadata are filtered now (#619)
- List objects in corner cases (#612, #627)
- Correct removal of a deleted object (#610)
- Bucket creation could lead to "no healthy client" error (#636)

### Added
- New param to configure pool error threshold (#633)

### Changed
- Pprof and prometheus metrics configuration (#591)
- Don't set sticky bit in authmate container (#540)
- Updated compatibility table (#638)
- Rely on string sanitizing from zap (#498)

### Updating from v0.22.0
1. To enable pprof use `pprof.enabled` instead of `pprof` in config.
   To enable prometheus metrics use `prometheus.enabled` instead of `metrics` in config.
   If you are using the command line flags you can skip this step.

## [0.22.0] - 2022-07-25

Tree service support

### Fixed
- Error logging (#450)
- Default bucket location constraint (#463)
- Suspended versioning status (#462)
- CodeQL warnings (#489, #522, #539)
- Bearer token behaviour with non-owned buckets (#459)
- ACL issues (#495, #553, #571, #573, #574, #580)
- Authmate policy parsing (#558)

### Added
- Public key output in authmate issue-secret command (#482)
- Support of conditional headers (#484)
- Cache type cast error logging (#465)
- `docker/*` target in Makefile (#471)
- Presigned requests (#529)
- Tagging and ACL notifications (#361)
- AWSv4 signer package to improve compatibility with S3 clients (#528)
- Extension mimetype detector (#289)
- Default params documentation (#592)
- Health metric (#600)
- Parallel object listing (#525)
- Tree service (see commit links from #609)

### Changed
- Reduce number of network requests (#439, #441)
- Renamed authmate to s3-authmate (#518)
- Version output (#578)
- Improved error messages (#539)

### Removed
- `layer/neofs` package (#438)

## [0.21.1] - 2022-05-16

### Changed
- Update go version to go1.17 (#427)
- Set homomorphic hashing disable attribute in container if required (#435)

## [0.21.0] - 2022-05-13

### Added
- Support of get-object-attributes (#430)

### Fixed
- Reduced time of bucket creation (#426)
- Bucket removal (#428)
- Obtainment of ETag value (#431)

### Changed
- Authmate doesn't parse session context anymore; now it accepts an application-defined
  flexible structure with container ID in human-readable format (#428)

## [0.20.0] - 2022-04-29

### Added
- Support of object locking (#195)
- Support of basic notifications (#357, #358, #359)

### Changed
- Logger behavior: now it writes to stderr instead of stdout, app name and
  version are always presented and fixed, all user options except `level` are
  dropped (#380)
- Improved docs, added config examples (#396, #398)
- Updated NeoFS SDK (#365, #409)

### Fixed
- Added check of `SetEACL` tokens before processing of requests (#347)
- Authmate: returned lost session tokens when the parameter `--session-token` is
  omitted (#387)
- Error when a bucket has no settings file (#389)
- Response to a request to delete a not existing object (#392)
- Replaced gate key in ACL Grantee by key of bearer token issuer (#395)
- Missing attach of bearer token to requests to put system object (#399)
- Deletion of system object while CompleteMultipartUpload (#400)
- Improved English in docs and comments (#405)
- Authmate: reconsidered default bearer token rules (#406)

## [0.19.0] - 2022-03-16

### Added
- Authmate: support placement policy overriding (#343, #364)
- Managing bucket notification configuration (#340)
- Unit tests in go1.17 (#265)
- NATS settings in application config (#341)
- Support `Expires` and `Cache-Control` headers (#312)
- Support `%` as delimiter (#313)
- Support `null` version deletion (#319)
- Bucket name resolving order (#285)
- Authmate: added `timeout` flag (#290)
- MinIO results in s3 compatibility tables (#304)
- Support overriding response headers (#310)

### Changed
- Authmate: check parameters before container creation (#372)
- Unify cache invalidation on deletion (#368)
- Updated NeoFS SDK to v1.0.0-rc.3 (#297, #333, #346, #376)
- Authmate: changed session token rules handling (#329, #336, #338, #352)
- Changed status code for some failed requests (#308)
- GetBucketLocation returns policy name used at bucket creation (#301)

### Fixed
- Waiting for bucket to be deleted (#366)
- Authmate: changed error message for session context building (#348)
- Authmate: fixed access key parsing in `obtain-secret` command (#295)
- Distinguishing `BucketAlreadyExists` errors (#354)
- Incorrect panic if handler not found (#305)
- Authmate: use container friendly name as system name (#299, #324)
- Use UTC `Last-Modified` timestamps (#331)
- Don't return object system metadata (#307)
- Handling empty post policy (#306)
- Use `X-Amz-Version-Id` in `CompleteMultipartUpload` (#318)

### Removed
- Drop MinIO related errors (#316)

## [0.18.0] - 2021-12-16

### Added
- Support for MultipartUpload (#186, #187)
- CORS support (#217)
- Authmate supports setting of tokens lifetime in a more convenient format (duration) (#258)
- Generation of a random key for `--no-sign-request` (#276)

### Changed
- Bucket name resolving mechanism from listing owner's containers to using DNS (#219)

### Removed
- Deprecated golint, replaced by revive (#272)

## 0.17.0 (24 Sep 2021)
With this release we introduce [ceph-based](https://github.com/ceph/s3-tests) S3 compatibility results.

### Added
* Versioning support (#122, #242, #263)
* Ceph S3 compatibility results (#150, #249, #266)
* Handling `X-Amz-Expected-Bucket-Owner` header (#216)
* `X-Container-Id` header for `HeadBucket` response (#220)
* Basic ACL support (#49, #213)
* Caching (#179, #206, #231, #236, #253)
* Metadata directive when copying (#191)
* Bucket name checking (#189)
* Continuation token support (#112, #154, #180)
* Mapping `LocationConstraint` to `PlacementPolicy` (#89)
* Tagging support (#196)
* POST uploading support (#190)
* Delete marker support (#248)
* Expiration for access box (#255)
* AWS CLI credential generating by authmate (#241)

### Changed
* Default placement policy is now configurable (#218)
* README is split into different files (#210)
* Unified error handling (#89, #149, #184)
* Authmate issue-secret response contains container id (#163)
* Removed "github.com/nspcc-dev/neofs-node" dependency (#234)
* Removed GitHub workflow of image publishing (#243)
* Changed license to AGPLv3 (#264)

### Fixed
* ListObjects results are now the same for different users (#230)
* Error response for invalid authentication header is now correct (#199)
* Saving object metadata (#198)
* Range header handling (#194)
* Correct status codes (#118, #262)
* HeadObject for "directories" (#160)
* Fetch-owner parameter support (#159)

## 0.16.0 (16 Jul 2021)

With this release we publish S3 gateway source code. It includes various S3
compatibility improvements, support of bucket management, unified secp256r1
cryptography with NEP-6 wallet support.

### Fixed
* Allowed no-sign request (#65)
* Bearer token attached to all requests (#84)
* Time format in responses (#133)
* Max-keys checked in ListObjects (#135)
* Lost metadata in the objects (#131)
* Unique bucket name check (#125)

### Added
* Bucket management operations (#47, #72)
* Node-specific owner IDs in bearer tokens (#83)
* AWS CLI usage section in README (#77)
* List object paging (#97)
* Lifetime for the tokens in auth-mate (#108)
* Support of range in GetObject request (#96)
* Support of NEP-6 wallets instead of binary encoded keys (#92)
* Support of JSON encoded rules in auth-mate (#71)
* Support of delimiters in ListObjects (#98)
* Support of object ETag (#93)
* Support of time-based conditional CopyObject and GetObject (#94)

### Changed
* Access key format: now `0` is used as a delimiter between container ID and object
  ID instead of `_` (#164)
* Accessbox is encoded in protobuf format (#48)
* Authentication uses secp256r1 instead of ed25519 (#75)
* Improved integration with NeoFS SDK and NeoFS API Go (#78, #88)
* Optimized object put execution (#155)

### Removed
* GRPC keepalive options (#73)

## 0.15.0 (10 Jun 2021)

This release brings the S3 gateway up to the current state of NeoFS and fixes some
bugs; no new significant features are introduced (other than moving the already
existing authmate component here).

New features:
* authmate was moved into this repository and is now built along with the
  gateway itself (#46)

Behavior changes:
* neofs-s3-gate was renamed to neofs-s3-gw (#50)

Improvements:
* better Makefile (#43, #45, #55)
* stricter linters (#45)
* removed non-standard errors package from dependencies (#54)
* refactoring, reusing new sdk-go component (#60, #62, #63)
* updated neofs-api-go for compatibility with current NeoFS node 0.21.0 (#60, #68)
* extended README (#67, #76)

Bugs fixed:
* wrong (as per AWS specification) access key ID generated (#64)
- Update go version to go1.18 (#16)
- Return error on invalid LocationConstraint (#23)
- Limit number of objects to delete at one time (#37)
- CompleteMultipartUpload handler now sends whitespace characters to keep the client's connection alive (#60)
- Support new system attributes (#64)
- Abstract network communication in TreeClient (#59, #75)
- Changed values for `frostfs_s3_gw_state_health` metric (#91)

## Older versions

Please refer to [GitHub
releases](https://github.com/nspcc-dev/neofs-s3-gw/releases/) for older
releases.
This project is a fork of [NeoFS S3 Gateway](https://github.com/nspcc-dev/neofs-s3-gw) from version v0.26.0.
To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-s3-gw/blob/master/CHANGELOG.md.

[0.18.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.17.0...v0.18.0
[0.19.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.18.0...v0.19.0
[0.20.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.19.0...v0.20.0
[0.21.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.20.0...v0.21.0
[0.21.1]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.21.0...v0.21.1
[0.22.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.21.1...v0.22.0
[0.23.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.22.0...v0.23.0
[0.24.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.23.0...v0.24.0
[0.25.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.24.0...v0.25.0
[Unreleased]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.25.0...master
[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/b2148cc3...v0.27.0
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.27.0...master
@@ -3,8 +3,8 @@
 First, thank you for contributing! We love and encourage pull requests from
 everyone. Please follow the guidelines:
 
-- Check the open [issues](https://github.com/TrueCloudLab/frostfs-s3-gw/issues) and
-  [pull requests](https://github.com/TrueCloudLab/frostfs-s3-gw/pulls) for existing
+- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/issues) and
+  [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pulls) for existing
   discussions.
 
 - Open an issue first, to discuss a new feature or enhancement.
@@ -27,20 +27,20 @@ Start by forking the `frostfs-s3-gw` repository, make changes in a branch and then
 send a pull request. We encourage pull requests to discuss code changes. Here
 are the steps in detail:
 
-### Set up your GitHub Repository
+### Set up your git repository
 Fork [FrostFS S3 Gateway
-upstream](https://github.com/TrueCloudLab/frostfs-s3-gw/fork) source repository
+upstream](https://git.frostfs.info/repo/fork/15) source repository
 to your own personal repository. Copy the URL of your fork (you will need it for
 the `git clone` command below).
 
 ```sh
-$ git clone https://github.com/TrueCloudLab/frostfs-s3-gw
+$ git clone https://git.frostfs.info/<username>/frostfs-s3-gw.git
 ```
 
 ### Set up git remote as ``upstream``
 ```sh
 $ cd frostfs-s3-gw
-$ git remote add upstream https://github.com/TrueCloudLab/frostfs-s3-gw
+$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw.git
 $ git fetch upstream
 $ git merge upstream/master
 ...
@@ -90,8 +90,8 @@ $ git push origin feature/123-something_awesome
 ```
 
 ### Create a Pull Request
-Pull requests can be created via GitHub. Refer to [this
-document](https://help.github.com/articles/creating-a-pull-request/) for
+Pull requests can be created via Forgejo. Refer to [this
+document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
 detailed steps on how to create a pull request. After a Pull Request gets peer
 reviewed and approved, it will be merged.
Makefile (mode changed from normal to executable; 56 lines changed)
@@ -3,10 +3,13 @@
 # Common variables
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
-GO_VERSION ?= 1.19
-LINT_VERSION ?= 1.49.0
+GO_VERSION ?= 1.20
+LINT_VERSION ?= 1.54.0
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.2
 BINDIR = bin
 
+METRICS_DUMP_OUT ?= ./metrics-dump.json
+
 # Binaries to build
 CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
 BINS = $(addprefix $(BINDIR)/, $(CMDS))
@@ -16,7 +19,11 @@ REPO_BASENAME = $(shell basename `go list -m`)
 HUB_IMAGE ?= "truecloudlab/$(REPO_BASENAME)"
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
 
-.PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint version clean protoc
+OUTPUT_LINT_DIR ?= $(shell pwd)/bin
+LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
+TMP_DIR := .cache
+
+.PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean protoc
 
 # .deb package versioning
 OS_RELEASE = $(shell lsb_release -cs)
@@ -28,7 +35,7 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
 # Make all binaries
 all: $(BINS)
 
-$(BINS): sync-tree $(BINDIR) dep
+$(BINS): $(BINDIR) dep
 	@echo "⇒ Build $@"
 	CGO_ENABLED=0 \
 	go build -v -trimpath \
@@ -39,10 +46,6 @@ $(BINDIR):
 	@echo "⇒ Ensure dir: $@"
 	@mkdir -p $@
 
-# Synchronize tree service
-sync-tree:
-	@./syncTree.sh
-
 # Pull go dependencies
 dep:
 	@printf "⇒ Download requirements: "
@@ -103,9 +106,23 @@ dirty-image:
 		-f .docker/Dockerfile.dirty \
 		-t $(HUB_IMAGE)-dirty:$(HUB_TAG) .
 
+# Install linters
+lint-install:
+	@mkdir -p $(TMP_DIR)
+	@rm -rf $(TMP_DIR)/linters
+	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
+	@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
+	@rm -rf $(TMP_DIR)/linters
+	@rmdir $(TMP_DIR) 2>/dev/null || true
+	@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
+
 # Run linters
 lint:
-	@golangci-lint --timeout=5m run
+	@if [ ! -d "$(LINT_DIR)" ]; then \
+		echo "Run make lint-install"; \
+		exit 1; \
+	fi
+	$(LINT_DIR)/golangci-lint --timeout=5m run
 
 # Run linters in Docker
 docker/lint:
@@ -115,6 +132,14 @@ docker/lint:
 		--env HOME=/src \
 		golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'
 
+# Activate pre-commit hooks
+pre-commit:
+	pre-commit install -t pre-commit -t commit-msg
+
+# Deactivate pre-commit hooks
+unpre-commit:
+	pre-commit uninstall -t pre-commit -t commit-msg
+
 # Show current version
 version:
 	@echo $(VERSION)
@@ -126,10 +151,16 @@ clean:
 
 # Generate code from .proto files
 protoc:
+	# Install specific version for protobuf lib
+	@GOBIN=$(abspath $(BINDIR)) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen
 	@for f in `find . -type f -name '*.proto' -not -path './vendor/*'`; do \
 		echo "⇒ Processing $$f "; \
 		protoc \
-			--go_out=paths=source_relative:. $$f; \
+			--go_out=paths=source_relative:. \
+			--plugin=protoc-gen-go-frostfs=$(BINDIR)/protogen \
+			--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
 			--go-grpc_opt=require_unimplemented_servers=false \
 			--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
 	done
+	rm -rf vendor
@@ -145,4 +176,9 @@ debpackage:
 debclean:
 	dh clean
 
+# Dump metrics (use METRICS_DUMP_OUT variable to override default out file './metrics-dump.json')
+.PHONY: dump-metrics
+dump-metrics:
+	@go test ./metrics -run TestDescribeAll --tags=dump_metrics --out=$(abspath $(METRICS_DUMP_OUT))
+
 include help.mk
@@ -6,9 +6,9 @@
 </p>
 
 ---
-[![Report](https://goreportcard.com/badge/github.com/TrueCloudLab/frostfs-s3-gw)](https://goreportcard.com/report/github.com/TrueCloudLab/frostfs-s3-gw)
-![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/TrueCloudLab/frostfs-s3-gw?sort=semver)
-![License](https://img.shields.io/github/license/TrueCloudLab/frostfs-s3-gw.svg?style=popout)
+[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)
+![Release](https://img.shields.io/badge/dynamic/json.svg?label=release&url=https://git.frostfs.info/api/v1/repos/TrueCloudLab/frostfs-s3-gw/releases&query=$[0].tag_name&color=orange)
+![License](https://img.shields.io/badge/license-GPL--3.0-orange.svg)
 
 # FrostFS S3 Gateway
 
@@ -16,7 +16,7 @@ FrostFS S3 gateway provides API compatible with Amazon S3 cloud storage service.
 
 ## Installation
 
-```go get -u github.com/TrueCloudLab/frostfs-s3-gw```
+```go get -u git.frostfs.info/TrueCloudLab/frostfs-s3-gw```
 
 Or you can call `make` to build it from the cloned repository (the binary will
 end up in `bin/frostfs-s3-gw` with authmate helper in `bin/frostfs-s3-authmate`).

VERSION (2 changes)
@@ -1 +1 @@
-v0.26.0
+v0.27.0
@@ -5,7 +5,6 @@ import (
 	"crypto/hmac"
 	"crypto/sha256"
 	"encoding/hex"
-	"errors"
 	"fmt"
 	"io"
 	"mime/multipart"
@@ -14,12 +13,13 @@ import (
 	"strings"
 	"time"
 
-	v4 "github.com/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/cache"
-	apiErrors "github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-	"github.com/TrueCloudLab/frostfs-s3-gw/creds/tokens"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
+	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
+	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 )
@@ -31,27 +31,15 @@ var authorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(
 var postPolicyCredentialRegexp = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request`)
 
 type (
-	// Center is a user authentication interface.
-	Center interface {
-		Authenticate(request *http.Request) (*Box, error)
-	}
-
-	// Box contains access box and additional info.
-	Box struct {
-		AccessBox  *accessbox.Box
-		ClientTime time.Time
-	}
-
-	center struct {
+	Center struct {
 		reg                        *RegexpSubmatcher
 		postReg                    *RegexpSubmatcher
 		cli                        tokens.Credentials
 		allowedAccessKeyIDPrefixes []string // empty slice means all access key ids are allowed
 	}
 
-	prs int
-
-	authHeader struct {
+	//nolint:revive
+	AuthHeader struct {
 		AccessKeyID string
 		Service     string
 		Region      string
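The `postPolicyCredentialRegexp` shown above splits an AWS SigV4 credential scope (`<access-key-id>/<date>/<region>/<service>/aws4_request`) into named groups. A self-contained sketch of that extraction, reusing the pattern verbatim, with a sample credential built around the `oid0cid` access key ID from the tests further below:

```go
package main

import (
	"fmt"
	"regexp"
)

// Pattern copied verbatim from postPolicyCredentialRegexp above.
var credRe = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request`)

func main() {
	cred := "oid0cid/20210809/us-east-1/s3/aws4_request"

	m := credRe.FindStringSubmatch(cred)
	if m == nil {
		panic("credential does not match")
	}
	// Map group names to captured values, similar in spirit to what a
	// RegexpSubmatcher helper would return.
	groups := make(map[string]string)
	for i, name := range credRe.SubexpNames() {
		if name != "" {
			groups[name] = m[i]
		}
	}
	fmt.Println(groups["access_key_id"], groups["date"], groups["region"], groups["service"])
}
```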
@@ -78,22 +66,9 @@
 	ContentTypeHdr = "Content-Type"
 )
 
-// ErrNoAuthorizationHeader is returned for unauthenticated requests.
-var ErrNoAuthorizationHeader = errors.New("no authorization header")
-
-func (p prs) Read(_ []byte) (n int, err error) {
-	panic("implement me")
-}
-
-func (p prs) Seek(_ int64, _ int) (int64, error) {
-	panic("implement me")
-}
-
-var _ io.ReadSeeker = prs(0)
-
 // New creates an instance of AuthCenter.
-func New(frostFS tokens.FrostFS, key *keys.PrivateKey, prefixes []string, config *cache.Config) Center {
-	return &center{
+func New(frostFS tokens.FrostFS, key *keys.PrivateKey, prefixes []string, config *cache.Config) *Center {
+	return &Center{
 		cli:     tokens.New(frostFS, key, config),
 		reg:     NewRegexpMatcher(authorizationFieldRegexp),
 		postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
@@ -101,7 +76,7 @@ func New(frostFS tokens.FrostFS, key *keys.PrivateKey, prefixes []string, config
 	}
 }
 
-func (c *center) parseAuthHeader(header string) (*authHeader, error) {
+func (c *Center) parseAuthHeader(header string) (*AuthHeader, error) {
 	submatches := c.reg.GetSubmatches(header)
 	if len(submatches) != authHeaderPartsNum {
 		return nil, apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed)
@@ -114,7 +89,7 @@ func (c *center) parseAuthHeader(header string) (*authHeader, error) {
 
 	signedFields := strings.Split(submatches["signed_header_fields"], ";")
 
-	return &authHeader{
+	return &AuthHeader{
 		AccessKeyID: submatches["access_key_id"],
 		Service:     submatches["service"],
 		Region:      submatches["region"],
@@ -124,7 +99,7 @@ func (c *center) parseAuthHeader(header string) (*authHeader, error) {
 	}, nil
 }
 
-func (a *authHeader) getAddress() (oid.Address, error) {
+func (a *AuthHeader) getAddress() (oid.Address, error) {
 	var addr oid.Address
 	if err := addr.DecodeString(strings.ReplaceAll(a.AccessKeyID, "0", "/")); err != nil {
 		return addr, apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)
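`getAddress` above recovers a FrostFS object address from the S3 access key ID by turning `0` back into `/`. The trick works because the access key ID is the container ID and object ID in base58 joined by `0`, and the base58 alphabet contains no `0` character, so the delimiter can never collide with the IDs (the 0.16.0 changelog entry earlier in this diff notes the switch to this delimiter). A small sketch with hypothetical base58-looking IDs:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical base58 container and object IDs; base58 deliberately
	// omits '0', 'O', 'I' and 'l', so '0' is a collision-free delimiter.
	cnrID := "BzQw5HH5YoMzcvtBCYopxuvyujFPEsmopB7EcqdGdkvd"
	objID := "3nQH1Lju3eM9vNyUzyuJ1HhMLo2YBbuMDRCZJfyJCkrK"

	accessKeyID := cnrID + "0" + objID

	// The inverse transform used by getAddress: "cid0oid" -> "cid/oid",
	// which oid.Address.DecodeString can then parse.
	fmt.Println(strings.ReplaceAll(accessKeyID, "0", "/"))
}
```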
@ -132,10 +107,10 @@ func (a *authHeader) getAddress() (oid.Address, error) {
|
|||
return addr, nil
|
||||
}
|
||||
|
||||
func (c *center) Authenticate(r *http.Request) (*Box, error) {
|
||||
func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
||||
var (
|
||||
err error
|
||||
authHdr *authHeader
|
||||
authHdr *AuthHeader
|
||||
signatureDateTimeStr string
|
||||
needClientTime bool
|
||||
)
|
||||
|
@ -146,12 +121,12 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
|
|||
if len(creds) != 5 || creds[4] != "aws4_request" {
|
||||
return nil, fmt.Errorf("bad X-Amz-Credential")
|
||||
}
|
||||
authHdr = &authHeader{
|
||||
authHdr = &AuthHeader{
|
||||
AccessKeyID: creds[0],
|
||||
Service: creds[3],
|
||||
Region: creds[2],
|
||||
SignatureV4: queryValues.Get(AmzSignature),
|
||||
SignedFields: queryValues[AmzSignedHeaders],
|
||||
SignedFields: strings.Split(queryValues.Get(AmzSignedHeaders), ";"),
|
||||
Date: creds[1],
|
||||
IsPresigned: true,
|
||||
}
|
||||
|
@ -166,7 +141,7 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
|
|||
if strings.HasPrefix(r.Header.Get(ContentTypeHdr), "multipart/form-data") {
|
||||
return c.checkFormData(r)
|
||||
}
|
||||
return nil, ErrNoAuthorizationHeader
|
||||
return nil, middleware.ErrNoAuthorizationHeader
|
||||
}
|
||||
authHdr, err = c.parseAuthHeader(authHeaderField[0])
|
||||
if err != nil {
|
||||
|
@ -200,7 +175,14 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
result := &Box{AccessBox: box}
|
||||
result := &middleware.Box{
|
||||
AccessBox: box,
|
||||
AuthHeaders: &middleware.AuthHeader{
|
||||
AccessKeyID: authHdr.AccessKeyID,
|
||||
Region: authHdr.Region,
|
||||
SignatureV4: authHdr.SignatureV4,
|
||||
},
|
||||
}
|
||||
if needClientTime {
|
||||
result.ClientTime = signatureDateTime
|
||||
}
|
||||
|
@ -208,7 +190,7 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
|
|||
return result, nil
|
||||
}
|
||||
|
||||
func (c center) checkAccessKeyID(accessKeyID string) error {
|
||||
func (c Center) checkAccessKeyID(accessKeyID string) error {
|
||||
if len(c.allowedAccessKeyIDPrefixes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
@ -222,7 +204,7 @@ func (c center) checkAccessKeyID(accessKeyID string) error {
|
|||
return apiErrors.GetAPIError(apiErrors.ErrAccessDenied)
|
||||
}
|
||||
|
||||
func (c *center) checkFormData(r *http.Request) (*Box, error) {
|
||||
func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
|
||||
if err := r.ParseMultipartForm(maxFormSizeMemory); err != nil {
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidArgument)
|
||||
}
|
||||
|
@ -233,7 +215,7 @@ func (c *center) checkFormData(r *http.Request) (*Box, error) {
|
|||
|
||||
policy := MultipartFormValue(r, "policy")
|
||||
if policy == "" {
|
||||
return nil, ErrNoAuthorizationHeader
|
||||
return nil, middleware.ErrNoAuthorizationHeader
|
||||
}
|
||||
|
||||
submatches := c.postReg.GetSubmatches(MultipartFormValue(r, "x-amz-credential"))
|
||||
|
@ -264,10 +246,10 @@ func (c *center) checkFormData(r *http.Request) (*Box, error) {
|
|||
return nil, apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch)
|
||||
}
|
||||
|
||||
return &Box{AccessBox: box}, nil
|
||||
return &middleware.Box{AccessBox: box}, nil
|
||||
}
|
||||
|
||||
func cloneRequest(r *http.Request, authHeader *authHeader) *http.Request {
|
||||
func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
|
||||
otherRequest := r.Clone(context.TODO())
|
||||
otherRequest.Header = make(http.Header)
|
||||
|
||||
|
@ -288,9 +270,10 @@ func cloneRequest(r *http.Request, authHeader *authHeader) *http.Request {
|
|||
return otherRequest
|
||||
}
|
||||
|
||||
func (c *center) checkSign(authHeader *authHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
|
||||
func (c *Center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
|
||||
awsCreds := credentials.NewStaticCredentials(authHeader.AccessKeyID, box.Gate.AccessKey, "")
|
||||
signer := v4.NewSigner(awsCreds)
|
||||
signer.DisableURIPathEscaping = true
|
||||
|
||||
var signature string
|
||||
if authHeader.IsPresigned {
|
||||
|
@ -306,7 +289,6 @@ func (c *center) checkSign(authHeader *authHeader, box *accessbox.Box, request *
|
|||
}
|
||||
signature = request.URL.Query().Get(AmzSignature)
|
||||
} else {
|
||||
signer.DisableURIPathEscaping = true
|
||||
if _, err := signer.Sign(request, nil, authHeader.Service, authHeader.Region, signatureDateTime); err != nil {
|
||||
return fmt.Errorf("failed to sign temporary HTTP request: %w", err)
|
||||
}
|
||||
|
|
|
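Worth noting from the hunks above: the S3 access key ID is a FrostFS object address (`container_id/object_id`) with the `/` swapped for `0`, which is exactly what `getAddress` undoes; the swap is unambiguous because base58 addresses never contain the character `0`. A minimal round-trip sketch (the address literal is taken from the test added later in this diff):

```go
package main

import (
	"fmt"
	"strings"

	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

func main() {
	// An access key ID as a client presents it: "0" in place of "/".
	accessKeyID := "8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es203tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto"

	var addr oid.Address
	// getAddress performs this substitution before loading the access box.
	if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err != nil {
		panic(err)
	}
	fmt.Println(addr.String()) // container/object address of the stored credentials
}
```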
@@ -5,26 +5,26 @@ import (
 	"testing"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"github.com/stretchr/testify/require"
 )

 func TestAuthHeaderParse(t *testing.T) {
 	defaultHeader := "AWS4-HMAC-SHA256 Credential=oid0cid/20210809/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=2811ccb9e242f41426738fb1f"

-	center := &center{
+	center := &Center{
 		reg: NewRegexpMatcher(authorizationFieldRegexp),
 	}

 	for _, tc := range []struct {
 		header   string
 		err      error
-		expected *authHeader
+		expected *AuthHeader
 	}{
 		{
 			header: defaultHeader,
 			err:    nil,
-			expected: &authHeader{
+			expected: &AuthHeader{
 				AccessKeyID: "oid0cid",
 				Service:     "s3",
 				Region:      "us-east-1",
@@ -54,29 +54,29 @@ func TestAuthHeaderGetAddress(t *testing.T) {
 	defaultErr := errors.GetAPIError(errors.ErrInvalidAccessKeyID)

 	for _, tc := range []struct {
-		authHeader *authHeader
+		authHeader *AuthHeader
 		err        error
 	}{
 		{
-			authHeader: &authHeader{
+			authHeader: &AuthHeader{
 				AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJM0HrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
 			},
 			err: nil,
 		},
 		{
-			authHeader: &authHeader{
+			authHeader: &AuthHeader{
 				AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJMHrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
 			},
 			err: defaultErr,
 		},
 		{
-			authHeader: &authHeader{
+			authHeader: &AuthHeader{
 				AccessKeyID: "oid0cid",
 			},
 			err: defaultErr,
 		},
 		{
-			authHeader: &authHeader{
+			authHeader: &AuthHeader{
 				AccessKeyID: "oidcid",
 			},
 			err: defaultErr,
api/auth/presign.go (new file, 47 lines)

@@ -0,0 +1,47 @@
+package auth
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+type RequestData struct {
+	Method   string
+	Endpoint string
+	Bucket   string
+	Object   string
+}
+
+type PresignData struct {
+	Service  string
+	Region   string
+	Lifetime time.Duration
+	SignTime time.Time
+}
+
+// PresignRequest forms a pre-signed request to access objects without AWS credentials.
+func PresignRequest(creds *credentials.Credentials, reqData RequestData, presignData PresignData) (*http.Request, error) {
+	urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, rest.EscapePath(reqData.Bucket, false), rest.EscapePath(reqData.Object, false))
+	req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create new request: %w", err)
+	}
+
+	req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))
+	req.Header.Set(ContentTypeHdr, "text/plain")
+
+	signer := v4.NewSigner(creds)
+	signer.DisableURIPathEscaping = true
+
+	if _, err = signer.Presign(req, nil, presignData.Service, presignData.Region, presignData.Lifetime, presignData.SignTime); err != nil {
+		return nil, fmt.Errorf("presign: %w", err)
+	}
+
+	return req, nil
+}
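A minimal usage sketch from outside the package (the endpoint, bucket, and key pair are placeholders; the test file that follows exercises the same flow end to end against `Authenticate`):

```go
package main

import (
	"fmt"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Illustrative key pair; a real one comes from the gateway's credential tooling.
	awsCreds := credentials.NewStaticCredentials("<access-key-id>", "<secret-key>", "")

	req, err := auth.PresignRequest(awsCreds,
		auth.RequestData{Method: "GET", Endpoint: "http://localhost:8084", Bucket: "my-bucket", Object: "obj-name"},
		auth.PresignData{Service: "s3", Region: "us-east-1", Lifetime: 10 * time.Minute, SignTime: time.Now().UTC()},
	)
	if err != nil {
		panic(err)
	}
	// The signed query parameters (X-Amz-Signature and friends) live on req.URL.
	fmt.Println(req.URL.String())
}
```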
api/auth/presign_test.go (new file, 95 lines)

@@ -0,0 +1,95 @@
+package auth
+
+import (
+	"context"
+	"strings"
+	"testing"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
+	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+	"github.com/stretchr/testify/require"
+)
+
+var _ tokens.Credentials = (*credentialsMock)(nil)
+
+type credentialsMock struct {
+	boxes map[string]*accessbox.Box
+}
+
+func newTokensFrostfsMock() *credentialsMock {
+	return &credentialsMock{
+		boxes: make(map[string]*accessbox.Box),
+	}
+}
+
+func (m credentialsMock) addBox(addr oid.Address, box *accessbox.Box) {
+	m.boxes[addr.String()] = box
+}
+
+func (m credentialsMock) GetBox(_ context.Context, addr oid.Address) (*accessbox.Box, error) {
+	box, ok := m.boxes[addr.String()]
+	if !ok {
+		return nil, &apistatus.ObjectNotFound{}
+	}
+
+	return box, nil
+}
+
+func (m credentialsMock) Put(context.Context, cid.ID, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error) {
+	return oid.Address{}, nil
+}
+
+func (m credentialsMock) Update(context.Context, oid.Address, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error) {
+	return oid.Address{}, nil
+}
+
+func TestCheckSign(t *testing.T) {
+	var accessKeyAddr oid.Address
+	err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
+	require.NoError(t, err)
+
+	accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
+	secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
+	awsCreds := credentials.NewStaticCredentials(accessKeyID, secretKey, "")
+
+	reqData := RequestData{
+		Method:   "GET",
+		Endpoint: "http://localhost:8084",
+		Bucket:   "my-bucket",
+		Object:   "@obj/name",
+	}
+	presignData := PresignData{
+		Service:  "s3",
+		Region:   "spb",
+		Lifetime: 10 * time.Minute,
+		SignTime: time.Now().UTC(),
+	}
+
+	req, err := PresignRequest(awsCreds, reqData, presignData)
+	require.NoError(t, err)
+
+	expBox := &accessbox.Box{
+		Gate: &accessbox.GateData{
+			AccessKey: secretKey,
+		},
+	}
+
+	mock := newTokensFrostfsMock()
+	mock.addBox(accessKeyAddr, expBox)
+
+	c := &Center{
+		cli:     mock,
+		reg:     NewRegexpMatcher(authorizationFieldRegexp),
+		postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
+	}
+	box, err := c.Authenticate(req)
+	require.NoError(t, err)
+	require.EqualValues(t, expBox, box.AccessBox)
+}
@@ -790,6 +790,8 @@ const doubleSpace = "  "

 // stripExcessSpaces will rewrite the passed in slice's string values to not
 // contain multiple side-by-side spaces.
+//
+//nolint:revive
 func stripExcessSpaces(vals []string) {
 	var j, k, l, m, spaces int
 	for i, str := range vals {
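For reference, the behavior the comment above promises, as a small sketch (the function rewrites the slice in place, following the vendored AWS SDK v4 signer):

```go
vals := []string{"host:  example.com", "x-amz-date:   20230101T000000Z"}
stripExcessSpaces(vals)
// vals is now {"host: example.com", "x-amz-date: 20230101T000000Z"}:
// runs of side-by-side spaces are collapsed before canonicalization and signing.
```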
api/cache/access_control.go (vendored, 5 changes)

@@ -4,7 +4,8 @@ import (
 	"fmt"
 	"time"

-	"github.com/TrueCloudLab/frostfs-sdk-go/user"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"github.com/bluele/gcache"
 	"go.uber.org/zap"
 )
@@ -46,7 +47,7 @@ func (o *AccessControlCache) Get(owner user.ID, key string) bool {

 	result, ok := entry.(bool)
 	if !ok {
-		o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
+		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
 			zap.String("expected", fmt.Sprintf("%T", result)))
 		return false
 	}
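The same lookup shape repeats across every cache file touched below: fetch from gcache, assert the entry back to its concrete type, and log a shared constant from `internal/logs` on mismatch so the messages stay uniform and greppable. A distilled, hypothetical standalone form of the pattern (the real methods return the zero value directly rather than an ok flag):

```go
// Sketch of the shared cache-read pattern used throughout api/cache.
func cacheGet[T any](cache gcache.Cache, logger *zap.Logger, key interface{}) (T, bool) {
	var zero T
	entry, err := cache.Get(key) // gcache hands back an untyped interface{}
	if err != nil {
		return zero, false
	}

	result, ok := entry.(T) // each cache asserts back to its concrete type
	if !ok {
		// The inline "invalid cache entry type" strings become one shared
		// constant (logs.InvalidCacheEntryType) in this changeset.
		logger.Warn(logs.InvalidCacheEntryType,
			zap.String("actual", fmt.Sprintf("%T", entry)),
			zap.String("expected", fmt.Sprintf("%T", zero)))
		return zero, false
	}
	return result, true
}
```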
api/cache/accessbox.go (vendored, 7 changes)

@@ -4,8 +4,9 @@ import (
 	"fmt"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/bluele/gcache"
 	"go.uber.org/zap"
 )
@@ -57,7 +58,7 @@ func (o *AccessBoxCache) Get(address oid.Address) *accessbox.Box {

 	result, ok := entry.(*accessbox.Box)
 	if !ok {
-		o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
+		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
 			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}
api/cache/buckets.go (vendored, 5 changes)

@@ -4,7 +4,8 @@ import (
 	"fmt"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"github.com/bluele/gcache"
 	"go.uber.org/zap"
 )
@@ -46,7 +47,7 @@ func (o *BucketCache) Get(key string) *data.BucketInfo {

 	result, ok := entry.(*data.BucketInfo)
 	if !ok {
-		o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
+		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
 			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}
api/cache/cache_test.go (vendored, 8 changes)

@@ -3,10 +3,10 @@ package cache
 import (
 	"testing"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-	cidtest "github.com/TrueCloudLab/frostfs-sdk-go/container/id/test"
-	oidtest "github.com/TrueCloudLab/frostfs-sdk-go/object/id/test"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
+	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"
 	"go.uber.org/zap/zaptest/observer"
api/cache/names.go (vendored, 5 changes)

@@ -4,7 +4,8 @@ import (
 	"fmt"
 	"time"

-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/bluele/gcache"
 	"go.uber.org/zap"
 )
@@ -48,7 +49,7 @@ func (o *ObjectsNameCache) Get(key string) *oid.Address {

 	result, ok := entry.(oid.Address)
 	if !ok {
-		o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
+		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
 			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}
api/cache/objects.go (vendored, 7 changes)

@@ -4,8 +4,9 @@ import (
 	"fmt"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/bluele/gcache"
 	"go.uber.org/zap"
 )
@@ -47,7 +48,7 @@ func (o *ObjectsCache) GetObject(address oid.Address) *data.ExtendedObjectInfo {

 	result, ok := entry.(*data.ExtendedObjectInfo)
 	if !ok {
-		o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
+		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
 			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}
api/cache/objects_test.go (vendored, 6 changes)

@@ -4,9 +4,9 @@ import (
 	"testing"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
-	objecttest "github.com/TrueCloudLab/frostfs-sdk-go/object/test"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"
 )
api/cache/objectslist.go (vendored, 9 changes)

@@ -6,8 +6,9 @@ import (
 	"strings"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"github.com/bluele/gcache"
 	"go.uber.org/zap"
 )
@@ -75,7 +76,7 @@ func (l *ObjectsListCache) GetVersions(key ObjectsListKey) []*data.NodeVersion {

 	result, ok := entry.([]*data.NodeVersion)
 	if !ok {
-		l.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
+		l.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
 			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}
@@ -94,7 +95,7 @@ func (l *ObjectsListCache) CleanCacheEntriesContainingObject(objectName string,
 	for _, key := range keys {
 		k, ok := key.(ObjectsListKey)
 		if !ok {
-			l.logger.Warn("invalid cache key type", zap.String("actual", fmt.Sprintf("%T", key)),
+			l.logger.Warn(logs.InvalidCacheKeyType, zap.String("actual", fmt.Sprintf("%T", key)),
 				zap.String("expected", fmt.Sprintf("%T", k)))
 			continue
 		}
api/cache/objectslist_test.go (vendored, 6 changes)

@@ -4,9 +4,9 @@ import (
 	"testing"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	cidtest "github.com/TrueCloudLab/frostfs-sdk-go/container/id/test"
-	oidtest "github.com/TrueCloudLab/frostfs-sdk-go/object/id/test"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"
 )
api/cache/system.go (vendored, 11 changes)

@@ -4,7 +4,8 @@ import (
 	"fmt"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"github.com/bluele/gcache"
 	"go.uber.org/zap"
 )
@@ -48,7 +49,7 @@ func (o *SystemCache) GetObject(key string) *data.ObjectInfo {

 	result, ok := entry.(*data.ObjectInfo)
 	if !ok {
-		o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
+		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
 			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}
@@ -79,7 +80,7 @@ func (o *SystemCache) GetCORS(key string) *data.CORSConfiguration {

 	result, ok := entry.(*data.CORSConfiguration)
 	if !ok {
-		o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
+		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
 			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}
@@ -95,7 +96,7 @@ func (o *SystemCache) GetSettings(key string) *data.BucketSettings {

 	result, ok := entry.(*data.BucketSettings)
 	if !ok {
-		o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
+		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
 			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}
@@ -111,7 +112,7 @@ func (o *SystemCache) GetNotificationConfiguration(key string) *data.Notificatio

 	result, ok := entry.(*data.NotificationConfiguration)
 	if !ok {
-		o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
+		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
 			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}
@@ -4,9 +4,9 @@ import (
 	"encoding/xml"
 	"time"

-	cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/user"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 )

 const (
@@ -40,9 +40,10 @@ type (
 		Bucket        string
 		Name          string
-		Size          int64
+		Size          uint64
 		ContentType   string
 		Created       time.Time
+		CreationEpoch uint64
 		HashSum       string
 		Owner         user.ID
 		Headers       map[string]string
@@ -52,7 +53,7 @@ type (
 	NotificationInfo struct {
 		Name    string
 		Version string
-		Size    int64
+		Size    uint64
 		HashSum string
 	}
@@ -4,9 +4,9 @@ import (
 	"strconv"
 	"time"

-	cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/user"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 )

 const (
@@ -18,6 +18,7 @@ type NodeVersion struct {
 	BaseNodeVersion
 	DeleteMarker  *DeleteMarkerInfo
 	IsUnversioned bool
+	IsCombined    bool
 }

 func (v NodeVersion) IsDeleteMarker() bool {
@@ -53,7 +54,7 @@ type BaseNodeVersion struct {
 	ParenID   uint64
 	OID       oid.ID
 	Timestamp uint64
-	Size      int64
+	Size      uint64
 	ETag      string
 	FilePath  string
 }
@@ -74,23 +75,23 @@ type MultipartInfo struct {
 	Owner         user.ID
 	Created       time.Time
 	Meta          map[string]string
-	CopiesNumber  uint32
+	CopiesNumbers []uint32
 }

 // PartInfo is upload information about part.
 type PartInfo struct {
-	Key      string
-	UploadID string
-	Number   int
-	OID      oid.ID
-	Size     int64
-	ETag     string
-	Created  time.Time
+	Key      string    `json:"key"`
+	UploadID string    `json:"uploadId"`
+	Number   int       `json:"number"`
+	OID      oid.ID    `json:"oid"`
+	Size     uint64    `json:"size"`
+	ETag     string    `json:"etag"`
+	Created  time.Time `json:"created"`
 }

 // ToHeaderString forms a short part representation to use in the S3-Completed-Parts header.
 func (p *PartInfo) ToHeaderString() string {
-	return strconv.Itoa(p.Number) + "-" + strconv.FormatInt(p.Size, 10) + "-" + p.ETag
+	return strconv.Itoa(p.Number) + "-" + strconv.FormatUint(p.Size, 10) + "-" + p.ETag
 }

 // LockInfo is lock information to create appropriate tree node.
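A quick illustration of the `S3-Completed-Parts` entry format after the switch to `FormatUint` (values made up):

```go
p := &data.PartInfo{Number: 3, Size: 5242880, ETag: "a54357aff0632cce46d942af68356b38"}
fmt.Println(p.ToHeaderString())
// Output: 3-5242880-a54357aff0632cce46d942af68356b38
```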
@@ -3,6 +3,8 @@ package errors
 import (
 	"fmt"
 	"net/http"
+
+	frosterrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
 )

 type (
@@ -175,8 +177,10 @@ const (
 	// Add new extended error codes here.
 	ErrInvalidObjectName
 	ErrOperationTimedOut
+	ErrGatewayTimeout
 	ErrOperationMaxedOut
 	ErrInvalidRequest
+	ErrInvalidRequestLargeCopy
 	ErrInvalidStorageClass

 	ErrMalformedJSON
@@ -318,7 +322,7 @@ var errorCodes = errorCodeMap{
 	ErrInvalidMaxUploads: {
 		ErrCode:        ErrInvalidMaxUploads,
 		Code:           "InvalidArgument",
-		Description:    "Argument max-uploads must be an integer between 0 and 2147483647",
+		Description:    "Argument max-uploads must be an integer from 1 to 1000",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
 	ErrInvalidMaxKeys: {
@@ -594,7 +598,7 @@ var errorCodes = errorCodeMap{
 	ErrAuthorizationHeaderMalformed: {
 		ErrCode:        ErrAuthorizationHeaderMalformed,
 		Code:           "AuthorizationHeaderMalformed",
-		Description:    "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.",
+		Description:    "The authorization header that you provided is not valid.",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
 	ErrMalformedPOSTRequest: {
@@ -1124,6 +1128,12 @@ var errorCodes = errorCodeMap{
 		Description:    "A timeout occurred while trying to lock a resource, please reduce your request rate",
 		HTTPStatusCode: http.StatusServiceUnavailable,
 	},
+	ErrGatewayTimeout: {
+		ErrCode:        ErrGatewayTimeout,
+		Code:           "GatewayTimeout",
+		Description:    "The server is acting as a gateway and cannot get a response in time",
+		HTTPStatusCode: http.StatusGatewayTimeout,
+	},
 	ErrOperationMaxedOut: {
 		ErrCode: ErrOperationMaxedOut,
 		Code:    "SlowDown",
@@ -1152,6 +1162,12 @@ var errorCodes = errorCodeMap{
 		Description:    "Invalid Request",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
+	ErrInvalidRequestLargeCopy: {
+		ErrCode:        ErrInvalidRequestLargeCopy,
+		Code:           "InvalidRequest",
+		Description:    "CopyObject request made on objects larger than 5GB in size.",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	ErrIncorrectContinuationToken: {
 		ErrCode: ErrIncorrectContinuationToken,
 		Code:    "InvalidArgument",
@@ -1693,6 +1709,7 @@ var errorCodes = errorCodeMap{

 // IsS3Error checks if the provided error is a specific s3 error.
 func IsS3Error(err error, code ErrorCode) bool {
+	err = frosterrors.UnwrapErr(err)
 	e, ok := err.(Error)
 	return ok && e.ErrCode == code
}
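A sketch of what the new unwrapping step buys (assuming `frosterrors.UnwrapErr` walks the standard `%w` chain): handlers can now wrap API errors with context, as the reworked `checkOwner` in the acl.go hunks below does, without breaking `IsS3Error` checks upstream.

```go
// Before this change, wrapping hid the S3 error code from the type assertion.
base := errors.GetAPIError(errors.ErrAccessDenied)
wrapped := fmt.Errorf("check owner: %w", base)

fmt.Println(errors.IsS3Error(wrapped, errors.ErrAccessDenied)) // true after UnwrapErr
```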
@@ -14,15 +14,17 @@ import (
 	"strconv"
 	"strings"

-	v2acl "github.com/TrueCloudLab/frostfs-api-go/v2/acl"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
-	"github.com/TrueCloudLab/frostfs-sdk-go/eacl"
-	"github.com/TrueCloudLab/frostfs-sdk-go/object"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/session"
+	v2acl "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"go.uber.org/zap"
 )
@@ -158,8 +160,93 @@ func (s ServiceRecord) ToEACLRecord() *eacl.Record {
 	return serviceRecord
 }

+var (
+	errInvalidStatement = stderrors.New("invalid statement")
+	errInvalidPrincipal = stderrors.New("invalid principal")
+)
+
+func (s *statement) UnmarshalJSON(data []byte) error {
+	var statementMap map[string]interface{}
+	if err := json.Unmarshal(data, &statementMap); err != nil {
+		return err
+	}
+
+	sidField, ok := statementMap["Sid"]
+	if ok {
+		if s.Sid, ok = sidField.(string); !ok {
+			return errInvalidStatement
+		}
+	}
+
+	effectField, ok := statementMap["Effect"]
+	if ok {
+		if s.Effect, ok = effectField.(string); !ok {
+			return errInvalidStatement
+		}
+	}
+
+	principalField, ok := statementMap["Principal"]
+	if ok {
+		principalMap, ok := principalField.(map[string]interface{})
+		if !ok {
+			return errInvalidPrincipal
+		}
+
+		awsField, ok := principalMap["AWS"]
+		if ok {
+			if s.Principal.AWS, ok = awsField.(string); !ok {
+				return fmt.Errorf("%w: 'AWS' field must be string", errInvalidPrincipal)
+			}
+		}
+
+		canonicalUserField, ok := principalMap["CanonicalUser"]
+		if ok {
+			if s.Principal.CanonicalUser, ok = canonicalUserField.(string); !ok {
+				return errInvalidPrincipal
+			}
+		}
+	}
+
+	actionField, ok := statementMap["Action"]
+	if ok {
+		switch actionField := actionField.(type) {
+		case []interface{}:
+			s.Action = make([]string, len(actionField))
+			for i, action := range actionField {
+				if s.Action[i], ok = action.(string); !ok {
+					return errInvalidStatement
+				}
+			}
+		case string:
+			s.Action = []string{actionField}
+		default:
+			return errInvalidStatement
+		}
+	}
+
+	resourceField, ok := statementMap["Resource"]
+	if ok {
+		switch resourceField := resourceField.(type) {
+		case []interface{}:
+			s.Resource = make([]string, len(resourceField))
+			for i, resource := range resourceField {
+				if s.Resource[i], ok = resource.(string); !ok {
+					return errInvalidStatement
+				}
+			}
+		case string:
+			s.Resource = []string{resourceField}
+		default:
+			return errInvalidStatement
+		}
+	}
+
+	return nil
+}
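The custom unmarshaler exists because S3 bucket-policy JSON allows `Action` and `Resource` to be either a single string or an array; both spellings normalize to a `[]string`. A small in-package illustration (the ARNs are placeholders):

```go
var s1, s2 statement

_ = json.Unmarshal([]byte(`{"Effect":"Deny","Action":"s3:GetObject","Resource":"arn:aws:s3:::b/*"}`), &s1)
// s1.Action == []string{"s3:GetObject"}

_ = json.Unmarshal([]byte(`{"Effect":"Allow","Action":["s3:GetObject","s3:GetObjectVersion"],"Resource":["arn:aws:s3:::b/*"]}`), &s2)
// s2.Action == []string{"s3:GetObject", "s3:GetObjectVersion"}
```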
 func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	ctx := r.Context()
+	reqInfo := middleware.GetReqInfo(ctx)

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
@@ -167,20 +254,20 @@ func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	bucketACL, err := h.obj.GetBucketACL(r.Context(), bktInfo)
+	bucketACL, err := h.obj.GetBucketACL(ctx, bktInfo)
 	if err != nil {
 		h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
 		return
 	}

-	if err = api.EncodeToResponse(w, h.encodeBucketACL(bktInfo.Name, bucketACL)); err != nil {
+	if err = middleware.EncodeToResponse(w, h.encodeBucketACL(ctx, bktInfo.Name, bucketACL)); err != nil {
 		h.logAndSendError(w, "something went wrong", reqInfo, err)
 		return
 	}
 }

 func (h *handler) bearerTokenIssuerKey(ctx context.Context) (*keys.PublicKey, error) {
-	box, err := layer.GetBoxData(ctx)
+	box, err := middleware.GetBoxData(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -197,7 +284,7 @@ func (h *handler) bearerTokenIssuerKey(ctx context.Context) (*keys.PublicKey, er
 }

 func (h *handler) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())
 	key, err := h.bearerTokenIssuerKey(r.Context())
 	if err != nil {
 		h.logAndSendError(w, "couldn't get bearer token issuer key", reqInfo, err)
@@ -281,7 +368,8 @@ func (h *handler) updateBucketACL(r *http.Request, astChild *ast, bktInfo *data.
 }

 func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	ctx := r.Context()
+	reqInfo := middleware.GetReqInfo(ctx)

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
@@ -289,7 +377,7 @@ func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	bucketACL, err := h.obj.GetBucketACL(r.Context(), bktInfo)
+	bucketACL, err := h.obj.GetBucketACL(ctx, bktInfo)
 	if err != nil {
 		h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
 		return
@@ -301,27 +389,28 @@ func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
 		VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
 	}

-	objInfo, err := h.obj.GetObjectInfo(r.Context(), prm)
+	objInfo, err := h.obj.GetObjectInfo(ctx, prm)
 	if err != nil {
 		h.logAndSendError(w, "could not get object info", reqInfo, err)
 		return
 	}

-	if err = api.EncodeToResponse(w, h.encodeObjectACL(bucketACL, reqInfo.BucketName, objInfo.VersionID())); err != nil {
+	if err = middleware.EncodeToResponse(w, h.encodeObjectACL(ctx, bucketACL, reqInfo.BucketName, objInfo.VersionID())); err != nil {
 		h.logAndSendError(w, "failed to encode response", reqInfo, err)
 	}
 }

 func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	ctx := r.Context()
+	reqInfo := middleware.GetReqInfo(ctx)
 	versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
-	key, err := h.bearerTokenIssuerKey(r.Context())
+	key, err := h.bearerTokenIssuerKey(ctx)
 	if err != nil {
 		h.logAndSendError(w, "couldn't get gate key", reqInfo, err)
 		return
 	}

-	token, err := getSessionTokenSetEACL(r.Context())
+	token, err := getSessionTokenSetEACL(ctx)
 	if err != nil {
 		h.logAndSendError(w, "couldn't get eacl token", reqInfo, err)
 		return
@@ -339,7 +428,7 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
 		VersionID: versionID,
 	}

-	objInfo, err := h.obj.GetObjectInfo(r.Context(), p)
+	objInfo, err := h.obj.GetObjectInfo(ctx, p)
 	if err != nil {
 		h.logAndSendError(w, "could not get object info", reqInfo, err)
 		return
@@ -381,15 +470,15 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
 			BktInfo: bktInfo,
 			ReqInfo: reqInfo,
 		}
-		if err = h.sendNotifications(r.Context(), s); err != nil {
-			h.log.Error("couldn't send notification: %w", zap.Error(err))
+		if err = h.sendNotifications(ctx, s); err != nil {
+			h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
 		}
 	}
 	w.WriteHeader(http.StatusOK)
 }

 func (h *handler) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
@@ -420,14 +509,14 @@ func checkOwner(info *data.BucketInfo, owner string) error {

 	// may need to convert owner to appropriate format
 	if info.Owner.String() != owner {
-		return errors.GetAPIError(errors.ErrAccessDenied)
+		return fmt.Errorf("%w: mismatch owner", errors.GetAPIError(errors.ErrAccessDenied))
 	}

 	return nil
 }

 func (h *handler) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
@@ -1326,7 +1415,7 @@ func isWriteOperation(op eacl.Operation) bool {
 	return op == eacl.OperationDelete || op == eacl.OperationPut
 }

-func (h *handler) encodeObjectACL(bucketACL *layer.BucketACL, bucketName, objectVersion string) *AccessControlPolicy {
+func (h *handler) encodeObjectACL(ctx context.Context, bucketACL *layer.BucketACL, bucketName, objectVersion string) *AccessControlPolicy {
 	res := &AccessControlPolicy{
 		Owner: Owner{
 			ID: bucketACL.Info.Owner.String(),
@@ -1372,7 +1461,7 @@ func (h *handler) encodeObjectACL(bucketACL *layer.BucketACL, bucketName, object
 		if read {
 			permission = aclFullControl
 		} else {
-			h.log.Warn("some acl not fully mapped")
+			h.reqLogger(ctx).Warn(logs.SomeACLNotFullyMapped)
 		}

 		var grantee *Grantee
@@ -1394,8 +1483,8 @@ func (h *handler) encodeObjectACL(bucketACL *layer.BucketACL, bucketName, object
 	return res
 }

-func (h *handler) encodeBucketACL(bucketName string, bucketACL *layer.BucketACL) *AccessControlPolicy {
-	return h.encodeObjectACL(bucketACL, bucketName, "")
+func (h *handler) encodeBucketACL(ctx context.Context, bucketName string, bucketACL *layer.BucketACL) *AccessControlPolicy {
+	return h.encodeObjectACL(ctx, bucketACL, bucketName, "")
 }

 func contains(list []eacl.Operation, op eacl.Operation) bool {
@@ -2,7 +2,6 @@ package handler

 import (
 	"bytes"
-	"context"
 	"crypto/ecdsa"
 	"crypto/rand"
 	"crypto/sha256"
@@ -11,16 +10,19 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"net/http/httptest"
 	"testing"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-	"github.com/TrueCloudLab/frostfs-sdk-go/bearer"
-	"github.com/TrueCloudLab/frostfs-sdk-go/eacl"
-	"github.com/TrueCloudLab/frostfs-sdk-go/object"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/stretchr/testify/require"
 )
@@ -1352,6 +1354,85 @@ func TestBucketPolicy(t *testing.T) {
 	}
 }

+func TestBucketPolicyUnmarshal(t *testing.T) {
+	for _, tc := range []struct {
+		name   string
+		policy string
+	}{
+		{
+			name: "action/resource array",
+			policy: `
+{
+	"Version": "2012-10-17",
+	"Statement": [{
+		"Principal": {
+			"AWS": "arn:aws:iam::111122223333:role/JohnDoe"
+		},
+		"Effect": "Allow",
+		"Action": [
+			"s3:GetObject",
+			"s3:GetObjectVersion"
+		],
+		"Resource": [
+			"arn:aws:s3:::DOC-EXAMPLE-BUCKET/*",
+			"arn:aws:s3:::DOC-EXAMPLE-BUCKET2/*"
+		]
+	}]
+}
+`,
+		},
+		{
+			name: "action/resource string",
+			policy: `
+{
+	"Version": "2012-10-17",
+	"Statement": [{
+		"Principal": {
+			"AWS": "arn:aws:iam::111122223333:role/JohnDoe"
+		},
+		"Effect": "Deny",
+		"Action": "s3:GetObject",
+		"Resource": "arn:aws:s3:::DOC-EXAMPLE-BUCKET/*"
+	}]
+}
+`,
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			bktPolicy := &bucketPolicy{}
+			err := json.Unmarshal([]byte(tc.policy), bktPolicy)
+			require.NoError(t, err)
+		})
+	}
+}
+
+func TestPutBucketPolicy(t *testing.T) {
+	bktPolicy := `
+{
+	"Version": "2012-10-17",
+	"Statement": [{
+		"Principal": {
+			"AWS": "*"
+		},
+		"Effect": "Deny",
+		"Action": "s3:GetObject",
+		"Resource": "arn:aws:s3:::bucket-for-policy/*"
+	}]
+}
+`
+	hc := prepareHandlerContext(t)
+	bktName := "bucket-for-policy"
+
+	box, _ := createAccessBox(t)
+	createBucket(t, hc, bktName, box)
+
+	w, r := prepareTestPayloadRequest(hc, bktName, "", bytes.NewReader([]byte(bktPolicy)))
+	ctx := middleware.SetBoxData(r.Context(), box)
+	r = r.WithContext(ctx)
+	hc.Handler().PutBucketPolicyHandler(w, r)
+	assertStatus(hc.t, w, http.StatusOK)
+}
+
 func getBucketPolicy(hc *handlerContext, bktName string) *bucketPolicy {
 	w, r := prepareTestRequest(hc, bktName, "", nil)
 	hc.Handler().GetBucketPolicyHandler(w, r)
@@ -1368,7 +1449,7 @@ func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy *bucketPolicy
 	require.NoError(hc.t, err)

 	w, r := prepareTestPayloadRequest(hc, bktName, "", bytes.NewReader(body))
-	ctx := context.WithValue(r.Context(), api.BoxData, box)
+	ctx := middleware.SetBoxData(r.Context(), box)
 	r = r.WithContext(ctx)
 	hc.Handler().PutBucketPolicyHandler(w, r)
 	assertStatus(hc.t, w, status)
@@ -1401,9 +1482,14 @@ func createAccessBox(t *testing.T) (*accessbox.Box, *keys.PrivateKey) {

 	tok := new(session.Container)
 	tok.ForVerb(session.VerbContainerSetEACL)
+	err = tok.Sign(key.PrivateKey)
+	require.NoError(t, err)

 	tok2 := new(session.Container)
 	tok2.ForVerb(session.VerbContainerPut)
+	err = tok2.Sign(key.PrivateKey)
+	require.NoError(t, err)

 	box := &accessbox.Box{
 		Gate: &accessbox.GateData{
 			SessionTokens: []*session.Container{tok, tok2},
@@ -1414,24 +1500,34 @@ func createAccessBox(t *testing.T) (*accessbox.Box, *keys.PrivateKey) {
 	return box, key
 }

-func createBucket(t *testing.T, tc *handlerContext, bktName string, box *accessbox.Box) *data.BucketInfo {
-	w, r := prepareTestRequest(tc, bktName, "", nil)
-	ctx := context.WithValue(r.Context(), api.BoxData, box)
-	r = r.WithContext(ctx)
-	tc.Handler().CreateBucketHandler(w, r)
+func createBucket(t *testing.T, hc *handlerContext, bktName string, box *accessbox.Box) *data.BucketInfo {
+	w := createBucketBase(hc, bktName, box)
 	assertStatus(t, w, http.StatusOK)

-	bktInfo, err := tc.Layer().GetBucketInfo(tc.Context(), bktName)
+	bktInfo, err := hc.Layer().GetBucketInfo(hc.Context(), bktName)
 	require.NoError(t, err)
 	return bktInfo
 }

+func createBucketAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, code s3errors.ErrorCode) {
+	w := createBucketBase(hc, bktName, box)
+	assertS3Error(hc.t, w, s3errors.GetAPIError(code))
+}
+
+func createBucketBase(hc *handlerContext, bktName string, box *accessbox.Box) *httptest.ResponseRecorder {
+	w, r := prepareTestRequest(hc, bktName, "", nil)
+	ctx := middleware.SetBoxData(r.Context(), box)
+	r = r.WithContext(ctx)
+	hc.Handler().CreateBucketHandler(w, r)
+	return w
+}
+
 func putBucketACL(t *testing.T, tc *handlerContext, bktName string, box *accessbox.Box, header map[string]string) {
 	w, r := prepareTestRequest(tc, bktName, "", nil)
 	for key, val := range header {
 		r.Header.Set(key, val)
 	}
-	ctx := context.WithValue(r.Context(), api.BoxData, box)
+	ctx := middleware.SetBoxData(r.Context(), box)
 	r = r.WithContext(ctx)
 	tc.Handler().PutBucketACLHandler(w, r)
 	assertStatus(t, w, http.StatusOK)
@@ -1,12 +1,18 @@
 package handler

 import (
+	"encoding/xml"
 	"errors"
+	"fmt"
 	"io"
+	"strconv"
+	"strings"
+	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
-	"github.com/TrueCloudLab/frostfs-sdk-go/netmap"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"go.uber.org/zap"
 )

@@ -15,7 +21,7 @@ type (
 		log         *zap.Logger
 		obj         layer.Client
 		notificator Notificator
-		cfg         *Config
+		cfg         Config
 	}

 	Notificator interface {
@@ -24,32 +30,25 @@ type (
 	}

 	// Config contains data which handler needs to keep.
-	Config struct {
-		Policy             PlacementPolicy
-		DefaultMaxAge      int
-		NotificatorEnabled bool
-		CopiesNumber       uint32
-		ResolveZoneList    []string
-		IsResolveListAllow bool // True if ResolveZoneList contains allowed zones
-	}
+	Config interface {
+		DefaultPlacementPolicy() netmap.PlacementPolicy
+		PlacementPolicy(string) (netmap.PlacementPolicy, bool)
+		CopiesNumbers(string) ([]uint32, bool)
+		DefaultCopiesNumbers() []uint32
+		NewCompleteMultipartDecoder(io.Reader) *xml.Decoder
+		DefaultMaxAge() int
+		NotificatorEnabled() bool
+		ResolveZoneList() []string
+		IsResolveListAllow() bool
+		CompleteMultipartKeepalive() time.Duration
+		BypassContentEncodingInChunks() bool
+	}

-	PlacementPolicy interface {
-		Default() netmap.PlacementPolicy
-		Get(string) (netmap.PlacementPolicy, bool)
-	}
 )

-const (
-	// DefaultPolicy is a default policy of placing containers in FrostFS if it's not set at the request.
-	DefaultPolicy = "REP 3"
-	// DefaultCopiesNumber is a default number of object copies that is enough to consider put successful if it's not set in config.
-	DefaultCopiesNumber uint32 = 0
-)

 var _ api.Handler = (*handler)(nil)

 // New creates new api.Handler using given logger and client.
-func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg *Config) (api.Handler, error) {
+func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg Config) (api.Handler, error) {
 	switch {
 	case obj == nil:
 		return nil, errors.New("empty FrostFS Object Layer")
@@ -57,8 +56,8 @@ func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg *Config
 		return nil, errors.New("empty logger")
 	}

-	if !cfg.NotificatorEnabled {
-		log.Warn("notificator is disabled, s3 won't produce notification events")
+	if !cfg.NotificatorEnabled() {
+		log.Warn(logs.NotificatorIsDisabledS3WontProduceNotificationEvents)
 	} else if notificator == nil {
 		return nil, errors.New("empty notificator")
 	}
@@ -70,3 +69,44 @@ func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg Config
 		notificator: notificator,
 	}, nil
 }

+// pickCopiesNumbers chooses the return values following this logic:
+//  1. The array of copies numbers sent in the request's header has the highest priority.
+//  2. Then the array of copies numbers with the corresponding location constraint from the config file.
+//  3. Finally, the default copies number from the config file, wrapped into an array.
+func (h *handler) pickCopiesNumbers(metadata map[string]string, locationConstraint string) ([]uint32, error) {
+	copiesNumbersStr, ok := metadata[layer.AttributeFrostfsCopiesNumber]
+	if ok {
+		result, err := parseCopiesNumbers(copiesNumbersStr)
+		if err != nil {
+			return nil, err
+		}
+		return result, nil
+	}
+
+	copiesNumbers, ok := h.cfg.CopiesNumbers(locationConstraint)
+	if ok {
+		return copiesNumbers, nil
+	}
+
+	return h.cfg.DefaultCopiesNumbers(), nil
+}
+
+func parseCopiesNumbers(copiesNumbersStr string) ([]uint32, error) {
+	var result []uint32
+	copiesNumbersSplit := strings.Split(copiesNumbersStr, ",")
+
+	for i := range copiesNumbersSplit {
+		item := strings.ReplaceAll(copiesNumbersSplit[i], " ", "")
+		if len(item) == 0 {
+			continue
+		}
+		copiesNumber, err := strconv.ParseUint(item, 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("parse copies number: %w", err)
+		}
+		result = append(result, uint32(copiesNumber))
+	}
+
+	return result, nil
+}
api/handler/api_test.go (new file, 69 lines)

@@ -0,0 +1,69 @@
+package handler
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestCopiesNumberPicker(t *testing.T) {
+	var locationConstraints = map[string][]uint32{}
+	locationConstraint1 := "one"
+	locationConstraint2 := "two"
+	locationConstraints[locationConstraint1] = []uint32{2, 3, 4}
+
+	config := &configMock{
+		copiesNumbers:        locationConstraints,
+		defaultCopiesNumbers: []uint32{1},
+	}
+	h := handler{
+		cfg: config,
+	}
+
+	metadata := map[string]string{}
+
+	t.Run("pick default copies number", func(t *testing.T) {
+		metadata["somekey1"] = "5, 6, 7"
+		expectedCopiesNumbers := []uint32{1}
+
+		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
+		require.NoError(t, err)
+		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
+	})
+
+	t.Run("pick copies number vector according to location constraint", func(t *testing.T) {
+		metadata["somekey2"] = "6, 7, 8"
+		expectedCopiesNumbers := []uint32{2, 3, 4}
+
+		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint1)
+		require.NoError(t, err)
+		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
+	})
+
+	t.Run("pick copies number from metadata", func(t *testing.T) {
+		metadata["frostfs-copies-number"] = "7, 8, 9"
+		expectedCopiesNumbers := []uint32{7, 8, 9}
+
+		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
+		require.NoError(t, err)
+		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
+	})
+
+	t.Run("pick copies number from metadata with no space", func(t *testing.T) {
+		metadata["frostfs-copies-number"] = "7,8,9"
+		expectedCopiesNumbers := []uint32{7, 8, 9}
+
+		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
+		require.NoError(t, err)
+		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
+	})
+
+	t.Run("pick copies number from metadata with trailing comma", func(t *testing.T) {
+		metadata["frostfs-copies-number"] = "11, 12, 13, "
+		expectedCopiesNumbers := []uint32{11, 12, 13}
+
+		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
+		require.NoError(t, err)
+		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
+	})
+}
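The `configMock` used above is not part of this changeset; a minimal sketch of what it needs to look like (only the two `Config` methods `pickCopiesNumbers` touches; the field names are assumptions):

```go
// Hypothetical mock: CopiesNumbers and DefaultCopiesNumbers are all that
// pickCopiesNumbers calls; the remaining Config methods would need stub
// implementations to satisfy the full interface.
type configMock struct {
	copiesNumbers        map[string][]uint32
	defaultCopiesNumbers []uint32
}

func (c *configMock) CopiesNumbers(locationConstraint string) ([]uint32, bool) {
	result, ok := c.copiesNumbers[locationConstraint]
	return result, ok
}

func (c *configMock) DefaultCopiesNumbers() []uint32 {
	return c.defaultCopiesNumbers
}
```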
@@ -6,10 +6,11 @@ import (
 	"strconv"
 	"strings"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"go.uber.org/zap"
 )

@@ -17,7 +18,7 @@ type (
 	GetObjectAttributesResponse struct {
 		ETag         string       `xml:"ETag,omitempty"`
 		Checksum     *Checksum    `xml:"Checksum,omitempty"`
-		ObjectSize   int64        `xml:"ObjectSize,omitempty"`
+		ObjectSize   uint64       `xml:"ObjectSize,omitempty"`
 		StorageClass string       `xml:"StorageClass,omitempty"`
 		ObjectParts  *ObjectParts `xml:"ObjectParts,omitempty"`
 	}
@@ -67,7 +68,7 @@ var validAttributes = map[string]struct{}{
 }

 func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())

 	params, err := parseGetObjectAttributeArgs(r)
 	if err != nil {
@@ -123,7 +124,7 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
 	}

 	writeAttributesHeaders(w.Header(), extendedInfo, bktSettings.Unversioned())
-	if err = api.EncodeToResponse(w, response); err != nil {
+	if err = middleware.EncodeToResponse(w, response); err != nil {
 		h.logAndSendError(w, "something went wrong", reqInfo, err)
 	}
 }
@@ -4,7 +4,7 @@ import (
 	"strings"
 	"testing"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"github.com/stretchr/testify/require"
 )

@@ -17,7 +17,7 @@ func TestGetObjectPartsAttributes(t *testing.T) {

 	createTestBucket(hc, bktName)

-	putObject(t, hc, bktName, objName)
+	putObject(hc, bktName, objName)
 	result := getObjectAttributes(hc, bktName, objName, objectParts)
 	require.Nil(t, result.ObjectParts)
@@ -6,12 +6,14 @@ import (
 	"regexp"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/auth"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
-	"github.com/TrueCloudLab/frostfs-sdk-go/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"go.uber.org/zap"
 )

@@ -46,7 +48,8 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 		tagSet           map[string]string
 		sessionTokenEACL *session.Container

-		reqInfo = api.GetReqInfo(r.Context())
+		ctx     = r.Context()
+		reqInfo = middleware.GetReqInfo(ctx)

 		containsACL = containsACLHeaders(r)
 	)
@@ -84,26 +87,45 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	settings, err := h.obj.GetBucketSettings(r.Context(), dstBktInfo)
+	settings, err := h.obj.GetBucketSettings(ctx, dstBktInfo)
 	if err != nil {
 		h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
 		return
 	}

 	if containsACL {
-		if sessionTokenEACL, err = getSessionTokenSetEACL(r.Context()); err != nil {
+		if sessionTokenEACL, err = getSessionTokenSetEACL(ctx); err != nil {
 			h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
 			return
 		}
 	}

-	extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), srcObjPrm)
+	extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
 	if err != nil {
 		h.logAndSendError(w, "could not find object", reqInfo, err)
 		return
 	}
 	srcObjInfo := extendedSrcObjInfo.ObjectInfo

+	encryptionParams, err := formEncryptionParams(r)
+	if err != nil {
+		h.logAndSendError(w, "invalid sse headers", reqInfo, err)
+		return
+	}
+
+	if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
+		h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
+		return
+	}
+
+	if srcSize, err := layer.GetObjectSize(srcObjInfo); err != nil {
+		h.logAndSendError(w, "failed to get source object size", reqInfo, err)
+		return
+	} else if srcSize > layer.UploadMaxSize { // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+		h.logAndSendError(w, "too big object to copy with single copy operation, use multipart upload copy instead", reqInfo, errors.GetAPIError(errors.ErrInvalidRequestLargeCopy))
+		return
+	}
+
 	args, err := parseCopyObjectArgs(r.Header)
 	if err != nil {
 		h.logAndSendError(w, "could not parse request params", reqInfo, err)
@@ -135,24 +157,13 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 			NodeVersion: extendedSrcObjInfo.NodeVersion,
 		}

-		_, tagSet, err = h.obj.GetObjectTagging(r.Context(), tagPrm)
+		_, tagSet, err = h.obj.GetObjectTagging(ctx, tagPrm)
 		if err != nil {
 			h.logAndSendError(w, "could not get object tagging", reqInfo, err)
 			return
 		}
 	}

-	encryptionParams, err := formEncryptionParams(r)
-	if err != nil {
-		h.logAndSendError(w, "invalid sse headers", reqInfo, err)
-		return
-	}
-
-	if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
-		h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
-		return
-	}
-
 	if err = checkPreconditions(srcObjInfo, args.Conditional); err != nil {
 		h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
 		return
@@ -162,18 +173,14 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 		if len(srcObjInfo.ContentType) > 0 {
 			srcObjInfo.Headers[api.ContentType] = srcObjInfo.ContentType
 		}
-		metadata = srcObjInfo.Headers
+		metadata = makeCopyMap(srcObjInfo.Headers)
+		delete(metadata, layer.MultipartObjectSize) // object payload will be real one rather than list of compound parts
 	} else if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
 		metadata[api.ContentType] = contentType
 	}

-	copiesNumber, err := getCopiesNumberOrDefault(metadata, h.cfg.CopiesNumber)
-	if err != nil {
-		h.logAndSendError(w, "invalid copies number", reqInfo, err)
-		return
-	}
-
 	params := &layer.CopyObjectParams{
 		SrcVersioned: srcObjPrm.Versioned(),
 		SrcObject:    srcObjInfo,
 		ScrBktInfo:   srcObjPrm.BktInfo,
 		DstBktInfo:   dstBktInfo,
@@ -181,24 +188,29 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 		SrcSize:      srcObjInfo.Size,
 		Header:       metadata,
 		Encryption:   encryptionParams,
-		CopiesNuber:  copiesNumber,
 	}

-	params.Lock, err = formObjectLock(r.Context(), dstBktInfo, settings.LockConfiguration, r.Header)
+	params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, dstBktInfo.LocationConstraint)
+	if err != nil {
+		h.logAndSendError(w, "invalid copies number", reqInfo, err)
+		return
+	}
+
+	params.Lock, err = formObjectLock(ctx, dstBktInfo, settings.LockConfiguration, r.Header)
 	if err != nil {
 		h.logAndSendError(w, "could not form object lock", reqInfo, err)
 		return
 	}

 	additional := []zap.Field{zap.String("src_bucket_name", srcBucket), zap.String("src_object_name", srcObject)}
-	extendedDstObjInfo, err := h.obj.CopyObject(r.Context(), params)
+	extendedDstObjInfo, err := h.obj.CopyObject(ctx, params)
 	if err != nil {
 		h.logAndSendError(w, "couldn't copy object", reqInfo, err, additional...)
 		return
 	}
 	dstObjInfo := extendedDstObjInfo.ObjectInfo

-	if err = api.EncodeToResponse(w, &CopyObjectResponse{LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339), ETag: dstObjInfo.HashSum}); err != nil {
+	if err = middleware.EncodeToResponse(w, &CopyObjectResponse{LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339), ETag: dstObjInfo.HashSum}); err != nil {
 		h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
return
|
||||
}
|
||||
|
@ -216,7 +228,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
SessionToken: sessionTokenEACL,
|
||||
}
|
||||
|
||||
if err = h.obj.PutBucketACL(r.Context(), p); err != nil {
|
||||
if err = h.obj.PutBucketACL(ctx, p); err != nil {
|
||||
h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
@ -232,16 +244,13 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
TagSet: tagSet,
|
||||
NodeVersion: extendedDstObjInfo.NodeVersion,
|
||||
}
|
||||
if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
|
||||
if _, err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
|
||||
h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
h.log.Info("object is copied",
|
||||
zap.String("bucket", dstObjInfo.Bucket),
|
||||
zap.String("object", dstObjInfo.Name),
|
||||
zap.Stringer("object_id", dstObjInfo.ID))
|
||||
h.reqLogger(ctx).Info(logs.ObjectIsCopied, zap.Stringer("object_id", dstObjInfo.ID))
|
||||
|
||||
s := &SendNotificationParams{
|
||||
Event: EventObjectCreatedCopy,
|
||||
|
@ -249,8 +258,8 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
BktInfo: dstBktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
if err = h.sendNotifications(r.Context(), s); err != nil {
|
||||
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||
if err = h.sendNotifications(ctx, s); err != nil {
|
||||
h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
|
||||
}
|
||||
|
||||
if encryptionParams.Enabled() {
|
||||
|
@ -258,7 +267,15 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
}
|
||||
|
||||
func isCopyingToItselfForbidden(reqInfo *api.ReqInfo, srcBucket string, srcObject string, settings *data.BucketSettings, args *copyObjectArgs) bool {
|
||||
func makeCopyMap(headers map[string]string) map[string]string {
|
||||
res := make(map[string]string, len(headers))
|
||||
for key, val := range headers {
|
||||
res[key] = val
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func isCopyingToItselfForbidden(reqInfo *middleware.ReqInfo, srcBucket string, srcObject string, settings *data.BucketSettings, args *copyObjectArgs) bool {
|
||||
if reqInfo.BucketName != srcBucket || reqInfo.ObjectName != srcObject {
|
||||
return false
|
||||
}
|
||||
|
|
|
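Why makeCopyMap instead of assigning srcObjInfo.Headers directly: Go maps are
reference types, so the old "metadata = srcObjInfo.Headers" aliased the source
object's header map, and the new delete(metadata, layer.MultipartObjectSize)
call would have silently mutated it (and any cache entry sharing it). A
standalone sketch of the hazard, with illustrative names only:

    package main

    import "fmt"

    // shallowCopy mirrors makeCopyMap: a fresh map with the same entries.
    func shallowCopy(src map[string]string) map[string]string {
        dst := make(map[string]string, len(src))
        for k, v := range src {
            dst[k] = v
        }
        return dst
    }

    func main() {
        cached := map[string]string{"multipart-size": "100", "content-type": "text/plain"}

        aliased := cached // no copy: both names point at one map
        delete(aliased, "multipart-size")
        fmt.Println(len(cached)) // 1 -- the shared map lost a key

        cached["multipart-size"] = "100"
        copied := shallowCopy(cached) // independent map
        delete(copied, "multipart-size")
        fmt.Println(len(cached)) // 2 -- original untouched
    }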
api/handler/copy_test.go

@@ -6,7 +6,8 @@ import (
 	"net/url"
 	"testing"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"github.com/stretchr/testify/require"
 )
@@ -29,14 +30,14 @@ func TestCopyWithTaggingDirective(t *testing.T) {
 	copyMeta := CopyMeta{
 		Tags: map[string]string{"key2": "val"},
 	}
-	copyObject(t, tc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
+	copyObject(tc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
 	tagging := getObjectTagging(t, tc, bktName, objToCopy, emptyVersion)
 	require.Len(t, tagging.TagSet, 1)
 	require.Equal(t, "key", tagging.TagSet[0].Key)
 	require.Equal(t, "val", tagging.TagSet[0].Value)

 	copyMeta.TaggingDirective = replaceDirective
-	copyObject(t, tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
+	copyObject(tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
 	tagging = getObjectTagging(t, tc, bktName, objToCopy2, emptyVersion)
 	require.Len(t, tagging.TagSet, 1)
 	require.Equal(t, "key2", tagging.TagSet[0].Key)
@@ -51,20 +52,54 @@ func TestCopyToItself(t *testing.T) {

 	copyMeta := CopyMeta{MetadataDirective: replaceDirective}

-	copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest)
-	copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)
+	copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest)
+	copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)

 	putBucketVersioning(t, tc, bktName, true)
-	copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
-	copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)
+	copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
+	copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)

 	putBucketVersioning(t, tc, bktName, false)
-	copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
-	copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)
+	copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
+	copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
 }

-func copyObject(t *testing.T, tc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) {
-	w, r := prepareTestRequest(tc, bktName, toObject, nil)
+func TestCopyMultipart(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName, objName := "bucket-for-copy", "object-for-copy"
+	createTestBucket(hc, bktName)
+
+	partSize := layer.UploadMinSize
+	objLen := 6 * partSize
+	headers := map[string]string{}
+
+	data := multipartUpload(hc, bktName, objName, headers, objLen, partSize)
+	require.Equal(t, objLen, len(data))
+
+	objToCopy := "copy-target"
+	var copyMeta CopyMeta
+	copyObject(hc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
+
+	copiedData, _ := getObject(hc, bktName, objToCopy)
+	equalDataSlices(t, data, copiedData)
+
+	result := getObjectAttributes(hc, bktName, objToCopy, objectParts)
+	require.NotNil(t, result.ObjectParts)
+
+	objToCopy2 := "copy-target2"
+	copyMeta.MetadataDirective = replaceDirective
+	copyObject(hc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
+
+	result = getObjectAttributes(hc, bktName, objToCopy2, objectParts)
+	require.Nil(t, result.ObjectParts)
+
+	copiedData, _ = getObject(hc, bktName, objToCopy2)
+	equalDataSlices(t, data, copiedData)
+}
+
+func copyObject(hc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) {
+	w, r := prepareTestRequest(hc, bktName, toObject, nil)
 	r.Header.Set(api.AmzCopySource, bktName+"/"+fromObject)

 	r.Header.Set(api.AmzMetadataDirective, copyMeta.MetadataDirective)
@@ -79,8 +114,8 @@ func copyObject
 	}
 	r.Header.Set(api.AmzTagging, tagsQuery.Encode())

-	tc.Handler().CopyObjectHandler(w, r)
-	assertStatus(t, w, statusCode)
+	hc.Handler().CopyObjectHandler(w, r)
+	assertStatus(hc.t, w, statusCode)
 }

 func putObjectTagging(t *testing.T, tc *handlerContext, bktName, objName string, tags map[string]string) {
api/handler/cors.go

@@ -5,9 +5,11 @@ import (
 	"strconv"
 	"strings"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"go.uber.org/zap"
 )
@@ -18,7 +20,7 @@
 )

 func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
@@ -32,14 +34,14 @@ func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	if err = api.EncodeToResponse(w, cors); err != nil {
+	if err = middleware.EncodeToResponse(w, cors); err != nil {
 		h.logAndSendError(w, "could not encode cors to response", reqInfo, err)
 		return
 	}
 }

 func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
@@ -50,7 +52,12 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
 	p := &layer.PutCORSParams{
 		BktInfo: bktInfo,
 		Reader:  r.Body,
-		CopiesNumber: h.cfg.CopiesNumber,
 	}

+	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
+	if err != nil {
+		h.logAndSendError(w, "invalid copies number", reqInfo, err)
+		return
+	}
+
 	if err = h.obj.PutBucketCORS(r.Context(), p); err != nil {
@@ -58,11 +65,11 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	api.WriteSuccessResponseHeadersOnly(w)
+	middleware.WriteSuccessResponseHeadersOnly(w)
 }

 func (h *handler) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
@@ -85,19 +92,21 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
 	if origin == "" {
 		return
 	}
-	reqInfo := api.GetReqInfo(r.Context())
+
+	ctx := r.Context()
+	reqInfo := middleware.GetReqInfo(ctx)
 	if reqInfo.BucketName == "" {
 		return
 	}
-	bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
+	bktInfo, err := h.obj.GetBucketInfo(ctx, reqInfo.BucketName)
 	if err != nil {
-		h.log.Warn("get bucket info", zap.Error(err))
+		h.reqLogger(ctx).Warn(logs.GetBucketInfo, zap.Error(err))
 		return
 	}

-	cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
+	cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
 	if err != nil {
-		h.log.Warn("get bucket cors", zap.Error(err))
+		h.reqLogger(ctx).Warn(logs.GetBucketCors, zap.Error(err))
 		return
 	}

@@ -136,7 +145,7 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
 }

 func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())
 	bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
 	if err != nil {
 		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
@@ -185,12 +194,12 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
 			if rule.MaxAgeSeconds > 0 || rule.MaxAgeSeconds == -1 {
 				w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(rule.MaxAgeSeconds))
 			} else {
-				w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(h.cfg.DefaultMaxAge))
+				w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(h.cfg.DefaultMaxAge()))
 			}
 			if o != wildcard {
 				w.Header().Set(api.AccessControlAllowCredentials, "true")
 			}
-			api.WriteSuccessResponseHeadersOnly(w)
+			middleware.WriteSuccessResponseHeadersOnly(w)
 			return
 		}
 	}
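A pattern worth noting across these handlers: configuration access moves from
struct fields (h.cfg.DefaultMaxAge, h.cfg.CopiesNumber) to method calls
(h.cfg.DefaultMaxAge(), h.pickCopiesNumbers(...)). The interface shape below is
inferred from the call sites here and from the configMock further down; the
gateway's actual definition may differ:

    // Inferred handler-config interface; method names come from the call
    // sites in this diff, everything else is an assumption.
    type Config interface {
        DefaultMaxAge() int
        // CopiesNumbers returns the vector configured for a location
        // constraint; callers fall back to DefaultCopiesNumbers when absent.
        CopiesNumbers(locationConstraint string) ([]uint32, bool)
        DefaultCopiesNumbers() []uint32
    }

An interface lets tests substitute fixed values (see configMock in
handlers_test.go below) and lets the application reload settings without
handlers holding stale copies.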
api/handler/cors_test.go

@@ -1,12 +1,12 @@
 package handler

 import (
-	"context"
 	"net/http"
 	"strings"
 	"testing"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 )

 func TestCORSOriginWildcard(t *testing.T) {
@@ -23,14 +23,14 @@ func TestCORSOriginWildcard(t *testing.T) {
 	bktName := "bucket-for-cors"
 	box, _ := createAccessBox(t)
 	w, r := prepareTestRequest(hc, bktName, "", nil)
-	ctx := context.WithValue(r.Context(), api.BoxData, box)
+	ctx := middleware.SetBoxData(r.Context(), box)
 	r = r.WithContext(ctx)
 	r.Header.Add(api.AmzACL, "public-read")
 	hc.Handler().CreateBucketHandler(w, r)
 	assertStatus(t, w, http.StatusOK)

 	w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
-	ctx = context.WithValue(r.Context(), api.BoxData, box)
+	ctx = middleware.SetBoxData(r.Context(), box)
 	r = r.WithContext(ctx)
 	hc.Handler().PutBucketCorsHandler(w, r)
 	assertStatus(t, w, http.StatusOK)
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
apistatus "github.com/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/TrueCloudLab/frostfs-sdk-go/session"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
// limitation of AWS https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
|
||||
const maxObjectsToDelete = 1000
|
||||
|
||||
// DeleteObjectsRequest -- xml carrying the object key names which should be deleted.
|
||||
type DeleteObjectsRequest struct {
|
||||
// Element to enable quiet mode for the request
|
||||
|
@ -58,7 +63,8 @@ type DeleteObjectsResponse struct {
|
|||
}
|
||||
|
||||
func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := api.GetReqInfo(r.Context())
|
||||
ctx := r.Context()
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
|
||||
versionedObject := []*layer.VersionedObject{{
|
||||
Name: reqInfo.ObjectName,
|
||||
|
@ -71,7 +77,7 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
||||
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||
return
|
||||
|
@ -82,7 +88,7 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
Objects: versionedObject,
|
||||
Settings: bktSettings,
|
||||
}
|
||||
deletedObjects := h.obj.DeleteObjects(r.Context(), p)
|
||||
deletedObjects := h.obj.DeleteObjects(ctx, p)
|
||||
deletedObject := deletedObjects[0]
|
||||
if deletedObject.Error != nil {
|
||||
if isErrObjectLocked(deletedObject.Error) {
|
||||
|
@ -109,7 +115,7 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
var objID oid.ID
|
||||
if len(versionID) != 0 {
|
||||
if err = objID.DecodeString(versionID); err != nil {
|
||||
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||
h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -124,8 +130,8 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
}
|
||||
|
||||
if err = h.sendNotifications(r.Context(), m); err != nil {
|
||||
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||
if err = h.sendNotifications(ctx, m); err != nil {
|
||||
h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
|
||||
}
|
||||
|
||||
if deletedObject.VersionID != "" {
|
||||
|
@ -145,15 +151,15 @@ func isErrObjectLocked(err error) bool {
|
|||
switch err.(type) {
|
||||
default:
|
||||
return strings.Contains(err.Error(), "object is locked")
|
||||
case apistatus.ObjectLocked,
|
||||
*apistatus.ObjectLocked:
|
||||
case *apistatus.ObjectLocked:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteMultipleObjectsHandler handles multiple delete requests.
|
||||
func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := api.GetReqInfo(r.Context())
|
||||
ctx := r.Context()
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
|
||||
// Content-Md5 is required and should be set
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
||||
|
@ -176,6 +182,11 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
|||
return
|
||||
}
|
||||
|
||||
if len(requested.Objects) == 0 || len(requested.Objects) > maxObjectsToDelete {
|
||||
h.logAndSendError(w, "number of objects to delete must be greater than 0 and less or equal to 1000", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
|
||||
return
|
||||
}
|
||||
|
||||
removed := make(map[string]*layer.VersionedObject)
|
||||
toRemove := make([]*layer.VersionedObject, 0, len(requested.Objects))
|
||||
for _, obj := range requested.Objects {
|
||||
|
@ -198,7 +209,7 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
|||
return
|
||||
}
|
||||
|
||||
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
||||
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||
return
|
||||
|
@ -216,7 +227,7 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
|||
Objects: toRemove,
|
||||
Settings: bktSettings,
|
||||
}
|
||||
deletedObjects := h.obj.DeleteObjects(r.Context(), p)
|
||||
deletedObjects := h.obj.DeleteObjects(ctx, p)
|
||||
|
||||
var errs []error
|
||||
for _, obj := range deletedObjects {
|
||||
|
@ -251,17 +262,17 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
|||
zap.Array("objects", marshaler),
|
||||
zap.Errors("errors", errs),
|
||||
}
|
||||
h.log.Error("couldn't delete objects", fields...)
|
||||
h.reqLogger(ctx).Error(logs.CouldntDeleteObjects, fields...)
|
||||
}
|
||||
|
||||
if err = api.EncodeToResponse(w, response); err != nil {
|
||||
if err = middleware.EncodeToResponse(w, response); err != nil {
|
||||
h.logAndSendError(w, "could not write response", reqInfo, err, zap.Array("objects", marshaler))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := api.GetReqInfo(r.Context())
|
||||
reqInfo := middleware.GetReqInfo(r.Context())
|
||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||
|
@ -270,7 +281,7 @@ func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
var sessionToken *session.Container
|
||||
|
||||
boxData, err := layer.GetBoxData(r.Context())
|
||||
boxData, err := middleware.GetBoxData(r.Context())
|
||||
if err == nil {
|
||||
sessionToken = boxData.Gate.SessionTokenForDelete()
|
||||
}
|
||||
|
|
|
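The isErrObjectLocked change above narrows the type switch to the pointer type
*apistatus.ObjectLocked, presumably because the SDK returns status errors as
pointers, so a value case could never match. A self-contained sketch of the
pattern with a stand-in error type (hypothetical, not the SDK's):

    package main

    import (
        "fmt"
        "strings"
    )

    // objectLocked stands in for apistatus.ObjectLocked.
    type objectLocked struct{}

    func (e *objectLocked) Error() string { return "object is locked" }

    func isLocked(err error) bool {
        switch err.(type) {
        case *objectLocked: // matches errors returned as &objectLocked{}
            return true
        default: // fallback for wrapped or stringly-typed errors
            return strings.Contains(err.Error(), "object is locked")
        }
    }

    func main() {
        fmt.Println(isLocked(&objectLocked{}))                      // true via type switch
        fmt.Println(isLocked(fmt.Errorf("oops: object is locked"))) // true via substring
        fmt.Println(isLocked(fmt.Errorf("timeout")))                // false
    }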
api/handler/delete_test.go

@@ -3,11 +3,15 @@ package handler
 import (
 	"bytes"
 	"net/http"
+	"net/http/httptest"
 	"net/url"
 	"testing"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/stretchr/testify/require"
 )
@@ -15,6 +19,31 @@
 	emptyVersion = ""
 )

+func TestDeleteBucketOnAlreadyRemovedError(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName, objName := "bucket-for-removal", "object-to-delete"
+	bktInfo := createTestBucket(hc, bktName)
+
+	putObject(hc, bktName, objName)
+
+	addr := getAddressOfLastVersion(hc, bktInfo, objName)
+	hc.tp.SetObjectError(addr, &apistatus.ObjectAlreadyRemoved{})
+
+	deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})
+
+	deleteBucket(t, hc, bktName, http.StatusNoContent)
+}
+
+func getAddressOfLastVersion(hc *handlerContext, bktInfo *data.BucketInfo, objName string) oid.Address {
+	nodeVersion, err := hc.tree.GetLatestVersion(hc.context, bktInfo, objName)
+	require.NoError(hc.t, err)
+	var addr oid.Address
+	addr.SetContainer(bktInfo.CID)
+	addr.SetObject(nodeVersion.OID)
+	return addr
+}
+
 func TestDeleteBucket(t *testing.T) {
 	tc := prepareHandlerContext(t)
@@ -31,6 +60,26 @@ func TestDeleteBucket(t *testing.T) {
 	deleteBucket(t, tc, bktName, http.StatusNoContent)
 }

+func TestDeleteBucketOnNotFoundError(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName, objName := "bucket-for-removal", "object-to-delete"
+	bktInfo := createTestBucket(hc, bktName)
+
+	putObject(hc, bktName, objName)
+
+	nodeVersion, err := hc.tree.GetUnversioned(hc.context, bktInfo, objName)
+	require.NoError(t, err)
+	var addr oid.Address
+	addr.SetContainer(bktInfo.CID)
+	addr.SetObject(nodeVersion.OID)
+	hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
+
+	deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})
+
+	deleteBucket(t, hc, bktName, http.StatusNoContent)
+}
+
 func TestDeleteObject(t *testing.T) {
 	tc := prepareHandlerContext(t)
@@ -49,7 +98,7 @@ func TestDeleteObjectFromSuspended(t *testing.T) {
 	bktName, objName := "bucket-versioned-for-removal", "object-to-delete"

 	createSuspendedBucket(t, tc, bktName)
-	putObject(t, tc, bktName, objName)
+	putObject(tc, bktName, objName)

 	versionID, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
 	require.True(t, isDeleteMarker)
@@ -147,6 +196,81 @@ func TestRemoveDeleteMarker(t *testing.T) {
 	require.True(t, existInMockedFrostFS(tc, bktInfo, objInfo), "object doesn't exist but should")
 }

+func TestDeleteMarkerVersioned(t *testing.T) {
+	tc := prepareHandlerContext(t)
+
+	bktName, objName := "bucket-for-removal", "object-to-delete"
+	createVersionedBucketAndObject(t, tc, bktName, objName)
+
+	t.Run("not create new delete marker if last version is delete marker", func(t *testing.T) {
+		deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
+		require.True(t, isDeleteMarker)
+		versions := listVersions(t, tc, bktName)
+		require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)
+
+		_, isDeleteMarker = deleteObject(t, tc, bktName, objName, emptyVersion)
+		require.True(t, isDeleteMarker)
+		versions = listVersions(t, tc, bktName)
+		require.Len(t, versions.DeleteMarker, 1)
+		require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)
+	})
+
+	t.Run("do not create delete marker if object does not exist", func(t *testing.T) {
+		versionsBefore := listVersions(t, tc, bktName)
+		_, isDeleteMarker := deleteObject(t, tc, bktName, "dummy", emptyVersion)
+		require.False(t, isDeleteMarker)
+		versionsAfter := listVersions(t, tc, bktName)
+		require.Equal(t, versionsBefore, versionsAfter)
+	})
+}
+
+func TestDeleteMarkerSuspended(t *testing.T) {
+	tc := prepareHandlerContext(t)
+
+	bktName, objName := "bucket-for-removal", "object-to-delete"
+	bktInfo, _ := createVersionedBucketAndObject(t, tc, bktName, objName)
+	putBucketVersioning(t, tc, bktName, false)
+
+	t.Run("not create new delete marker if last version is delete marker", func(t *testing.T) {
+		deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
+		require.True(t, isDeleteMarker)
+		require.Equal(t, data.UnversionedObjectVersionID, deleteMarkerVersion)
+
+		deleteMarkerVersion, isDeleteMarker = deleteObject(t, tc, bktName, objName, emptyVersion)
+		require.True(t, isDeleteMarker)
+		require.Equal(t, data.UnversionedObjectVersionID, deleteMarkerVersion)
+
+		versions := listVersions(t, tc, bktName)
+		require.Len(t, versions.DeleteMarker, 1)
+		require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)
+	})
+
+	t.Run("do not create delete marker if object does not exist", func(t *testing.T) {
+		versionsBefore := listVersions(t, tc, bktName)
+		_, isDeleteMarker := deleteObject(t, tc, bktName, "dummy", emptyVersion)
+		require.False(t, isDeleteMarker)
+		versionsAfter := listVersions(t, tc, bktName)
+		require.Equal(t, versionsBefore, versionsAfter)
+	})
+
+	t.Run("remove last unversioned non delete marker", func(t *testing.T) {
+		objName := "obj3"
+		putObject(tc, bktName, objName)
+
+		nodeVersion, err := tc.tree.GetUnversioned(tc.Context(), bktInfo, objName)
+		require.NoError(t, err)
+
+		deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
+		require.True(t, isDeleteMarker)
+		require.Equal(t, data.UnversionedObjectVersionID, deleteMarkerVersion)
+
+		objVersions := getVersion(listVersions(t, tc, bktName), objName)
+		require.Len(t, objVersions, 0)
+
+		require.False(t, tc.MockedPool().ObjectExists(nodeVersion.OID))
+	})
+}
+
 func TestDeleteObjectCombined(t *testing.T) {
 	tc := prepareHandlerContext(t)
@@ -197,7 +321,7 @@ func TestDeleteMarkers(t *testing.T) {
 	deleteObject(t, tc, bktName, objName, emptyVersion)

 	versions := listVersions(t, tc, bktName)
-	require.Len(t, versions.DeleteMarker, 3, "invalid delete markers length")
+	require.Len(t, versions.DeleteMarker, 0, "invalid delete markers length")
 	require.Len(t, versions.Version, 0, "versions must be empty")

 	require.Len(t, listOIDsFromMockedFrostFS(t, tc, bktName), 0, "shouldn't be any object in frostfs")
@@ -209,7 +333,7 @@ func TestDeleteObjectFromListCache(t *testing.T) {
 	bktName, objName := "bucket-for-removal", "object-to-delete"
 	bktInfo, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)

-	versions := listObjectsV1(t, tc, bktName, "", "", "", -1)
+	versions := listObjectsV1(tc, bktName, "", "", "", -1)
 	require.Len(t, versions.Contents, 1)

 	checkFound(t, tc, bktName, objName, objInfo.VersionID())
@@ -217,7 +341,7 @@ func TestDeleteObjectFromListCache(t *testing.T) {
 	checkNotFound(t, tc, bktName, objName, objInfo.VersionID())

 	// check cache is clean after object removal
-	versions = listObjectsV1(t, tc, bktName, "", "", "", -1)
+	versions = listObjectsV1(tc, bktName, "", "", "", -1)
 	require.Len(t, versions.Contents, 0)

 	require.False(t, existInMockedFrostFS(tc, bktInfo, objInfo))
@@ -283,28 +407,53 @@ func deleteObject
 	return w.Header().Get(api.AmzVersionID), w.Header().Get(api.AmzDeleteMarker) != ""
 }

+func deleteObjects(t *testing.T, tc *handlerContext, bktName string, objVersions [][2]string) *DeleteObjectsResponse {
+	req := &DeleteObjectsRequest{}
+	for _, version := range objVersions {
+		req.Objects = append(req.Objects, ObjectIdentifier{
+			ObjectName: version[0],
+			VersionID:  version[1],
+		})
+	}
+
+	w, r := prepareTestRequest(tc, bktName, "", req)
+	r.Header.Set(api.ContentMD5, "")
+	tc.Handler().DeleteMultipleObjectsHandler(w, r)
+	assertStatus(t, w, http.StatusOK)
+
+	res := &DeleteObjectsResponse{}
+	parseTestResponse(t, w, res)
+	return res
+}
+
 func deleteBucket(t *testing.T, tc *handlerContext, bktName string, code int) {
 	w, r := prepareTestRequest(tc, bktName, "", nil)
 	tc.Handler().DeleteBucketHandler(w, r)
 	assertStatus(t, w, code)
 }

-func checkNotFound(t *testing.T, tc *handlerContext, bktName, objName, version string) {
-	query := make(url.Values)
-	query.Add(api.QueryVersionID, version)
-
-	w, r := prepareTestFullRequest(tc, bktName, objName, query, nil)
-	tc.Handler().HeadObjectHandler(w, r)
+func checkNotFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
+	w := headObjectBase(hc, bktName, objName, version)
 	assertStatus(t, w, http.StatusNotFound)
 }

-func checkFound(t *testing.T, tc *handlerContext, bktName, objName, version string) {
-	query := make(url.Values)
-	query.Add(api.QueryVersionID, version)
-
-	w, r := prepareTestFullRequest(tc, bktName, objName, query, nil)
-	tc.Handler().HeadObjectHandler(w, r)
+func headObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apiErrors.ErrorCode) {
+	w := headObjectBase(hc, bktName, objName, version)
+	assertS3Error(hc.t, w, apiErrors.GetAPIError(code))
+}
+
+func checkFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
+	w := headObjectBase(hc, bktName, objName, version)
 	assertStatus(t, w, http.StatusOK)
 }

+func headObjectBase(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
+	query := make(url.Values)
+	query.Add(api.QueryVersionID, version)
+
+	w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
+	hc.Handler().HeadObjectHandler(w, r)
+	return w
+}
+
 func listVersions(t *testing.T, tc *handlerContext, bktName string) *ListObjectsVersionsResponse {
@@ -316,11 +465,21 @@ func listVersions
 	return res
 }

-func putObject(t *testing.T, tc *handlerContext, bktName, objName string) {
+func getVersion(resp *ListObjectsVersionsResponse, objName string) []*ObjectVersionResponse {
+	var res []*ObjectVersionResponse
+	for i, version := range resp.Version {
+		if version.Key == objName {
+			res = append(res, &resp.Version[i])
+		}
+	}
+	return res
+}
+
+func putObject(hc *handlerContext, bktName, objName string) {
 	body := bytes.NewReader([]byte("content"))
-	w, r := prepareTestPayloadRequest(tc, bktName, objName, body)
-	tc.Handler().PutObjectHandler(w, r)
-	assertStatus(t, w, http.StatusOK)
+	w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
+	hc.Handler().PutObjectHandler(w, r)
+	assertStatus(hc.t, w, http.StatusOK)
 }

 func createSuspendedBucket(t *testing.T, tc *handlerContext, bktName string) *data.BucketInfo {
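The new delete tests fail exactly one stored object by address via
hc.tp.SetObjectError(addr, ...): the mocked FrostFS keeps an address-to-error
map and consults it before serving reads. A minimal sketch of that test-double
technique (types simplified; the real TestFrostFS API may differ):

    // errByAddr is a simplified stand-in for the mocked storage used above.
    type errByAddr struct {
        objects map[string][]byte
        errs    map[string]error // address -> error to force on access
    }

    func (m *errByAddr) SetObjectError(addr string, err error) {
        if m.errs == nil {
            m.errs = make(map[string]error)
        }
        m.errs[addr] = err
    }

    func (m *errByAddr) GetObject(addr string) ([]byte, error) {
        if err, ok := m.errs[addr]; ok {
            return nil, err // e.g. ObjectNotFound or ObjectAlreadyRemoved
        }
        return m.objects[addr], nil
    }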
api/handler/encryption_test.go

@@ -7,13 +7,14 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"net/http/httptest"
 	"net/url"
 	"strconv"
 	"strings"
 	"testing"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"github.com/stretchr/testify/require"
 )
@@ -41,7 +42,7 @@ func TestSimpleGetEncrypted(t *testing.T) {
 	require.NoError(t, err)
 	require.NotEqual(t, content, string(encryptedContent))

-	response, _ := getEncryptedObject(t, tc, bktName, objName)
+	response, _ := getEncryptedObject(tc, bktName, objName)
 	require.Equal(t, content, string(response))
 }
@@ -103,14 +104,40 @@ func TestS3EncryptionSSECMultipartUpload(t *testing.T) {
 	data := multipartUploadEncrypted(tc, bktName, objName, headers, objLen, partSize)
 	require.Equal(t, objLen, len(data))

-	resData, resHeader := getEncryptedObject(t, tc, bktName, objName)
+	resData, resHeader := getEncryptedObject(tc, bktName, objName)
 	equalDataSlices(t, data, resData)
 	require.Equal(t, headers[api.ContentType], resHeader.Get(api.ContentType))
 	require.Equal(t, headers[headerMetaKey], resHeader[headerMetaKey][0])
 	require.Equal(t, strconv.Itoa(objLen), resHeader.Get(api.ContentLength))

-	checkContentUsingRangeEnc(t, tc, bktName, objName, data, 1000000)
-	checkContentUsingRangeEnc(t, tc, bktName, objName, data, 10000000)
+	checkContentUsingRangeEnc(tc, bktName, objName, data, 1000000)
+	checkContentUsingRangeEnc(tc, bktName, objName, data, 10000000)
+}
+
+func TestMultipartUploadGetRange(t *testing.T) {
+	hc := prepareHandlerContext(t)
+	bktName, objName := "bucket-for-multipart-s3-tests", "multipart_obj"
+	createTestBucket(hc, bktName)
+
+	objLen := 30 * 1024 * 1024
+	partSize := objLen / 6
+	headerMetaKey := api.MetadataPrefix + "foo"
+	headers := map[string]string{
+		headerMetaKey:   "bar",
+		api.ContentType: "text/plain",
+	}
+
+	data := multipartUpload(hc, bktName, objName, headers, objLen, partSize)
+	require.Equal(t, objLen, len(data))
+
+	resData, resHeader := getObject(hc, bktName, objName)
+	equalDataSlices(t, data, resData)
+	require.Equal(t, headers[api.ContentType], resHeader.Get(api.ContentType))
+	require.Equal(t, headers[headerMetaKey], resHeader[headerMetaKey][0])
+	require.Equal(t, strconv.Itoa(objLen), resHeader.Get(api.ContentLength))
+
+	checkContentUsingRange(hc, bktName, objName, data, 1000000)
+	checkContentUsingRange(hc, bktName, objName, data, 10000000)
 }

 func equalDataSlices(t *testing.T, expected, actual []byte) {
@@ -127,7 +154,15 @@ func equalDataSlices(t *testing.T, expected, actual []byte) {
 	}
 }

-func checkContentUsingRangeEnc(t *testing.T, tc *handlerContext, bktName, objName string, data []byte, step int) {
+func checkContentUsingRangeEnc(hc *handlerContext, bktName, objName string, data []byte, step int) {
+	checkContentUsingRangeBase(hc, bktName, objName, data, step, true)
+}
+
+func checkContentUsingRange(hc *handlerContext, bktName, objName string, data []byte, step int) {
+	checkContentUsingRangeBase(hc, bktName, objName, data, step, false)
+}
+
+func checkContentUsingRangeBase(hc *handlerContext, bktName, objName string, data []byte, step int, encrypted bool) {
 	var off, toRead, end int

 	for off < len(data) {
@@ -137,8 +172,14 @@ func checkContentUsingRangeBase
 		}
 		end = off + toRead - 1

-		rangeData := getEncryptedObjectRange(t, tc, bktName, objName, off, end)
-		equalDataSlices(t, data[off:end+1], rangeData)
+		var rangeData []byte
+		if encrypted {
+			rangeData = getEncryptedObjectRange(hc.t, hc, bktName, objName, off, end)
+		} else {
+			rangeData = getObjectRange(hc.t, hc, bktName, objName, off, end)
+		}
+
+		equalDataSlices(hc.t, data[off:end+1], rangeData)

 		off += step
 	}
@@ -168,6 +209,30 @@ func multipartUploadEncrypted
 	return
 }

+func multipartUpload(hc *handlerContext, bktName, objName string, headers map[string]string, objLen, partsSize int) (objData []byte) {
+	multipartInfo := createMultipartUpload(hc, bktName, objName, headers)
+
+	var sum, currentPart int
+	var etags []string
+	adjustedSize := partsSize
+
+	for sum < objLen {
+		currentPart++
+
+		sum += partsSize
+		if sum > objLen {
+			adjustedSize = objLen - (sum - partsSize) // only the bytes left for the final part
+		}
+
+		etag, data := uploadPart(hc, bktName, objName, multipartInfo.UploadID, currentPart, adjustedSize)
+		etags = append(etags, etag)
+		objData = append(objData, data...)
+	}
+
+	completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, etags)
+	return
+}
+
 func createMultipartUploadEncrypted(hc *handlerContext, bktName, objName string, headers map[string]string) *InitiateMultipartUploadResponse {
 	return createMultipartUploadBase(hc, bktName, objName, true, headers)
 }
@@ -190,6 +255,11 @@ func createMultipartUploadBase
 }

 func completeMultipartUpload(hc *handlerContext, bktName, objName, uploadID string, partsETags []string) {
+	w := completeMultipartUploadBase(hc, bktName, objName, uploadID, partsETags)
+	assertStatus(hc.t, w, http.StatusOK)
+}
+
+func completeMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID string, partsETags []string) *httptest.ResponseRecorder {
 	query := make(url.Values)
 	query.Set(uploadIDQuery, uploadID)
 	complete := &CompleteMultipartUpload{
@@ -204,7 +274,8 @@ func completeMultipartUploadBase

 	w, r := prepareTestFullRequest(hc, bktName, objName, query, complete)
 	hc.Handler().CompleteMultipartUploadHandler(w, r)
-	assertStatus(hc.t, w, http.StatusOK)
+
+	return w
 }

 func uploadPartEncrypted(hc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) {
@@ -247,7 +318,7 @@ func TestMultipartEncrypted(t *testing.T) {
 	part2ETag, part2 := uploadPartEncrypted(hc, bktName, objName, multipartInitInfo.UploadID, 2, 5)
 	completeMultipartUpload(hc, bktName, objName, multipartInitInfo.UploadID, []string{part1ETag, part2ETag})

-	res, _ := getEncryptedObject(t, hc, bktName, objName)
+	res, _ := getEncryptedObject(hc, bktName, objName)
 	require.Equal(t, len(part1)+len(part2), len(res))
 	require.Equal(t, append(part1, part2...), res)
@@ -263,13 +334,22 @@ func putEncryptedObject
 	assertStatus(t, w, http.StatusOK)
 }

-func getEncryptedObject(t *testing.T, tc *handlerContext, bktName, objName string) ([]byte, http.Header) {
-	w, r := prepareTestRequest(tc, bktName, objName, nil)
+func getEncryptedObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header) {
+	w, r := prepareTestRequest(hc, bktName, objName, nil)
 	setEncryptHeaders(r)
-	tc.Handler().GetObjectHandler(w, r)
-	assertStatus(t, w, http.StatusOK)
+	return getObjectBase(hc, w, r)
+}
+
+func getObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header) {
+	w, r := prepareTestRequest(hc, bktName, objName, nil)
+	return getObjectBase(hc, w, r)
+}
+
+func getObjectBase(hc *handlerContext, w *httptest.ResponseRecorder, r *http.Request) ([]byte, http.Header) {
+	hc.Handler().GetObjectHandler(w, r)
+	assertStatus(hc.t, w, http.StatusOK)
 	content, err := io.ReadAll(w.Result().Body)
-	require.NoError(t, err)
+	require.NoError(hc.t, err)
 	return content, w.Header()
 }
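The sizing loop in the multipartUpload helper above uploads fixed-size parts
and trims only the tail. A worked example of the arithmetic as a standalone
helper (assumed semantics: every part is partSize except a shorter final one):

    package main

    import "fmt"

    // partSizes reproduces the loop's sizing decisions.
    func partSizes(objLen, partSize int) []int {
        var sizes []int
        for sum := 0; sum < objLen; {
            size := partSize
            if sum+partSize > objLen {
                size = objLen - sum // only the remaining bytes for the tail
            }
            sizes = append(sizes, size)
            sum += size
        }
        return sizes
    }

    func main() {
        fmt.Println(partSizes(25, 10)) // [10 10 5]
        fmt.Println(partSizes(30, 5))  // [5 5 5 5 5 5] -- the evenly divisible case the tests use
    }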
api/handler/get.go

@@ -8,10 +8,11 @@ import (
 	"strings"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"go.uber.org/zap"
 )
@@ -87,8 +88,10 @@ func writeHeaders
 	if len(info.Headers[layer.AttributeEncryptionAlgorithm]) > 0 {
 		h.Set(api.ContentLength, info.Headers[layer.AttributeDecryptedSize])
 		addSSECHeaders(h, requestHeader)
+	} else if len(info.Headers[layer.MultipartObjectSize]) > 0 {
+		h.Set(api.ContentLength, info.Headers[layer.MultipartObjectSize])
 	} else {
-		h.Set(api.ContentLength, strconv.FormatInt(info.Size, 10))
+		h.Set(api.ContentLength, strconv.FormatUint(info.Size, 10))
 	}

 	h.Set(api.ETag, info.HashSum)
@@ -104,6 +107,9 @@ func writeHeaders
 	if expires := info.Headers[api.Expires]; expires != "" {
 		h.Set(api.Expires, expires)
 	}
+	if encodings := info.Headers[api.ContentEncoding]; encodings != "" {
+		h.Set(api.ContentEncoding, encodings)
+	}

 	for key, val := range info.Headers {
 		if layer.IsSystemHeader(key) {
@@ -117,7 +123,7 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
 	var (
 		params *layer.RangeParams

-		reqInfo = api.GetReqInfo(r.Context())
+		reqInfo = middleware.GetReqInfo(r.Context())
 	)

 	conditional, err := parseConditionalHeaders(r.Header)
@@ -161,15 +167,13 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	fullSize := info.Size
-	if encryptionParams.Enabled() {
-		if fullSize, err = strconv.ParseInt(info.Headers[layer.AttributeDecryptedSize], 10, 64); err != nil {
-			h.logAndSendError(w, "invalid decrypted size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
-			return
-		}
-	}
+	fullSize, err := layer.GetObjectSize(info)
+	if err != nil {
+		h.logAndSendError(w, "invalid size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
+		return
+	}

-	if params, err = fetchRangeHeader(r.Header, uint64(fullSize)); err != nil {
+	if params, err = fetchRangeHeader(r.Header, fullSize); err != nil {
 		h.logAndSendError(w, "could not parse range header", reqInfo, err)
 		return
 	}
@@ -201,38 +205,53 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned())
-	if params != nil {
-		writeRangeHeaders(w, params, info.Size)
-	} else {
-		w.WriteHeader(http.StatusOK)
-	}
-
-	getParams := &layer.GetObjectParams{
+	getPayloadParams := &layer.GetObjectParams{
 		ObjectInfo: info,
-		Writer:     w,
 		Versioned:  p.Versioned(),
 		Range:      params,
 		BucketInfo: bktInfo,
 		Encryption: encryptionParams,
 	}
-	if err = h.obj.GetObject(r.Context(), getParams); err != nil {
-		h.logAndSendError(w, "could not get object", reqInfo, err)
+
+	objPayload, err := h.obj.GetObject(r.Context(), getPayloadParams)
+	if err != nil {
+		h.logAndSendError(w, "could not get object payload", reqInfo, err)
+		return
+	}
+
+	writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned())
+	if params != nil {
+		writeRangeHeaders(w, params, fullSize)
+	} else {
+		w.WriteHeader(http.StatusOK)
+	}
+
+	if err = objPayload.StreamTo(w); err != nil {
+		h.logAndSendError(w, "could not stream object payload", reqInfo, err)
 		return
 	}
 }

 func checkPreconditions(info *data.ObjectInfo, args *conditionalArgs) error {
 	if len(args.IfMatch) > 0 && args.IfMatch != info.HashSum {
-		return errors.GetAPIError(errors.ErrPreconditionFailed)
+		return fmt.Errorf("%w: etag mismatched: '%s', '%s'", errors.GetAPIError(errors.ErrPreconditionFailed), args.IfMatch, info.HashSum)
 	}
 	if len(args.IfNoneMatch) > 0 && args.IfNoneMatch == info.HashSum {
-		return errors.GetAPIError(errors.ErrNotModified)
+		return fmt.Errorf("%w: etag matched: '%s', '%s'", errors.GetAPIError(errors.ErrNotModified), args.IfNoneMatch, info.HashSum)
 	}
 	if args.IfModifiedSince != nil && info.Created.Before(*args.IfModifiedSince) {
-		return errors.GetAPIError(errors.ErrNotModified)
+		return fmt.Errorf("%w: not modified since '%s', last modified '%s'", errors.GetAPIError(errors.ErrNotModified),
+			args.IfModifiedSince.Format(time.RFC3339), info.Created.Format(time.RFC3339))
 	}
 	if args.IfUnmodifiedSince != nil && info.Created.After(*args.IfUnmodifiedSince) {
 		// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_RequestSyntax
 		// If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:
 		// If-Match condition evaluates to true, and;
 		// If-Unmodified-Since condition evaluates to false;
 		// then, S3 returns 200 OK and the data requested.
 		if len(args.IfMatch) == 0 {
-			return errors.GetAPIError(errors.ErrPreconditionFailed)
+			return fmt.Errorf("%w: modified since '%s', last modified '%s'", errors.GetAPIError(errors.ErrPreconditionFailed),
+				args.IfUnmodifiedSince.Format(time.RFC3339), info.Created.Format(time.RFC3339))
 		}
 	}
@@ -242,8 +261,8 @@ func checkPreconditions
 func parseConditionalHeaders(headers http.Header) (*conditionalArgs, error) {
 	var err error
 	args := &conditionalArgs{
-		IfMatch:     headers.Get(api.IfMatch),
-		IfNoneMatch: headers.Get(api.IfNoneMatch),
+		IfMatch:     strings.Trim(headers.Get(api.IfMatch), "\""),
+		IfNoneMatch: strings.Trim(headers.Get(api.IfNoneMatch), "\""),
 	}

 	if args.IfModifiedSince, err = parseHTTPTime(headers.Get(api.IfModifiedSince)); err != nil {
@@ -268,7 +287,7 @@ func parseHTTPTime(data string) (*time.Time, error) {
 	return &result, nil
 }

-func writeRangeHeaders(w http.ResponseWriter, params *layer.RangeParams, size int64) {
+func writeRangeHeaders(w http.ResponseWriter, params *layer.RangeParams, size uint64) {
 	w.Header().Set(api.AcceptRanges, "bytes")
 	w.Header().Set(api.ContentRange, fmt.Sprintf("bytes %d-%d/%d", params.Start, params.End, size))
 	w.Header().Set(api.ContentLength, strconv.FormatUint(params.End-params.Start+1, 10))
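checkPreconditions now wraps the API error with %w and appends the offending
values, so callers (and the updated TestPreconditions in get_test.go) can keep
matching with errors.Is while logs show why a check failed. A compact
illustration with a generic sentinel, not the gateway's error type:

    package main

    import (
        "errors"
        "fmt"
    )

    var errPreconditionFailed = errors.New("PreconditionFailed")

    func checkETag(ifMatch, etag string) error {
        if ifMatch != "" && ifMatch != etag {
            // %w keeps the sentinel reachable via errors.Is; the rest is context.
            return fmt.Errorf("%w: etag mismatched: '%s', '%s'", errPreconditionFailed, ifMatch, etag)
        }
        return nil
    }

    func main() {
        err := checkETag("abc", "def")
        fmt.Println(errors.Is(err, errPreconditionFailed)) // true
        fmt.Println(err) // PreconditionFailed: etag mismatched: 'abc', 'def'
    }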
api/handler/get_test.go

@@ -2,15 +2,20 @@ package handler

 import (
 	"bytes"
+	stderrors "errors"
 	"fmt"
 	"io"
 	"net/http"
+	"net/http/httptest"
+	"net/url"
 	"testing"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"github.com/stretchr/testify/require"
 )
@@ -143,7 +148,11 @@ func TestPreconditions(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			actual := checkPreconditions(tc.info, tc.args)
-			require.Equal(t, tc.expected, actual)
+			if tc.expected == nil {
+				require.NoError(t, actual)
+			} else {
+				require.True(t, stderrors.Is(actual, tc.expected), tc.expected, actual)
+			}
 		})
 	}
 }
@@ -170,6 +179,24 @@ func TestGetRange(t *testing.T) {
 	require.Equal(t, "bcdef", string(end))
 }

+func TestGetObject(t *testing.T) {
+	hc := prepareHandlerContext(t)
+	bktName, objName := "bucket", "obj"
+	bktInfo, objInfo := createVersionedBucketAndObject(hc.t, hc, bktName, objName)
+
+	putObject(hc, bktName, objName)
+
+	checkFound(hc.t, hc, bktName, objName, objInfo.VersionID())
+	checkFound(hc.t, hc, bktName, objName, emptyVersion)
+
+	addr := getAddressOfLastVersion(hc, bktInfo, objName)
+	hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
+	hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{})
+
+	getObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), errors.ErrNoSuchVersion)
+	getObjectAssertS3Error(hc, bktName, objName, emptyVersion, errors.ErrNoSuchKey)
+}
+
 func putObjectContent(hc *handlerContext, bktName, objName, content string) {
 	body := bytes.NewReader([]byte(content))
 	w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
@@ -186,3 +213,17 @@ func getObjectRange
 	require.NoError(t, err)
 	return content
 }
+
+func getObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code errors.ErrorCode) {
+	w := getObjectBaseResponse(hc, bktName, objName, version)
+	assertS3Error(hc.t, w, errors.GetAPIError(code))
+}
+
+func getObjectBaseResponse(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
+	query := make(url.Values)
+	query.Add(api.QueryVersionID, version)
+
+	w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
+	hc.Handler().GetObjectHandler(w, r)
+	return w
+}
@@ -3,9 +3,9 @@ package handler
import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/xml"
	"io"
	"math/rand"
	"net/http"
	"net/http/httptest"
	"net/url"
@@ -13,15 +13,18 @@ import (
	"testing"
	"time"

	"github.com/TrueCloudLab/frostfs-s3-gw/api"
	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
	"github.com/TrueCloudLab/frostfs-s3-gw/api/resolver"
	cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
	"github.com/TrueCloudLab/frostfs-sdk-go/netmap"
	"github.com/TrueCloudLab/frostfs-sdk-go/object"
	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/TrueCloudLab/frostfs-sdk-go/user"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
@@ -32,7 +35,11 @@ type handlerContext struct {
	t       *testing.T
	h       *handler
	tp      *layer.TestFrostFS
	tree    *tree.Tree
	context context.Context
	config  *configMock

	layerFeatures *layer.FeatureSettingsMock
}

func (hc *handlerContext) Handler() *handler {
@@ -51,24 +58,72 @@ func (hc *handlerContext) Context() context.Context {
	return hc.context
}

type placementPolicyMock struct {
type configMock struct {
	defaultPolicy                 netmap.PlacementPolicy
	copiesNumbers                 map[string][]uint32
	defaultCopiesNumbers          []uint32
	bypassContentEncodingInChunks bool
}

func (p *placementPolicyMock) Default() netmap.PlacementPolicy {
	return p.defaultPolicy
func (c *configMock) DefaultPlacementPolicy() netmap.PlacementPolicy {
	return c.defaultPolicy
}

func (p *placementPolicyMock) Get(string) (netmap.PlacementPolicy, bool) {
func (c *configMock) PlacementPolicy(string) (netmap.PlacementPolicy, bool) {
	return netmap.PlacementPolicy{}, false
}

func (c *configMock) CopiesNumbers(locationConstraint string) ([]uint32, bool) {
	result, ok := c.copiesNumbers[locationConstraint]
	return result, ok
}

func (c *configMock) DefaultCopiesNumbers() []uint32 {
	return c.defaultCopiesNumbers
}

func (c *configMock) NewCompleteMultipartDecoder(r io.Reader) *xml.Decoder {
	return xml.NewDecoder(r)
}

func (c *configMock) BypassContentEncodingInChunks() bool {
	return c.bypassContentEncodingInChunks
}

func (c *configMock) DefaultMaxAge() int {
	return 0
}

func (c *configMock) NotificatorEnabled() bool {
	return false
}

func (c *configMock) ResolveZoneList() []string {
	return []string{}
}

func (c *configMock) IsResolveListAllow() bool {
	return false
}

func (c *configMock) CompleteMultipartKeepalive() time.Duration {
	return time.Duration(0)
}

func prepareHandlerContext(t *testing.T) *handlerContext {
	return prepareHandlerContextBase(t, false)
}

func prepareHandlerContextWithMinCache(t *testing.T) *handlerContext {
	return prepareHandlerContextBase(t, true)
}

func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
	key, err := keys.NewPrivateKey()
	require.NoError(t, err)

	l := zap.NewExample()
	tp := layer.NewTestFrostFS()
	tp := layer.NewTestFrostFS(key)

	testResolver := &resolver.Resolver{Name: "test_resolver"}
	testResolver.SetResolveFunc(func(_ context.Context, name string) (cid.ID, error) {
@@ -78,23 +133,34 @@ func prepareHandlerContext(t *testing.T) *handlerContext {
	var owner user.ID
	user.IDFromKey(&owner, key.PrivateKey.PublicKey)

	treeMock := NewTreeServiceMock(t)

	cacheCfg := layer.DefaultCachesConfigs(l)
	if minCache {
		cacheCfg = getMinCacheConfig(l)
	}

	features := &layer.FeatureSettingsMock{}

	layerCfg := &layer.Config{
		Caches: layer.DefaultCachesConfigs(zap.NewExample()),
		Caches:      cacheCfg,
		AnonKey:     layer.AnonymousKey{Key: key},
		Resolver:    testResolver,
		TreeService: layer.NewTreeService(),
		TreeService: treeMock,
		Features:    features,
	}

	var pp netmap.PlacementPolicy
	err = pp.DecodeString("REP 1")
	require.NoError(t, err)

	cfg := &configMock{
		defaultPolicy: pp,
	}
	h := &handler{
		log: l,
		obj: layer.NewLayer(l, tp, layerCfg),
		cfg: &Config{
			Policy: &placementPolicyMock{defaultPolicy: pp},
		},
		cfg: cfg,
	}

	return &handlerContext{
@@ -102,14 +168,42 @@ func prepareHandlerContext(t *testing.T) *handlerContext {
		t:       t,
		h:       h,
		tp:      tp,
		context: context.WithValue(context.Background(), api.BoxData, newTestAccessBox(t, key)),
		tree:    treeMock,
		context: middleware.SetBoxData(context.Background(), newTestAccessBox(t, key)),
		config:  cfg,

		layerFeatures: features,
	}
}

func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
	minCacheCfg := &cache.Config{
		Size:     1,
		Lifetime: 1,
		Logger:   logger,
	}
	return &layer.CachesConfig{
		Logger:        logger,
		Objects:       minCacheCfg,
		ObjectsList:   minCacheCfg,
		Names:         minCacheCfg,
		Buckets:       minCacheCfg,
		System:        minCacheCfg,
		AccessControl: minCacheCfg,
	}
}

func NewTreeServiceMock(t *testing.T) *tree.Tree {
	memCli, err := tree.NewTreeServiceClientMemory()
	require.NoError(t, err)
	return tree.NewTree(memCli, zap.NewExample())
}

func createTestBucket(hc *handlerContext, bktName string) *data.BucketInfo {
	_, err := hc.MockedPool().CreateContainer(hc.Context(), layer.PrmContainerCreate{
		Creator:  hc.owner,
		Name:     bktName,
		BasicACL: acl.PublicRWExtended,
	})
	require.NoError(hc.t, err)

@@ -161,7 +255,7 @@ func createTestObject(hc *handlerContext, bktInfo *data.BucketInfo, objName stri
	extObjInfo, err := hc.Layer().PutObject(hc.Context(), &layer.PutObjectParams{
		BktInfo: bktInfo,
		Object:  objName,
		Size:    int64(len(content)),
		Size:    uint64(len(content)),
		Reader:  bytes.NewReader(content),
		Header:  header,
	})
@@ -186,8 +280,8 @@ func prepareTestRequestWithQuery(hc *handlerContext, bktName, objName string, qu
	r := httptest.NewRequest(http.MethodPut, defaultURL, bytes.NewReader(body))
	r.URL.RawQuery = query.Encode()

	reqInfo := api.NewReqInfo(w, r, api.ObjectRequest{Bucket: bktName, Object: objName})
	r = r.WithContext(api.SetReqInfo(hc.Context(), reqInfo))
	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName, Object: objName})
	r = r.WithContext(middleware.SetReqInfo(hc.Context(), reqInfo))

	return w, r
}
@@ -196,8 +290,8 @@ func prepareTestPayloadRequest(hc *handlerContext, bktName, objName string, payl
	w := httptest.NewRecorder()
	r := httptest.NewRequest(http.MethodPut, defaultURL, payload)

	reqInfo := api.NewReqInfo(w, r, api.ObjectRequest{Bucket: bktName, Object: objName})
	r = r.WithContext(api.SetReqInfo(hc.Context(), reqInfo))
	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName, Object: objName})
	r = r.WithContext(middleware.SetReqInfo(hc.Context(), reqInfo))

	return w, r
}
@@ -212,10 +306,16 @@ func existInMockedFrostFS(tc *handlerContext, bktInfo *data.BucketInfo, objInfo
	p := &layer.GetObjectParams{
		BucketInfo: bktInfo,
		ObjectInfo: objInfo,
		Writer:     io.Discard,
	}

	return tc.Layer().GetObject(tc.Context(), p) == nil
	objPayload, err := tc.Layer().GetObject(tc.Context(), p)
	if err != nil {
		return false
	}

	_, err = io.ReadAll(objPayload)
	require.NoError(tc.t, err)
	return true
}

func listOIDsFromMockedFrostFS(t *testing.T, tc *handlerContext, bktName string) []oid.ID {
@@ -235,6 +335,8 @@ func assertStatus(t *testing.T, w *httptest.ResponseRecorder, status int) {

func readResponse(t *testing.T, w *httptest.ResponseRecorder, status int, model interface{}) {
	assertStatus(t, w, status)
	if status == http.StatusOK {
		err := xml.NewDecoder(w.Result().Body).Decode(model)
		require.NoError(t, err)
	}
}
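Aside (editorial illustration, not part of the diff): the configMock above compiles because the handler was switched from reading a concrete Config struct to calling settings through methods, which is exactly what lets each test stub only the settings it cares about. A minimal Go sketch of the consumer-side shape this implies; the name handlerConfig and the exact method set are assumptions inferred from the mock, not the repository's actual declaration:

// Hypothetical interface inferred from configMock's method set; the real
// definition lives in the handler package outside the hunks shown here.
type handlerConfig interface {
	DefaultPlacementPolicy() netmap.PlacementPolicy
	PlacementPolicy(locationConstraint string) (netmap.PlacementPolicy, bool)
	CopiesNumbers(locationConstraint string) ([]uint32, bool)
	DefaultCopiesNumbers() []uint32
	NewCompleteMultipartDecoder(r io.Reader) *xml.Decoder
	CompleteMultipartKeepalive() time.Duration
}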
@@ -1,20 +1,21 @@
package handler

import (
	"bytes"
	"io"
	"net/http"

	"github.com/TrueCloudLab/frostfs-s3-gw/api"
	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"go.uber.org/zap"
)

const sizeToDetectType = 512

func getRangeToDetectContentType(maxSize int64) *layer.RangeParams {
	end := uint64(maxSize)
func getRangeToDetectContentType(maxSize uint64) *layer.RangeParams {
	end := maxSize
	if sizeToDetectType < end {
		end = sizeToDetectType
	}
@@ -26,7 +27,7 @@ func getRangeToDetectContentType(maxSize int64) *layer.RangeParams {
}

func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -83,18 +84,26 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {

	if len(info.ContentType) == 0 {
		if info.ContentType = layer.MimeByFilePath(info.Name); len(info.ContentType) == 0 {
			buffer := bytes.NewBuffer(make([]byte, 0, sizeToDetectType))
			getParams := &layer.GetObjectParams{
				ObjectInfo: info,
				Writer:     buffer,
				Versioned:  p.Versioned(),
				Range:      getRangeToDetectContentType(info.Size),
				BucketInfo: bktInfo,
			}
			if err = h.obj.GetObject(r.Context(), getParams); err != nil {

			objPayload, err := h.obj.GetObject(r.Context(), getParams)
			if err != nil {
				h.logAndSendError(w, "could not get object", reqInfo, err, zap.Stringer("oid", info.ID))
				return
			}
			info.ContentType = http.DetectContentType(buffer.Bytes())

			buffer, err := io.ReadAll(objPayload)
			if err != nil {
				h.logAndSendError(w, "could not partly read payload to detect content type", reqInfo, err, zap.Stringer("oid", info.ID))
				return
			}

			info.ContentType = http.DetectContentType(buffer)
		}
	}

@@ -114,7 +123,7 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
}

func (h *handler) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -122,15 +131,16 @@ func (h *handler) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	w.Header().Set(api.OwnerID, bktInfo.Owner.EncodeToString())
	w.Header().Set(api.ContainerID, bktInfo.CID.EncodeToString())
	w.Header().Set(api.AmzBucketRegion, bktInfo.LocationConstraint)

	if isAvailableToResolve(bktInfo.Zone, h.cfg.ResolveZoneList, h.cfg.IsResolveListAllow) {
	if isAvailableToResolve(bktInfo.Zone, h.cfg.ResolveZoneList(), h.cfg.IsResolveListAllow()) {
		w.Header().Set(api.ContainerName, bktInfo.Name)
		w.Header().Set(api.ContainerZone, bktInfo.Zone)
	}

	api.WriteResponse(w, http.StatusOK, nil, api.MimeNone)
	middleware.WriteResponse(w, http.StatusOK, nil, middleware.MimeNone)
}

func (h *handler) setLockingHeaders(bktInfo *data.BucketInfo, lockInfo *data.LockInfo, header http.Header) error {
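Aside (editorial illustration, not part of the diff): the detection path above relies on standard-library content sniffing, which inspects no more than the first 512 bytes of data; that is where sizeToDetectType comes from. A minimal standalone sketch of the same idea, assuming only the io and net/http imports:

// sniffContentType reads at most the first 512 bytes of r and lets the
// standard library guess the MIME type. http.DetectContentType always
// returns a valid type, falling back to "application/octet-stream" when
// the data is inconclusive.
func sniffContentType(r io.Reader) (string, error) {
	head, err := io.ReadAll(io.LimitReader(r, 512)) // 512 == sizeToDetectType
	if err != nil {
		return "", err
	}
	return http.DetectContentType(head), nil
}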
@@ -1,15 +1,18 @@
package handler

import (
	"context"
	"net/http"
	"testing"
	"time"

	"github.com/TrueCloudLab/frostfs-s3-gw/api"
	"github.com/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	"github.com/TrueCloudLab/frostfs-sdk-go/bearer"
	"github.com/TrueCloudLab/frostfs-sdk-go/eacl"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/stretchr/testify/require"
)
@@ -24,10 +27,14 @@ func TestConditionalHead(t *testing.T) {
	tc.Handler().HeadObjectHandler(w, r)
	assertStatus(t, w, http.StatusOK)
	etag := w.Result().Header.Get(api.ETag)
	etagQuoted := "\"" + etag + "\""

	headers := map[string]string{api.IfMatch: etag}
	headObject(t, tc, bktName, objName, headers, http.StatusOK)

	headers = map[string]string{api.IfMatch: etagQuoted}
	headObject(t, tc, bktName, objName, headers, http.StatusOK)

	headers = map[string]string{api.IfMatch: "etag"}
	headObject(t, tc, bktName, objName, headers, http.StatusPreconditionFailed)

@@ -47,6 +54,9 @@ func TestConditionalHead(t *testing.T) {
	headers = map[string]string{api.IfNoneMatch: etag}
	headObject(t, tc, bktName, objName, headers, http.StatusNotModified)

	headers = map[string]string{api.IfNoneMatch: etagQuoted}
	headObject(t, tc, bktName, objName, headers, http.StatusNotModified)

	headers = map[string]string{api.IfNoneMatch: "etag"}
	headObject(t, tc, bktName, objName, headers, http.StatusOK)

@@ -75,17 +85,48 @@ func headObject(t *testing.T, tc *handlerContext, bktName, objName string, heade
}

func TestInvalidAccessThroughCache(t *testing.T) {
	tc := prepareHandlerContext(t)
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-cache", "obj-for-cache"
	createBucketAndObject(tc, bktName, objName)
	bktInfo, _ := createBucketAndObject(hc, bktName, objName)
	setContainerEACL(hc, bktInfo.CID)

	headObject(t, tc, bktName, objName, nil, http.StatusOK)
	headObject(t, hc, bktName, objName, nil, http.StatusOK)

	w, r := prepareTestRequest(tc, bktName, objName, nil)
	tc.Handler().HeadObjectHandler(w, r.WithContext(context.WithValue(r.Context(), api.BoxData, newTestAccessBox(t, nil))))
	w, r := prepareTestRequest(hc, bktName, objName, nil)
	hc.Handler().HeadObjectHandler(w, r.WithContext(middleware.SetBoxData(r.Context(), newTestAccessBox(t, nil))))
	assertStatus(t, w, http.StatusForbidden)
}

func setContainerEACL(hc *handlerContext, cnrID cid.ID) {
	table := eacl.NewTable()
	table.SetCID(cnrID)
	for _, op := range fullOps {
		table.AddRecord(getOthersRecord(op, eacl.ActionDeny))
	}

	err := hc.MockedPool().SetContainerEACL(hc.Context(), *table, nil)
	require.NoError(hc.t, err)
}

func TestHeadObject(t *testing.T) {
	hc := prepareHandlerContextWithMinCache(t)
	bktName, objName := "bucket", "obj"
	bktInfo, objInfo := createVersionedBucketAndObject(hc.t, hc, bktName, objName)

	putObject(hc, bktName, objName)

	checkFound(hc.t, hc, bktName, objName, objInfo.VersionID())
	checkFound(hc.t, hc, bktName, objName, emptyVersion)

	addr := getAddressOfLastVersion(hc, bktInfo, objName)
	hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
	hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{})

	headObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), s3errors.ErrNoSuchVersion)
	headObjectAssertS3Error(hc, bktName, objName, emptyVersion, s3errors.ErrNoSuchKey)
}

func TestIsAvailableToResolve(t *testing.T) {
	list := []string{"container", "s3"}
@@ -3,11 +3,11 @@ package handler
import (
	"net/http"

	"github.com/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)

func (h *handler) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -15,7 +15,7 @@ func (h *handler) GetBucketLocationHandler(w http.ResponseWriter, r *http.Reques
		return
	}

	if err = api.EncodeToResponse(w, LocationResponse{Location: bktInfo.LocationConstraint}); err != nil {
	if err = middleware.EncodeToResponse(w, LocationResponse{Location: bktInfo.LocationConstraint}); err != nil {
		h.logAndSendError(w, "couldn't encode bucket location response", reqInfo, err)
	}
}
@@ -4,8 +4,8 @@ import (
	"net/http"
	"time"

	"github.com/TrueCloudLab/frostfs-s3-gw/api"
	"github.com/TrueCloudLab/frostfs-sdk-go/user"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

const maxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
@@ -15,7 +15,7 @@ func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
	var (
		own     user.ID
		res     *ListBucketsResponse
		reqInfo = api.GetReqInfo(r.Context())
		reqInfo = middleware.GetReqInfo(r.Context())
	)

	list, err := h.obj.ListBuckets(r.Context())
@@ -42,7 +42,7 @@ func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
		})
	}

	if err = api.EncodeToResponse(w, res); err != nil {
	if err = middleware.EncodeToResponse(w, res); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}
}
@@ -8,10 +8,11 @@ import (
	"strconv"
	"time"

	"github.com/TrueCloudLab/frostfs-s3-gw/api"
	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
	apiErrors "github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)

const (
@@ -26,7 +27,7 @@ const (
)

func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -73,7 +74,7 @@ func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
}

func (h *handler) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -100,13 +101,13 @@ func (h *handler) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
		settings.LockConfiguration.ObjectLockEnabled = enabledValue
	}

	if err = api.EncodeToResponse(w, settings.LockConfiguration); err != nil {
	if err = middleware.EncodeToResponse(w, settings.LockConfiguration); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}
}

func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -143,7 +144,12 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
			Enabled: legalHold.Status == legalHoldOn,
		},
	},
	CopiesNumber: h.cfg.CopiesNumber,
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return
	}

	if err = h.obj.PutLockInfo(r.Context(), p); err != nil {
@@ -153,7 +159,7 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
}

func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -184,13 +190,13 @@ func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
		legalHold.Status = legalHoldOn
	}

	if err = api.EncodeToResponse(w, legalHold); err != nil {
	if err = middleware.EncodeToResponse(w, legalHold); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}
}

func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -222,7 +228,12 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
		VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
	},
	NewLock:      lock,
	CopiesNumber: h.cfg.CopiesNumber,
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return
	}

	if err = h.obj.PutLockInfo(r.Context(), p); err != nil {
@@ -232,7 +243,7 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
}

func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -271,7 +282,7 @@ func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
		retention.Mode = complianceMode
	}

	if err = api.EncodeToResponse(w, retention); err != nil {
	if err = middleware.EncodeToResponse(w, retention); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}
}
@@ -10,9 +10,10 @@ import (
	"testing"
	"time"

	"github.com/TrueCloudLab/frostfs-s3-gw/api"
	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
	apiErrors "github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"github.com/stretchr/testify/require"
)

@@ -313,7 +314,7 @@ func TestPutBucketLockConfigurationHandler(t *testing.T) {

			w := httptest.NewRecorder()
			r := httptest.NewRequest(http.MethodPut, defaultURL, bytes.NewReader(body))
			r = r.WithContext(api.SetReqInfo(r.Context(), api.NewReqInfo(w, r, api.ObjectRequest{Bucket: tc.bucket})))
			r = r.WithContext(middleware.SetReqInfo(r.Context(), middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: tc.bucket})))

			hc.Handler().PutBucketObjectLockConfigHandler(w, r)

@@ -386,7 +387,7 @@ func TestGetBucketLockConfigurationHandler(t *testing.T) {
		t.Run(tc.name, func(t *testing.T) {
			w := httptest.NewRecorder()
			r := httptest.NewRequest(http.MethodPut, defaultURL, bytes.NewReader(nil))
			r = r.WithContext(api.SetReqInfo(r.Context(), api.NewReqInfo(w, r, api.ObjectRequest{Bucket: tc.bucket})))
			r = r.WithContext(middleware.SetReqInfo(r.Context(), middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: tc.bucket})))

			hc.Handler().GetBucketObjectLockConfigHandler(w, r)

@@ -406,7 +407,7 @@ func TestGetBucketLockConfigurationHandler(t *testing.T) {
}

func assertS3Error(t *testing.T, w *httptest.ResponseRecorder, expectedError apiErrors.Error) {
	actualErrorResponse := &api.ErrorResponse{}
	actualErrorResponse := &middleware.ErrorResponse{}
	err := xml.NewDecoder(w.Result().Body).Decode(actualErrorResponse)
	require.NoError(t, err)

@@ -535,7 +536,7 @@ func TestPutObjectWithLock(t *testing.T) {
	createTestBucketWithLock(hc, bktName, lockConfig)

	objDefault := "obj-default-retention"
	putObject(t, hc, bktName, objDefault)
	putObject(hc, bktName, objDefault)

	getObjectRetentionApproximate(hc, bktName, objDefault, governanceMode, time.Now().Add(24*time.Hour))
	getObjectLegalHold(hc, bktName, objDefault, legalHoldOff)

@@ -586,7 +587,7 @@ func TestPutLockErrors(t *testing.T) {
	headers[api.AmzObjectLockRetainUntilDate] = "dummy"
	putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrInvalidRetentionDate)

	putObject(t, hc, bktName, objName)
	putObject(hc, bktName, objName)

	retention := &data.Retention{Mode: governanceMode}
	putObjectRetentionFailed(t, hc, bktName, objName, retention, apiErrors.ErrMalformedXML)
@@ -2,16 +2,19 @@ package handler

import (
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"time"

	"github.com/TrueCloudLab/frostfs-s3-gw/api"
	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
	"github.com/TrueCloudLab/frostfs-sdk-go/session"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"github.com/google/uuid"
	"go.uber.org/zap"
)
@@ -90,10 +93,17 @@ type (
const (
	uploadIDHeaderName   = "uploadId"
	partNumberHeaderName = "partNumber"

	prefixQueryName         = "prefix"
	delimiterQueryName      = "delimiter"
	maxUploadsQueryName     = "max-uploads"
	encodingTypeQueryName   = "encoding-type"
	keyMarkerQueryName      = "key-marker"
	uploadIDMarkerQueryName = "upload-id-marker"
)

func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -102,10 +112,7 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
	}

	uploadID := uuid.New()
	additional := []zap.Field{
		zap.String("uploadID", uploadID.String()),
		zap.String("Key", reqInfo.ObjectName),
	}
	additional := []zap.Field{zap.String("uploadID", uploadID.String())}

	p := &layer.CreateMultipartParams{
		Info: &layer.UploadInfoParams{
@@ -119,11 +126,11 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
	if containsACLHeaders(r) {
		key, err := h.bearerTokenIssuerKey(r.Context())
		if err != nil {
			h.logAndSendError(w, "couldn't get gate key", reqInfo, err)
			h.logAndSendError(w, "couldn't get gate key", reqInfo, err, additional...)
			return
		}
		if _, err = parseACLHeaders(r.Header, key); err != nil {
			h.logAndSendError(w, "could not parse acl", reqInfo, err)
			h.logAndSendError(w, "could not parse acl", reqInfo, err, additional...)
			return
		}
		p.Data.ACLHeaders = formACLHeadersForMultipart(r.Header)
@@ -139,7 +146,7 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re

	p.Info.Encryption, err = formEncryptionParams(r)
	if err != nil {
		h.logAndSendError(w, "invalid sse headers", reqInfo, err)
		h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
		return
	}

@@ -148,9 +155,9 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
		p.Header[api.ContentType] = contentType
	}

	p.CopiesNumber, err = getCopiesNumberOrDefault(p.Header, h.cfg.CopiesNumber)
	p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		h.logAndSendError(w, "invalid copies number", reqInfo, err, additional...)
		return
	}

@@ -169,7 +176,7 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
		UploadID: uploadID.String(),
	}

	if err = api.EncodeToResponse(w, resp); err != nil {
	if err = middleware.EncodeToResponse(w, resp); err != nil {
		h.logAndSendError(w, "could not encode InitiateMultipartUploadResponse to response", reqInfo, err, additional...)
		return
	}
@@ -195,7 +202,7 @@ func formACLHeadersForMultipart(header http.Header) map[string]string {
}

func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -206,15 +213,27 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
	var (
		queryValues = r.URL.Query()
		uploadID    = queryValues.Get(uploadIDHeaderName)
		additional  = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
		partNumStr  = queryValues.Get(partNumberHeaderName)
		additional  = []zap.Field{zap.String("uploadID", uploadID), zap.String("partNumber", partNumStr)}
	)

	partNumber, err := strconv.Atoi(queryValues.Get(partNumberHeaderName))
	partNumber, err := strconv.Atoi(partNumStr)
	if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
		h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber))
		h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
		return
	}

	body, err := h.getBodyReader(r)
	if err != nil {
		h.logAndSendError(w, "failed to get body reader", reqInfo, err, additional...)
		return
	}

	var size uint64
	if r.ContentLength > 0 {
		size = uint64(r.ContentLength)
	}

	p := &layer.UploadPartParams{
		Info: &layer.UploadInfoParams{
			UploadID: uploadID,
@@ -222,13 +241,13 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
			Key: reqInfo.ObjectName,
		},
		PartNumber: partNumber,
		Size:       r.ContentLength,
		Reader:     r.Body,
		Size:       size,
		Reader:     body,
	}

	p.Info.Encryption, err = formEncryptionParams(r)
	if err != nil {
		h.logAndSendError(w, "invalid sse headers", reqInfo, err)
		h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
		return
	}

@@ -243,21 +262,23 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
	}

	w.Header().Set(api.ETag, hash)
	api.WriteSuccessResponseHeadersOnly(w)
	middleware.WriteSuccessResponseHeadersOnly(w)
}

func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
	var (
		versionID   string
		reqInfo     = api.GetReqInfo(r.Context())
		ctx         = r.Context()
		reqInfo     = middleware.GetReqInfo(ctx)
		queryValues = reqInfo.URL.Query()
		uploadID    = queryValues.Get(uploadIDHeaderName)
		additional  = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
		partNumStr  = queryValues.Get(partNumberHeaderName)
		additional  = []zap.Field{zap.String("uploadID", uploadID), zap.String("partNumber", partNumStr)}
	)

	partNumber, err := strconv.Atoi(queryValues.Get(partNumberHeaderName))
	partNumber, err := strconv.Atoi(partNumStr)
	if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
		h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber))
		h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
		return
	}

@@ -268,7 +289,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
	}
	srcBucket, srcObject, err := path2BucketObject(src)
	if err != nil {
		h.logAndSendError(w, "invalid source copy", reqInfo, err)
		h.logAndSendError(w, "invalid source copy", reqInfo, err, additional...)
		return
	}

@@ -281,21 +302,23 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {

	srcBktInfo, err := h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner)
	if err != nil {
		h.logAndSendError(w, "could not get source bucket info", reqInfo, err)
		h.logAndSendError(w, "could not get source bucket info", reqInfo, err, additional...)
		return
	}

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get target bucket info", reqInfo, err)
		h.logAndSendError(w, "could not get target bucket info", reqInfo, err, additional...)
		return
	}

	srcInfo, err := h.obj.GetObjectInfo(r.Context(), &layer.HeadObjectParams{
	headPrm := &layer.HeadObjectParams{
		BktInfo:   srcBktInfo,
		Object:    srcObject,
		VersionID: versionID,
	})
	}

	srcInfo, err := h.obj.GetObjectInfo(ctx, headPrm)
	if err != nil {
		if errors.IsS3Error(err, errors.ErrNoSuchKey) && versionID != "" {
			h.logAndSendError(w, "could not head source object version", reqInfo,
@@ -320,6 +343,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
	}

	p := &layer.UploadCopyParams{
		Versioned: headPrm.Versioned(),
		Info: &layer.UploadInfoParams{
			UploadID: uploadID,
			Bkt:      bktInfo,
@@ -333,16 +357,16 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {

	p.Info.Encryption, err = formEncryptionParams(r)
	if err != nil {
		h.logAndSendError(w, "invalid sse headers", reqInfo, err)
		h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
		return
	}

	if err = p.Info.Encryption.MatchObjectEncryption(layer.FormEncryptionInfo(srcInfo.Headers)); err != nil {
		h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
		h.logAndSendError(w, "encryption doesn't match object", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrBadRequest), err), additional...)
		return
	}

	info, err := h.obj.UploadPartCopy(r.Context(), p)
	info, err := h.obj.UploadPartCopy(ctx, p)
	if err != nil {
		h.logAndSendError(w, "could not upload part copy", reqInfo, err, additional...)
		return
@@ -357,13 +381,13 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
		addSSECHeaders(w.Header(), r.Header)
	}

	if err = api.EncodeToResponse(w, response); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	if err = middleware.EncodeToResponse(w, response); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
	}
}

func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -372,19 +396,17 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
	}

	var (
		sessionTokenSetEACL *session.Container

		uploadID   = r.URL.Query().Get(uploadIDHeaderName)
		uploadInfo = &layer.UploadInfoParams{
			UploadID: uploadID,
			Bkt:      bktInfo,
			Key:      reqInfo.ObjectName,
		}
		additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
		additional = []zap.Field{zap.String("uploadID", uploadID)}
	)

	reqBody := new(CompleteMultipartUpload)
	if err = xml.NewDecoder(r.Body).Decode(reqBody); err != nil {
	if err = h.cfg.NewCompleteMultipartDecoder(r.Body).Decode(reqBody); err != nil {
		h.logAndSendError(w, "could not read complete multipart upload xml", reqInfo,
			errors.GetAPIError(errors.ErrMalformedXML), additional...)
		return
@@ -399,11 +421,53 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
		Parts: reqBody.Parts,
	}

	uploadData, extendedObjInfo, err := h.obj.CompleteMultipartUpload(r.Context(), c)
	// Next operations might take some time, so we want to keep client's
	// connection alive. To do so, gateway sends periodic white spaces
	// back to the client the same way as Amazon S3 service does.
	stopPeriodicResponseWriter := periodicXMLWriter(w, h.cfg.CompleteMultipartKeepalive())

	// Start complete multipart upload which may take some time to fetch object
	// and re-upload it part by part.
	objInfo, err := h.completeMultipartUpload(r, c, bktInfo, reqInfo)

	// Stop periodic writer as complete multipart upload is finished
	// successfully or not.
	headerIsWritten := stopPeriodicResponseWriter()

	responseWriter := middleware.EncodeToResponse
	errLogger := h.logAndSendError
	// Do not send XML and HTTP headers if periodic writer was invoked at this point.
	if headerIsWritten {
		responseWriter = middleware.EncodeToResponseNoHeader
		errLogger = h.logAndSendErrorNoHeader
	}

	if err != nil {
		h.logAndSendError(w, "could not complete multipart upload", reqInfo, err, additional...)
		errLogger(w, "complete multipart error", reqInfo, err, additional...)
		return
	}

	response := CompleteMultipartUploadResponse{
		Bucket: objInfo.Bucket,
		ETag:   objInfo.HashSum,
		Key:    objInfo.Name,
	}

	// Here we previously set api.AmzVersionID header for versioned bucket.
	// It is not possible after #60, because of periodic white
	// space XML writer to keep connection with the client.

	if err = responseWriter(w, response); err != nil {
		errLogger(w, "something went wrong", reqInfo, err, additional...)
	}
}

func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMultipartParams, bktInfo *data.BucketInfo, reqInfo *middleware.ReqInfo) (*data.ObjectInfo, error) {
	ctx := r.Context()
	uploadData, extendedObjInfo, err := h.obj.CompleteMultipartUpload(ctx, c)
	if err != nil {
		return nil, fmt.Errorf("could not complete multipart upload: %w", err)
	}
	objInfo := extendedObjInfo.ObjectInfo

	if len(uploadData.TagSet) != 0 {
@@ -416,22 +480,23 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
		TagSet:      uploadData.TagSet,
		NodeVersion: extendedObjInfo.NodeVersion,
	}
	if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
		h.logAndSendError(w, "could not put tagging file of completed multipart upload", reqInfo, err, additional...)
		return
	if _, err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
		return nil, fmt.Errorf("could not put tagging file of completed multipart upload: %w", err)
	}
	}

	if len(uploadData.ACLHeaders) != 0 {
		key, err := h.bearerTokenIssuerKey(r.Context())
		sessionTokenSetEACL, err := getSessionTokenSetEACL(ctx)
		if err != nil {
			h.logAndSendError(w, "couldn't get gate key", reqInfo, err)
			return
			return nil, fmt.Errorf("couldn't get eacl token: %w", err)
		}
		key, err := h.bearerTokenIssuerKey(ctx)
		if err != nil {
			return nil, fmt.Errorf("couldn't get gate key: %w", err)
		}
		acl, err := parseACLHeaders(r.Header, key)
		if err != nil {
			h.logAndSendError(w, "could not parse acl", reqInfo, err)
			return
			return nil, fmt.Errorf("could not parse acl: %w", err)
		}

		resInfo := &resourceInfo{
@@ -440,12 +505,10 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
		}
		astObject, err := aclToAst(acl, resInfo)
		if err != nil {
			h.logAndSendError(w, "could not translate acl of completed multipart upload to ast", reqInfo, err, additional...)
			return
			return nil, fmt.Errorf("could not translate acl of completed multipart upload to ast: %w", err)
		}
		if _, err = h.updateBucketACL(r, astObject, bktInfo, sessionTokenSetEACL); err != nil {
			h.logAndSendError(w, "could not update bucket acl while completing multipart upload", reqInfo, err, additional...)
			return
			return nil, fmt.Errorf("could not update bucket acl while completing multipart upload: %w", err)
		}
	}

@@ -455,32 +518,15 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
		BktInfo: bktInfo,
		ReqInfo: reqInfo,
	}
	if err = h.sendNotifications(r.Context(), s); err != nil {
		h.log.Error("couldn't send notification: %w", zap.Error(err))
	if err = h.sendNotifications(ctx, s); err != nil {
		h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
	}

	bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
	if err != nil {
		h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
	}

	response := CompleteMultipartUploadResponse{
		Bucket: objInfo.Bucket,
		ETag:   objInfo.HashSum,
		Key:    objInfo.Name,
	}

	if bktSettings.VersioningEnabled() {
		w.Header().Set(api.AmzVersionID, objInfo.VersionID())
	}

	if err = api.EncodeToResponse(w, response); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}
	return objInfo, nil
}

func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -490,30 +536,27 @@ func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Req

	var (
		queryValues   = reqInfo.URL.Query()
		delimiter     = queryValues.Get("delimiter")
		prefix        = queryValues.Get("prefix")
		maxUploadsStr = queryValues.Get(maxUploadsQueryName)
		maxUploads    = layer.MaxSizeUploadsList
	)

	if queryValues.Get("max-uploads") != "" {
		val, err := strconv.Atoi(queryValues.Get("max-uploads"))
		if err != nil || val < 0 {
	if maxUploadsStr != "" {
		val, err := strconv.Atoi(maxUploadsStr)
		if err != nil || val < 1 || val > 1000 {
			h.logAndSendError(w, "invalid maxUploads", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxUploads))
			return
		}
		if val < maxUploads {
			maxUploads = val
		}
	}

	p := &layer.ListMultipartUploadsParams{
		Bkt:            bktInfo,
		Delimiter:      delimiter,
		EncodingType:   queryValues.Get("encoding-type"),
		KeyMarker:      queryValues.Get("key-marker"),
		Delimiter:      queryValues.Get(delimiterQueryName),
		EncodingType:   queryValues.Get(encodingTypeQueryName),
		KeyMarker:      queryValues.Get(keyMarkerQueryName),
		MaxUploads:     maxUploads,
		Prefix:         prefix,
		UploadIDMarker: queryValues.Get("upload-id-marker"),
		Prefix:         queryValues.Get(prefixQueryName),
		UploadIDMarker: queryValues.Get(uploadIDMarkerQueryName),
	}

	list, err := h.obj.ListMultipartUploads(r.Context(), p)
@@ -522,13 +565,13 @@ func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Req
		return
	}

	if err = api.EncodeToResponse(w, encodeListMultipartUploadsToResponse(list, p)); err != nil {
	if err = middleware.EncodeToResponse(w, encodeListMultipartUploadsToResponse(list, p)); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}
}

func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -557,7 +600,7 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
	}

	if queryValues.Get("part-number-marker") != "" {
		if partNumberMarker, err = strconv.Atoi(queryValues.Get("part-number-marker")); err != nil || partNumberMarker <= 0 {
		if partNumberMarker, err = strconv.Atoi(queryValues.Get("part-number-marker")); err != nil || partNumberMarker < 0 {
			h.logAndSendError(w, "invalid PartNumberMarker", reqInfo, err, additional...)
			return
		}
@@ -585,13 +628,13 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	if err = api.EncodeToResponse(w, encodeListPartsToResponse(list, p)); err != nil {
	if err = middleware.EncodeToResponse(w, encodeListPartsToResponse(list, p)); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}
}

func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := api.GetReqInfo(r.Context())
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
@@ -681,3 +724,53 @@ func encodeListPartsToResponse(info *layer.ListPartsInfo, params *layer.ListPart
		Parts: info.Parts,
	}
}

// periodicXMLWriter creates go routine to write xml header and whitespaces
// over time to avoid connection drop from the client. To work properly,
// pass `http.ResponseWriter` with implemented `http.Flusher` interface.
// Returns stop function which returns boolean if writer has been used
// during goroutine execution. To disable writer, pass 0 duration value.
func periodicXMLWriter(w io.Writer, dur time.Duration) (stop func() bool) {
	if dur == 0 { // 0 duration disables periodic writer
		return func() bool { return false }
	}

	whitespaceChar := []byte(" ")
	closer := make(chan struct{})
	done := make(chan struct{})
	headerWritten := false

	go func() {
		defer close(done)

		tick := time.NewTicker(dur)
		defer tick.Stop()

		for {
			select {
			case <-tick.C:
				if !headerWritten {
					_, err := w.Write([]byte(xml.Header))
					headerWritten = err == nil
				}
				_, err := w.Write(whitespaceChar)
				if err != nil {
					return // is there anything we can do better than ignore error?
				}
				if buffered, ok := w.(http.Flusher); ok {
					buffered.Flush()
				}
			case <-closer:
				return
			}
		}
	}()

	stop = func() bool {
		close(closer)
		<-done // wait for goroutine to stop
		return headerWritten
	}

	return stop
}
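Aside (editorial illustration, not part of the diff): a sketch of how a caller is expected to drive periodicXMLWriter. doSlowWork is a hypothetical stand-in for the long-running completion step; the important part is checking the stop() result, because once a whitespace byte has been flushed the HTTP status line and headers have already been sent:

func keepAliveExample(w http.ResponseWriter, r *http.Request) {
	stop := periodicXMLWriter(w, 10*time.Second) // flush a space every 10s until stopped

	err := doSlowWork(r) // doSlowWork is a hypothetical long-running operation

	if headerIsWritten := stop(); headerIsWritten {
		// The XML header and padding are already on the wire: only body
		// bytes may follow, so any error must be encoded into the body
		// rather than signalled via status code or headers.
	} else if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// ... encode the success (or error) XML document into the body ...
}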
api/handler/multipart_upload_test.go (new file, 323 lines)
|
@ -0,0 +1,323 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
s3Errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
partNumberMarkerQuery = "part-number-marker"
|
||||
)
|
||||
|
||||
func TestPeriodicWriter(t *testing.T) {
|
||||
const dur = 100 * time.Millisecond
|
||||
const whitespaces = 8
|
||||
expected := []byte(xml.Header)
|
||||
for i := 0; i < whitespaces; i++ {
|
||||
expected = append(expected, []byte(" ")...)
|
||||
}
|
||||
|
||||
t.Run("writes data", func(t *testing.T) {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
stop := periodicXMLWriter(buf, dur)
|
||||
|
||||
// N number of whitespaces + half durations to guarantee at least N writes in buffer
|
||||
time.Sleep(whitespaces*dur + dur/2)
|
||||
require.True(t, stop())
|
||||
require.Equal(t, expected, buf.Bytes())
|
||||
|
||||
t.Run("no additional data after stop", func(t *testing.T) {
|
||||
time.Sleep(2 * dur)
|
||||
require.Equal(t, expected, buf.Bytes())
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("does not write data", func(t *testing.T) {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
stop := periodicXMLWriter(buf, dur)
|
||||
time.Sleep(dur / 2)
|
||||
require.False(t, stop())
|
||||
require.Empty(t, buf.Bytes())
|
||||
|
||||
t.Run("disabled", func(t *testing.T) {
|
||||
stop = periodicXMLWriter(buf, 0)
|
||||
require.False(t, stop())
|
||||
require.Empty(t, buf.Bytes())
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestMultipartUploadInvalidPart(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-to-upload-part", "object-multipart"
|
||||
createTestBucket(hc, bktName)
|
||||
partSize := 8 // less than min part size
|
||||
|
||||
multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
|
||||
etag1, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
|
||||
etag2, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
|
||||
w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
|
||||
assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))
|
||||
}
|
||||
|
||||
func TestMultipartReUploadPart(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-to-upload-part", "object-multipart"
|
||||
bktInfo := createTestBucket(hc, bktName)
|
||||
partSizeLast := 8 // less than min part size
|
||||
partSizeFirst := 5 * 1024 * 1024
|
||||
|
||||
uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
|
||||
etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeLast)
|
||||
etag2, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeFirst)
|
||||
|
||||
list := listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
|
||||
require.Len(t, list.Parts, 2)
|
||||
require.Equal(t, etag1, list.Parts[0].ETag)
|
||||
require.Equal(t, etag2, list.Parts[1].ETag)
|
||||
|
||||
    w := completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
    assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))

    etag1, data1 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeFirst)
    etag2, data2 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeLast)

    list = listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
    require.Len(t, list.Parts, 2)
    require.Equal(t, etag1, list.Parts[0].ETag)
    require.Equal(t, etag2, list.Parts[1].ETag)

    innerUploadInfo, err := hc.tree.GetMultipartUpload(hc.context, bktInfo, objName, uploadInfo.UploadID)
    require.NoError(t, err)
    treeParts, err := hc.tree.GetParts(hc.Context(), bktInfo, innerUploadInfo.ID)
    require.NoError(t, err)
    require.Len(t, treeParts, len(list.Parts))

    w = completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
    assertStatus(hc.t, w, http.StatusOK)

    data, _ := getObject(hc, bktName, objName)
    equalDataSlices(t, append(data1, data2...), data)
}

func TestListMultipartUploads(t *testing.T) {
    hc := prepareHandlerContext(t)

    bktName := "bucket-to-list-uploads"
    createTestBucket(hc, bktName)

    objName1 := "/my/object/name"
    uploadInfo1 := createMultipartUpload(hc, bktName, objName1, map[string]string{})
    objName2 := "/my/object2"
    uploadInfo2 := createMultipartUpload(hc, bktName, objName2, map[string]string{})
    objName3 := "/zzz/object/name3"
    uploadInfo3 := createMultipartUpload(hc, bktName, objName3, map[string]string{})

    t.Run("check upload key", func(t *testing.T) {
        listUploads := listAllMultipartUploads(hc, bktName)
        require.Len(t, listUploads.Uploads, 3)
        for i, upload := range []*InitiateMultipartUploadResponse{uploadInfo1, uploadInfo2, uploadInfo3} {
            require.Equal(t, upload.UploadID, listUploads.Uploads[i].UploadID)
            require.Equal(t, upload.Key, listUploads.Uploads[i].Key)
        }
    })

    t.Run("check max uploads", func(t *testing.T) {
        listUploads := listMultipartUploadsBase(hc, bktName, "", "", "", "", 2)
        require.Len(t, listUploads.Uploads, 2)
        require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID)
        require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID)
    })

    t.Run("check prefix", func(t *testing.T) {
        listUploads := listMultipartUploadsBase(hc, bktName, "/my", "", "", "", -1)
        require.Len(t, listUploads.Uploads, 2)
        require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID)
        require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID)
    })

    t.Run("check markers", func(t *testing.T) {
        t.Run("check only key-marker", func(t *testing.T) {
            listUploads := listMultipartUploadsBase(hc, bktName, "", "", "", objName2, -1)
            require.Len(t, listUploads.Uploads, 1)
            // If upload-id-marker is not specified, only the keys lexicographically greater than the specified key-marker will be included in the list.
            require.Equal(t, uploadInfo3.UploadID, listUploads.Uploads[0].UploadID)
        })

        t.Run("check only upload-id-marker", func(t *testing.T) {
            uploadIDMarker := uploadInfo1.UploadID
            if uploadIDMarker > uploadInfo2.UploadID {
                uploadIDMarker = uploadInfo2.UploadID
            }
            listUploads := listMultipartUploadsBase(hc, bktName, "", "", uploadIDMarker, "", -1)
            // If key-marker is not specified, the upload-id-marker parameter is ignored.
            require.Len(t, listUploads.Uploads, 3)
        })

        t.Run("check key-marker along with upload-id-marker", func(t *testing.T) {
            uploadIDMarker := "00000000-0000-0000-0000-000000000000"

            listUploads := listMultipartUploadsBase(hc, bktName, "", "", uploadIDMarker, objName3, -1)
            require.Len(t, listUploads.Uploads, 1)
            // If upload-id-marker is specified, any multipart uploads for a key equal to the key-marker might also be included,
            // provided those multipart uploads have upload IDs lexicographically greater than the specified upload-id-marker.
            require.Equal(t, uploadInfo3.UploadID, listUploads.Uploads[0].UploadID)
        })
    })
}
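
// The marker sub-tests above pin down the S3 ListMultipartUploads contract.
// A minimal sketch of that filtering rule (hypothetical helper and type,
// added here for illustration only; not part of the gateway):

type uploadRecord struct {
    Key      string
    UploadID string
}

func filterUploads(all []uploadRecord, keyMarker, uploadIDMarker string) []uploadRecord {
    // Without a key-marker, the upload-id-marker is ignored entirely.
    if keyMarker == "" {
        return all
    }
    var res []uploadRecord
    for _, u := range all {
        // Keys strictly greater than the key-marker are always included; for
        // the key equal to the key-marker, an upload is included only when its
        // upload ID is greater than the given upload-id-marker.
        if u.Key > keyMarker || (u.Key == keyMarker && uploadIDMarker != "" && u.UploadID > uploadIDMarker) {
            res = append(res, u)
        }
    }
    return res
}
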
func TestMultipartUploadSize(t *testing.T) {
    hc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-test-multipart-size", "object-multipart"
    createTestBucket(hc, bktName)

    partSize := layer.UploadMinSize
    objLen := 2 * partSize
    headers := map[string]string{}

    data := multipartUpload(hc, bktName, objName, headers, objLen, partSize)
    require.Equal(t, objLen, len(data))

    t.Run("check correct size in list v1", func(t *testing.T) {
        listV1 := listObjectsV1(hc, bktName, "", "", "", -1)
        require.Len(t, listV1.Contents, 1)
        require.Equal(t, objLen, int(listV1.Contents[0].Size))
        require.Equal(t, objName, listV1.Contents[0].Key)
    })

    t.Run("check correct size in list v2", func(t *testing.T) {
        listV2 := listObjectsV2(hc, bktName, "", "", "", "", -1)
        require.Len(t, listV2.Contents, 1)
        require.Equal(t, objLen, int(listV2.Contents[0].Size))
        require.Equal(t, objName, listV2.Contents[0].Key)
    })

    t.Run("check correct get", func(t *testing.T) {
        _, hdr := getObject(hc, bktName, objName)
        require.Equal(t, strconv.Itoa(objLen), hdr.Get(api.ContentLength))

        part := getObjectRange(t, hc, bktName, objName, partSize, objLen-1)
        equalDataSlices(t, data[partSize:], part)
    })

    t.Run("check correct size when part copy", func(t *testing.T) {
        objName2 := "obj2"
        uploadInfo := createMultipartUpload(hc, bktName, objName2, headers)
        sourceCopy := bktName + "/" + objName
        uploadPartCopy(hc, bktName, objName2, uploadInfo.UploadID, 1, sourceCopy, 0, 0)
        uploadPartCopy(hc, bktName, objName2, uploadInfo.UploadID, 2, sourceCopy, 0, partSize)
    })
}

func TestListParts(t *testing.T) {
    hc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-test-list-parts", "object-multipart"
    _ = createTestBucket(hc, bktName)
    partSize := 5 * 1024 * 1024

    uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
    etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
    etag2, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSize)

    list := listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
    require.Len(t, list.Parts, 2)
    require.Equal(t, etag1, list.Parts[0].ETag)
    require.Equal(t, etag2, list.Parts[1].ETag)

    list = listParts(hc, bktName, objName, uploadInfo.UploadID, "1", http.StatusOK)
    require.Len(t, list.Parts, 1)
    require.Equal(t, etag2, list.Parts[0].ETag)

    list = listParts(hc, bktName, objName, uploadInfo.UploadID, "2", http.StatusOK)
    require.Len(t, list.Parts, 0)

    list = listParts(hc, bktName, objName, uploadInfo.UploadID, "7", http.StatusOK)
    require.Len(t, list.Parts, 0)

    list = listParts(hc, bktName, objName, uploadInfo.UploadID, "-1", http.StatusInternalServerError)
    require.Len(t, list.Parts, 0)
}
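
// TestListParts above pins down the part-number-marker contract: the response
// contains only parts whose numbers are strictly greater than the marker.
// A minimal sketch of that rule (hypothetical helper, for illustration only):
func partsAfterMarker(partNumbers []int, marker int) []int {
    var res []int
    for _, p := range partNumbers {
        if p > marker {
            res = append(res, p)
        }
    }
    return res
}

// partsAfterMarker([]int{1, 2}, 0) -> [1 2]; marker 1 -> [2]; marker 2 or 7 -> [].
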
func uploadPartCopy(hc *handlerContext, bktName, objName, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse {
    return uploadPartCopyBase(hc, bktName, objName, false, uploadID, num, srcObj, start, end)
}

func uploadPartCopyBase(hc *handlerContext, bktName, objName string, encrypted bool, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse {
    query := make(url.Values)
    query.Set(uploadIDQuery, uploadID)
    query.Set(partNumberQuery, strconv.Itoa(num))

    w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, nil)
    if encrypted {
        setEncryptHeaders(r)
    }
    r.Header.Set(api.AmzCopySource, srcObj)
    if start+end > 0 {
        r.Header.Set(api.AmzCopySourceRange, fmt.Sprintf("bytes=%d-%d", start, end))
    }

    hc.Handler().UploadPartCopy(w, r)
    uploadPartCopyResponse := &UploadPartCopyResponse{}
    readResponse(hc.t, w, http.StatusOK, uploadPartCopyResponse)

    return uploadPartCopyResponse
}

func listAllMultipartUploads(hc *handlerContext, bktName string) *ListMultipartUploadsResponse {
    return listMultipartUploadsBase(hc, bktName, "", "", "", "", -1)
}

func listMultipartUploadsBase(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker string, maxUploads int) *ListMultipartUploadsResponse {
    query := make(url.Values)
    query.Set(prefixQueryName, prefix)
    query.Set(delimiterQueryName, delimiter)
    query.Set(uploadIDMarkerQueryName, uploadIDMarker)
    query.Set(keyMarkerQueryName, keyMarker)
    if maxUploads != -1 {
        query.Set(maxUploadsQueryName, strconv.Itoa(maxUploads))
    }

    w, r := prepareTestRequestWithQuery(hc, bktName, "", query, nil)

    hc.Handler().ListMultipartUploadsHandler(w, r)
    listPartsResponse := &ListMultipartUploadsResponse{}
    readResponse(hc.t, w, http.StatusOK, listPartsResponse)

    return listPartsResponse
}

func listParts(hc *handlerContext, bktName, objName string, uploadID, partNumberMarker string, status int) *ListPartsResponse {
    return listPartsBase(hc, bktName, objName, false, uploadID, partNumberMarker, status)
}

func listPartsBase(hc *handlerContext, bktName, objName string, encrypted bool, uploadID, partNumberMarker string, status int) *ListPartsResponse {
    query := make(url.Values)
    query.Set(uploadIDQuery, uploadID)
    query.Set(partNumberMarkerQuery, partNumberMarker)

    w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, nil)
    if encrypted {
        setEncryptHeaders(r)
    }

    hc.Handler().ListPartsHandler(w, r)
    listPartsResponse := &ListPartsResponse{}
    readResponse(hc.t, w, status, listPartsResponse)

    return listPartsResponse
}
@@ -3,18 +3,18 @@ package handler
import (
    "net/http"

    "github.com/TrueCloudLab/frostfs-s3-gw/api"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)

func (h *handler) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not supported", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
    h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}

func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not supported", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
    h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}

func (h *handler) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not supported", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
    h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}

@@ -8,11 +8,12 @@ import (
    "strings"
    "time"

    "github.com/TrueCloudLab/frostfs-s3-gw/api"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/data"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
    "github.com/TrueCloudLab/frostfs-sdk-go/bearer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
    "github.com/google/uuid"
)

@@ -21,7 +22,7 @@ type (
        Event string
        NotificationInfo *data.NotificationInfo
        BktInfo *data.BucketInfo
        ReqInfo *api.ReqInfo
        ReqInfo *middleware.ReqInfo
        User string
        Time time.Time
    }

@@ -96,7 +97,7 @@ var validEvents = map[string]struct{}{
}

func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
    reqInfo := api.GetReqInfo(r.Context())
    reqInfo := middleware.GetReqInfo(r.Context())
    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
        h.logAndSendError(w, "could not get bucket info", reqInfo, err)

@@ -118,7 +119,12 @@ func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Re
        RequestInfo: reqInfo,
        BktInfo: bktInfo,
        Configuration: conf,
        CopiesNumber: h.cfg.CopiesNumber,
    }

    p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
    if err != nil {
        h.logAndSendError(w, "invalid copies number", reqInfo, err)
        return
    }

    if err = h.obj.PutBucketNotificationConfiguration(r.Context(), p); err != nil {

@@ -128,7 +134,7 @@ func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Re
}

func (h *handler) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
    reqInfo := api.GetReqInfo(r.Context())
    reqInfo := middleware.GetReqInfo(r.Context())

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {

@@ -142,14 +148,14 @@ func (h *handler) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Re
        return
    }

    if err = api.EncodeToResponse(w, conf); err != nil {
    if err = middleware.EncodeToResponse(w, conf); err != nil {
        h.logAndSendError(w, "could not encode bucket notification configuration to response", reqInfo, err)
        return
    }
}

func (h *handler) sendNotifications(ctx context.Context, p *SendNotificationParams) error {
    if !h.cfg.NotificatorEnabled {
    if !h.cfg.NotificatorEnabled() {
        return nil
    }

@@ -161,7 +167,7 @@ func (h *handler) sendNotifications(ctx context.Context, p *SendNotificationPara
        return nil
    }

    box, err := layer.GetBoxData(ctx)
    box, err := middleware.GetBoxData(ctx)
    if err == nil && box.Gate.BearerToken != nil {
        p.User = bearer.ResolveIssuer(*box.Gate.BearerToken).EncodeToString()
    }

@@ -174,7 +180,7 @@ func (h *handler) sendNotifications(ctx context.Context, p *SendNotificationPara
}

// checkBucketConfiguration checks notification configuration and generates an ID for configurations with empty ids.
func (h *handler) checkBucketConfiguration(ctx context.Context, conf *data.NotificationConfiguration, r *api.ReqInfo) (completed bool, err error) {
func (h *handler) checkBucketConfiguration(ctx context.Context, conf *data.NotificationConfiguration, r *middleware.ReqInfo) (completed bool, err error) {
    if conf == nil {
        return
    }

@@ -192,12 +198,12 @@ func (h *handler) checkBucketConfiguration(ctx context.Context, conf *data.Notif
        return
    }

    if h.cfg.NotificatorEnabled {
    if h.cfg.NotificatorEnabled() {
        if err = h.notificator.SendTestNotification(q.QueueArn, r.BucketName, r.RequestID, r.Host, layer.TimeNow(ctx)); err != nil {
            return
        }
    } else {
        h.log.Warn("failed to send test event because notifications is disabled")
        h.reqLogger(ctx).Warn(logs.FailedToSendTestEventBecauseNotificationsIsDisabled)
    }

    if q.ID == "" {

@@ -3,8 +3,8 @@ package handler
import (
    "testing"

    "github.com/TrueCloudLab/frostfs-s3-gw/api/data"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "github.com/stretchr/testify/require"
)
@@ -6,16 +6,16 @@ import (
    "strconv"
    "time"

    "github.com/TrueCloudLab/frostfs-s3-gw/api"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/data"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
    oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// ListObjectsV1Handler handles objects listing requests for API version 1.
func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
    reqInfo := api.GetReqInfo(r.Context())
    reqInfo := middleware.GetReqInfo(r.Context())
    params, err := parseListObjectsArgsV1(reqInfo)
    if err != nil {
        h.logAndSendError(w, "failed to parse arguments", reqInfo, err)

@@ -33,7 +33,7 @@ func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
        return
    }

    if err = api.EncodeToResponse(w, encodeV1(params, list)); err != nil {
    if err = middleware.EncodeToResponse(w, encodeV1(params, list)); err != nil {
        h.logAndSendError(w, "something went wrong", reqInfo, err)
    }
}

@@ -59,7 +59,7 @@ func encodeV1(p *layer.ListObjectsParamsV1, list *layer.ListObjectsInfoV1) *List

// ListObjectsV2Handler handles objects listing requests for API version 2.
func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
    reqInfo := api.GetReqInfo(r.Context())
    reqInfo := middleware.GetReqInfo(r.Context())
    params, err := parseListObjectsArgsV2(reqInfo)
    if err != nil {
        h.logAndSendError(w, "failed to parse arguments", reqInfo, err)

@@ -77,7 +77,7 @@ func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
        return
    }

    if err = api.EncodeToResponse(w, encodeV2(params, list)); err != nil {
    if err = middleware.EncodeToResponse(w, encodeV2(params, list)); err != nil {
        h.logAndSendError(w, "something went wrong", reqInfo, err)
    }
}

@@ -103,7 +103,7 @@ func encodeV2(p *layer.ListObjectsParamsV2, list *layer.ListObjectsInfoV2) *List
    return res
}

func parseListObjectsArgsV1(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV1, error) {
func parseListObjectsArgsV1(reqInfo *middleware.ReqInfo) (*layer.ListObjectsParamsV1, error) {
    var (
        res layer.ListObjectsParamsV1
        queryValues = reqInfo.URL.Query()

@@ -120,7 +120,7 @@ func parseListObjectsArgsV1(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV1, e
    return &res, nil
}

func parseListObjectsArgsV2(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV2, error) {
func parseListObjectsArgsV2(reqInfo *middleware.ReqInfo) (*layer.ListObjectsParamsV2, error) {
    var (
        res layer.ListObjectsParamsV2
        queryValues = reqInfo.URL.Query()

@@ -142,7 +142,7 @@ func parseListObjectsArgsV2(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV2, e
    return &res, nil
}

func parseListObjectArgs(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsCommon, error) {
func parseListObjectArgs(reqInfo *middleware.ReqInfo) (*layer.ListObjectsParamsCommon, error) {
    var (
        err error
        res layer.ListObjectsParamsCommon

@@ -198,6 +198,10 @@ func fillContents(src []*data.ObjectInfo, encode string, fetchOwner bool) []Obje
        ETag: obj.HashSum,
    }

    if size, err := layer.GetObjectSize(obj); err == nil {
        res.Size = size
    }

    if fetchOwner {
        res.Owner = &Owner{
            ID: obj.Owner.String(),

@@ -211,7 +215,7 @@ func fillContents(src []*data.ObjectInfo, encode string, fetchOwner bool) []Obje
}

func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
    reqInfo := api.GetReqInfo(r.Context())
    reqInfo := middleware.GetReqInfo(r.Context())
    p, err := parseListObjectVersionsRequest(reqInfo)
    if err != nil {
        h.logAndSendError(w, "failed to parse request", reqInfo, err)

@@ -230,12 +234,12 @@ func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http
    }

    response := encodeListObjectVersionsToResponse(info, p.BktInfo.Name)
    if err = api.EncodeToResponse(w, response); err != nil {
    if err = middleware.EncodeToResponse(w, response); err != nil {
        h.logAndSendError(w, "something went wrong", reqInfo, err)
    }
}

func parseListObjectVersionsRequest(reqInfo *api.ReqInfo) (*layer.ListObjectVersionsParams, error) {
func parseListObjectVersionsRequest(reqInfo *middleware.ReqInfo) (*layer.ListObjectVersionsParams, error) {
    var (
        err error
        res layer.ListObjectVersionsParams

@@ -6,7 +6,7 @@ import (
    "strconv"
    "testing"

    "github.com/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "github.com/stretchr/testify/require"
)

@@ -67,11 +67,11 @@ func TestS3CompatibilityBucketListV2BothContinuationTokenStartAfter(t *testing.T
        createTestObject(tc, bktInfo, objName)
    }

    listV2Response1 := listObjectsV2(t, tc, bktName, "", "", "bar", "", 1)
    listV2Response1 := listObjectsV2(tc, bktName, "", "", "bar", "", 1)
    nextContinuationToken := listV2Response1.NextContinuationToken
    require.Equal(t, "baz", listV2Response1.Contents[0].Key)

    listV2Response2 := listObjectsV2(t, tc, bktName, "", "", "bar", nextContinuationToken, -1)
    listV2Response2 := listObjectsV2(tc, bktName, "", "", "bar", nextContinuationToken, -1)

    require.Equal(t, nextContinuationToken, listV2Response2.ContinuationToken)
    require.Equal(t, "bar", listV2Response2.StartAfter)

@@ -92,7 +92,7 @@ func TestS3BucketListDelimiterBasic(t *testing.T) {
        createTestObject(tc, bktInfo, objName)
    }

    listV1Response := listObjectsV1(t, tc, bktName, "", "/", "", -1)
    listV1Response := listObjectsV1(tc, bktName, "", "/", "", -1)
    require.Equal(t, "/", listV1Response.Delimiter)
    require.Equal(t, "asdf", listV1Response.Contents[0].Key)
    require.Len(t, listV1Response.CommonPrefixes, 2)

@@ -100,6 +100,26 @@ func TestS3BucketListDelimiterBasic(t *testing.T) {
    require.Equal(t, "quux/", listV1Response.CommonPrefixes[1].Prefix)
}

func TestS3BucketListV2DelimiterPercentage(t *testing.T) {
    tc := prepareHandlerContext(t)

    bktName := "bucket-for-listing"
    objects := []string{"b%ar", "b%az", "c%ab", "foo"}
    bktInfo, _ := createBucketAndObject(tc, bktName, objects[0])

    for _, objName := range objects[1:] {
        createTestObject(tc, bktInfo, objName)
    }

    listV2Response := listObjectsV2(tc, bktName, "", "%", "", "", -1)
    require.Equal(t, "%", listV2Response.Delimiter)
    require.Len(t, listV2Response.Contents, 1)
    require.Equal(t, "foo", listV2Response.Contents[0].Key)
    require.Len(t, listV2Response.CommonPrefixes, 2)
    require.Equal(t, "b%", listV2Response.CommonPrefixes[0].Prefix)
    require.Equal(t, "c%", listV2Response.CommonPrefixes[1].Prefix)
}
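
// TestS3BucketListV2DelimiterPercentage above shows how a delimiter folds keys
// into CommonPrefixes: a key containing the delimiter is collapsed to the
// prefix up to and including its first occurrence, while the remaining keys
// are listed as-is. A minimal sketch of that grouping rule (hypothetical
// helper, for illustration only; assumes the "strings" package is imported):
func splitByDelimiter(keys []string, delim string) (contents, prefixes []string) {
    seen := make(map[string]struct{})
    for _, k := range keys {
        if i := strings.Index(k, delim); i >= 0 {
            p := k[:i+len(delim)]
            if _, ok := seen[p]; !ok {
                seen[p] = struct{}{}
                prefixes = append(prefixes, p)
            }
        } else {
            contents = append(contents, k)
        }
    }
    return contents, prefixes
}

// splitByDelimiter([]string{"b%ar", "b%az", "c%ab", "foo"}, "%") -> ["foo"], ["b%", "c%"].
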
func TestS3BucketListV2DelimiterPrefix(t *testing.T) {
    tc := prepareHandlerContext(t)

@@ -129,7 +149,7 @@ func TestS3BucketListV2DelimiterPrefix(t *testing.T) {
    validateListV2(t, tc, bktName, prefix, delim, "", 2, false, true, []string{"boo/bar"}, []string{"boo/baz/"})
}

func listObjectsV2(t *testing.T, tc *handlerContext, bktName, prefix, delimiter, startAfter, continuationToken string, maxKeys int) *ListObjectsV2Response {
func listObjectsV2(hc *handlerContext, bktName, prefix, delimiter, startAfter, continuationToken string, maxKeys int) *ListObjectsV2Response {
    query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
    if len(startAfter) != 0 {
        query.Add("start-after", startAfter)

@@ -138,17 +158,17 @@ func listObjectsV2(t *testing.T, tc *handlerContext, bktName, prefix, delimiter,
        query.Add("continuation-token", continuationToken)
    }

    w, r := prepareTestFullRequest(tc, bktName, "", query, nil)
    tc.Handler().ListObjectsV2Handler(w, r)
    assertStatus(t, w, http.StatusOK)
    w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
    hc.Handler().ListObjectsV2Handler(w, r)
    assertStatus(hc.t, w, http.StatusOK)
    res := &ListObjectsV2Response{}
    parseTestResponse(t, w, res)
    parseTestResponse(hc.t, w, res)
    return res
}

func validateListV2(t *testing.T, tc *handlerContext, bktName, prefix, delimiter, continuationToken string, maxKeys int,
    isTruncated, last bool, checkObjects, checkPrefixes []string) string {
    response := listObjectsV2(t, tc, bktName, prefix, delimiter, "", continuationToken, maxKeys)
    response := listObjectsV2(tc, bktName, prefix, delimiter, "", continuationToken, maxKeys)

    require.Equal(t, isTruncated, response.IsTruncated)
    require.Equal(t, last, len(response.NextContinuationToken) == 0)

@@ -182,16 +202,16 @@ func prepareCommonListObjectsQuery(prefix, delimiter string, maxKeys int) url.Va
    return query
}

func listObjectsV1(t *testing.T, tc *handlerContext, bktName, prefix, delimiter, marker string, maxKeys int) *ListObjectsV1Response {
func listObjectsV1(hc *handlerContext, bktName, prefix, delimiter, marker string, maxKeys int) *ListObjectsV1Response {
    query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
    if len(marker) != 0 {
        query.Add("marker", marker)
    }

    w, r := prepareTestFullRequest(tc, bktName, "", query, nil)
    tc.Handler().ListObjectsV1Handler(w, r)
    assertStatus(t, w, http.StatusOK)
    w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
    hc.Handler().ListObjectsV1Handler(w, r)
    assertStatus(hc.t, w, http.StatusOK)
    res := &ListObjectsV1Response{}
    parseTestResponse(t, w, res)
    parseTestResponse(hc.t, w, res)
    return res
}

@@ -16,15 +16,17 @@ import (
    "strings"
    "time"

    "github.com/TrueCloudLab/frostfs-s3-gw/api"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/auth"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/data"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
    "github.com/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "github.com/TrueCloudLab/frostfs-sdk-go/eacl"
    "github.com/TrueCloudLab/frostfs-sdk-go/session"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
    "go.uber.org/zap"
)

@@ -43,13 +45,13 @@ func (p *postPolicy) condition(key string) *policyCondition {
    return nil
}

func (p *postPolicy) CheckContentLength(size int64) bool {
func (p *postPolicy) CheckContentLength(size uint64) bool {
    if p.empty {
        return true
    }
    for _, condition := range p.Conditions {
        if condition.Matching == "content-length-range" {
            length := strconv.FormatInt(size, 10)
            length := strconv.FormatUint(size, 10)
            return condition.Key <= length && length <= condition.Value
        }
    }
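
// CheckContentLength above evaluates the POST policy "content-length-range"
// condition; the bounds live in condition.Key and condition.Value as decimal
// strings. A sketch of a policy document carrying such a condition, following
// the S3 POST policy format (values are illustrative only):
var examplePostPolicyJSON = []byte(`{
    "expiration": "2015-12-30T12:00:00.000Z",
    "conditions": [
        {"bucket": "bucket-for-put"},
        ["content-length-range", 1048576, 10485760]
    ]
}`)

// Note that the handler compares the bounds as strings, so the range check is
// numerically exact only while the compared decimal strings have equal width.
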
@@ -179,7 +181,8 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
        newEaclTable *eacl.Table
        sessionTokenEACL *session.Container
        containsACL = containsACLHeaders(r)
        reqInfo = api.GetReqInfo(r.Context())
        ctx = r.Context()
        reqInfo = middleware.GetReqInfo(ctx)
    )

    if containsACL {

@@ -212,44 +215,57 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
        metadata[api.Expires] = expires
    }

    copiesNumber, err := getCopiesNumberOrDefault(metadata, h.cfg.CopiesNumber)
    if err != nil {
        h.logAndSendError(w, "invalid copies number", reqInfo, err)
        return
    }

    encryptionParams, err := formEncryptionParams(r)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    body, err := h.getBodyReader(r)
    if err != nil {
        h.logAndSendError(w, "failed to get body reader", reqInfo, err)
        return
    }
    if encodings := r.Header.Get(api.ContentEncoding); len(encodings) > 0 {
        metadata[api.ContentEncoding] = encodings
    }

    var size uint64
    if r.ContentLength > 0 {
        size = uint64(r.ContentLength)
    }

    params := &layer.PutObjectParams{
        BktInfo: bktInfo,
        Object: reqInfo.ObjectName,
        Reader: r.Body,
        Size: r.ContentLength,
        Reader: body,
        Size: size,
        Header: metadata,
        Encryption: encryptionParams,
        CopiesNumber: copiesNumber,
    }

    settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
    params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, bktInfo.LocationConstraint)
    if err != nil {
        h.logAndSendError(w, "invalid copies number", reqInfo, err)
        return
    }

    settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
    if err != nil {
        h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
        return
    }

    params.Lock, err = formObjectLock(r.Context(), bktInfo, settings.LockConfiguration, r.Header)
    params.Lock, err = formObjectLock(ctx, bktInfo, settings.LockConfiguration, r.Header)
    if err != nil {
        h.logAndSendError(w, "could not form object lock", reqInfo, err)
        return
    }

    extendedObjInfo, err := h.obj.PutObject(r.Context(), params)
    extendedObjInfo, err := h.obj.PutObject(ctx, params)
    if err != nil {
        _, err2 := io.Copy(io.Discard, r.Body)
        err3 := r.Body.Close()
        _, err2 := io.Copy(io.Discard, body)
        err3 := body.Close()
        h.logAndSendError(w, "could not upload object", reqInfo, err, zap.Errors("body close errors", []error{err2, err3}))
        return
    }

@@ -261,8 +277,8 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
        BktInfo: bktInfo,
        ReqInfo: reqInfo,
    }
    if err = h.sendNotifications(r.Context(), s); err != nil {
        h.log.Error("couldn't send notification: %w", zap.Error(err))
    if err = h.sendNotifications(ctx, s); err != nil {
        h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
    }

    if containsACL {

@@ -309,21 +325,49 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
    }

    w.Header().Set(api.ETag, objInfo.HashSum)
    api.WriteSuccessResponseHeadersOnly(w)
    middleware.WriteSuccessResponseHeadersOnly(w)
}

func getCopiesNumberOrDefault(metadata map[string]string, defaultCopiesNumber uint32) (uint32, error) {
    copiesNumberStr, ok := metadata[layer.AttributeFrostfsCopiesNumber]
    if !ok {
        return defaultCopiesNumber, nil
func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
    if !api.IsSignedStreamingV4(r) {
        return r.Body, nil
    }

    copiesNumber, err := strconv.ParseUint(copiesNumberStr, 10, 32)
    encodings := r.Header.Values(api.ContentEncoding)
    var chunkedEncoding bool
    resultContentEncoding := make([]string, 0, len(encodings))
    for _, enc := range encodings {
        for _, e := range strings.Split(enc, ",") {
            e = strings.TrimSpace(e)
            if e == api.AwsChunked { // probably we should also check position of this header value
                chunkedEncoding = true
            } else {
                resultContentEncoding = append(resultContentEncoding, e)
            }
        }
    }
    r.Header.Set(api.ContentEncoding, strings.Join(resultContentEncoding, ","))

    if !chunkedEncoding && !h.cfg.BypassContentEncodingInChunks() {
        return nil, fmt.Errorf("%w: request is not chunk encoded, encodings '%s'",
            errors.GetAPIError(errors.ErrInvalidEncodingMethod), strings.Join(encodings, ","))
    }

    decodeContentSize := r.Header.Get(api.AmzDecodedContentLength)
    if len(decodeContentSize) == 0 {
        return nil, errors.GetAPIError(errors.ErrMissingContentLength)
    }

    if _, err := strconv.Atoi(decodeContentSize); err != nil {
        return nil, fmt.Errorf("%w: parse decoded content length: %s", errors.GetAPIError(errors.ErrMissingContentLength), err.Error())
    }

    chunkReader, err := newSignV4ChunkedReader(r)
    if err != nil {
        return 0, fmt.Errorf("pasrse copies number: %w", err)
        return nil, fmt.Errorf("initialize chunk reader: %w", err)
    }

    return uint32(copiesNumber), nil
    return chunkReader, nil
}
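
// getBodyReader above switches to the chunk-decoding path only for requests
// signed as SigV4 streaming payloads. A client signals that mode with a header
// set like the one below; the values are taken from the AWS streaming-upload
// example reused by getChunkedRequest in put_test.go, and the helper name is
// hypothetical (a sketch, not part of the gateway):
func setStreamingSigV4Headers(req *http.Request) {
    req.Header.Set("Content-Encoding", "aws-chunked")
    req.Header.Set("Content-Length", "66824") // size of the framed body on the wire
    req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
    req.Header.Set("x-amz-decoded-content-length", "66560") // payload size after de-chunking
}
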
func formEncryptionParams(r *http.Request) (enc encryption.Params, err error) {

@@ -375,7 +419,8 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        newEaclTable *eacl.Table
        tagSet map[string]string
        sessionTokenEACL *session.Container
        reqInfo = api.GetReqInfo(r.Context())
        ctx = r.Context()
        reqInfo = middleware.GetReqInfo(ctx)
        metadata = make(map[string]string)
        containsACL = containsACLHeaders(r)
    )

@@ -396,17 +441,17 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
    }

    if containsACL {
        if sessionTokenEACL, err = getSessionTokenSetEACL(r.Context()); err != nil {
        if sessionTokenEACL, err = getSessionTokenSetEACL(ctx); err != nil {
            h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
            return
        }
    }

    var contentReader io.Reader
    var size int64
    var size uint64
    if content, ok := r.MultipartForm.Value["file"]; ok {
        contentReader = bytes.NewBufferString(content[0])
        size = int64(len(content[0]))
        size = uint64(len(content[0]))
    } else {
        file, head, err := r.FormFile("file")
        if err != nil {

@@ -414,7 +459,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
            return
        }
        contentReader = file
        size = head.Size
        size = uint64(head.Size)
        reqInfo.ObjectName = strings.ReplaceAll(reqInfo.ObjectName, "${filename}", head.Filename)
    }
    if !policy.CheckContentLength(size) {

@@ -422,7 +467,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        return
    }

    bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
    bktInfo, err := h.obj.GetBucketInfo(ctx, reqInfo.BucketName)
    if err != nil {
        h.logAndSendError(w, "could not get bucket info", reqInfo, err)
        return

@@ -436,7 +481,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        Header: metadata,
    }

    extendedObjInfo, err := h.obj.PutObject(r.Context(), params)
    extendedObjInfo, err := h.obj.PutObject(ctx, params)
    if err != nil {
        h.logAndSendError(w, "could not upload object", reqInfo, err)
        return

@@ -449,8 +494,8 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        BktInfo: bktInfo,
        ReqInfo: reqInfo,
    }
    if err = h.sendNotifications(r.Context(), s); err != nil {
        h.log.Error("couldn't send notification: %w", zap.Error(err))
    if err = h.sendNotifications(ctx, s); err != nil {
        h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
    }

    if acl := auth.MultipartFormValue(r, "acl"); acl != "" {

@@ -475,7 +520,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        NodeVersion: extendedObjInfo.NodeVersion,
    }

    if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
    if _, err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
        h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
        return
    }

@@ -488,14 +533,14 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        SessionToken: sessionTokenEACL,
    }

    if err = h.obj.PutBucketACL(r.Context(), p); err != nil {
    if err = h.obj.PutBucketACL(ctx, p); err != nil {
        h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
        return
    }
    }

    if settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo); err != nil {
        h.log.Warn("couldn't get bucket versioning", zap.String("bucket name", reqInfo.BucketName), zap.Error(err))
    if settings, err := h.obj.GetBucketSettings(ctx, bktInfo); err != nil {
        h.reqLogger(ctx).Warn(logs.CouldntGetBucketVersioning, zap.String("bucket name", reqInfo.BucketName), zap.Error(err))
    } else if settings.VersioningEnabled() {
        w.Header().Set(api.AmzVersionID, objInfo.VersionID())
    }

@@ -517,7 +562,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        ETag: objInfo.HashSum,
    }
    w.WriteHeader(status)
    if _, err = w.Write(api.EncodeResponse(resp)); err != nil {
    if _, err = w.Write(middleware.EncodeResponse(resp)); err != nil {
        h.logAndSendError(w, "something went wrong", reqInfo, err)
    }
    return

@@ -528,7 +573,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
    w.WriteHeader(status)
}

func checkPostPolicy(r *http.Request, reqInfo *api.ReqInfo, metadata map[string]string) (*postPolicy, error) {
func checkPostPolicy(r *http.Request, reqInfo *middleware.ReqInfo, metadata map[string]string) (*postPolicy, error) {
    policy := &postPolicy{empty: true}
    if policyStr := auth.MultipartFormValue(r, "policy"); policyStr != "" {
        policyData, err := base64.StdEncoding.DecodeString(policyStr)

@@ -667,7 +712,8 @@ func parseMetadata(r *http.Request) map[string]string {
}

func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
    reqInfo := api.GetReqInfo(r.Context())
    ctx := r.Context()
    reqInfo := middleware.GetReqInfo(ctx)
    p := &layer.CreateBucketParams{
        Name: reqInfo.BucketName,
    }

@@ -677,7 +723,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
        return
    }

    key, err := h.bearerTokenIssuerKey(r.Context())
    key, err := h.bearerTokenIssuerKey(ctx)
    if err != nil {
        h.logAndSendError(w, "couldn't get bearer token signature key", reqInfo, err)
        return

@@ -703,7 +749,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
    }

    var policies []*accessbox.ContainerPolicy
    boxData, err := layer.GetBoxData(r.Context())
    boxData, err := middleware.GetBoxData(ctx)
    if err == nil {
        policies = boxData.Policies
        p.SessionContainerCreation = boxData.Gate.SessionTokenForPut()

@@ -727,32 +773,31 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {

    p.ObjectLockEnabled = isLockEnabled(r.Header)

    bktInfo, err := h.obj.CreateBucket(r.Context(), p)
    bktInfo, err := h.obj.CreateBucket(ctx, p)
    if err != nil {
        h.logAndSendError(w, "could not create bucket", reqInfo, err)
        return
    }

    h.log.Info("bucket is created", zap.String("reqId", reqInfo.RequestID),
        zap.String("bucket", reqInfo.BucketName), zap.Stringer("container_id", bktInfo.CID))
    h.reqLogger(ctx).Info(logs.BucketIsCreated, zap.Stringer("container_id", bktInfo.CID))

    if p.ObjectLockEnabled {
        sp := &layer.PutSettingsParams{
            BktInfo: bktInfo,
            Settings: &data.BucketSettings{Versioning: data.VersioningEnabled},
        }
        if err = h.obj.PutBucketSettings(r.Context(), sp); err != nil {
        if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
            h.logAndSendError(w, "couldn't enable bucket versioning", reqInfo, err,
                zap.String("container_id", bktInfo.CID.EncodeToString()))
            return
        }
    }

    api.WriteSuccessResponseHeadersOnly(w)
    middleware.WriteSuccessResponseHeadersOnly(w)
}

func (h handler) setPolicy(prm *layer.CreateBucketParams, locationConstraint string, userPolicies []*accessbox.ContainerPolicy) error {
    prm.Policy = h.cfg.Policy.Default()
    prm.Policy = h.cfg.DefaultPlacementPolicy()
    prm.LocationConstraint = locationConstraint

    if locationConstraint == "" {

@@ -766,7 +811,7 @@ func (h handler) setPolicy(prm *layer.CreateBucketParams, locationConstraint str
    }
    }

    if policy, ok := h.cfg.Policy.Get(locationConstraint); ok {
    if policy, ok := h.cfg.PlacementPolicy(locationConstraint); ok {
        prm.Policy = policy
        return nil
    }

@@ -1,15 +1,29 @@
package handler

import (
    "bytes"
    "context"
    "crypto/rand"
    "encoding/json"
    "errors"
    "io"
    "mime/multipart"
    "net/http"
    "net/http/httptest"
    "runtime"
    "strconv"
    "strings"
    "testing"
    "time"

    "github.com/TrueCloudLab/frostfs-s3-gw/api"
    "github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
    v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
    s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/stretchr/testify/require"
)

@@ -100,7 +114,7 @@ func TestEmptyPostPolicy(t *testing.T) {
        },
    },
    }
    reqInfo := &api.ReqInfo{}
    reqInfo := &middleware.ReqInfo{}
    metadata := make(map[string]string)

    _, err := checkPostPolicy(r, reqInfo, metadata)

@@ -126,3 +140,205 @@ func TestPutObjectOverrideCopiesNumber(t *testing.T) {
    require.NoError(t, err)
    require.Equal(t, "1", objInfo.Headers[layer.AttributeFrostfsCopiesNumber])
}

func TestPutObjectWithNegativeContentLength(t *testing.T) {
    tc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-put", "object-for-put"
    createTestBucket(tc, bktName)

    content := []byte("content")
    w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
    r.ContentLength = -1
    tc.Handler().PutObjectHandler(w, r)
    assertStatus(t, w, http.StatusOK)

    w, r = prepareTestRequest(tc, bktName, objName, nil)
    tc.Handler().HeadObjectHandler(w, r)
    assertStatus(t, w, http.StatusOK)
    require.Equal(t, strconv.Itoa(len(content)), w.Header().Get(api.ContentLength))
}

func TestPutObjectWithStreamBodyError(t *testing.T) {
    tc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-put", "object-for-put"
    createTestBucket(tc, bktName)

    content := []byte("content")
    w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
    r.Header.Set(api.AmzContentSha256, api.StreamingContentSHA256)
    r.Header.Set(api.ContentEncoding, api.AwsChunked)
    tc.Handler().PutObjectHandler(w, r)
    assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrMissingContentLength))

    checkNotFound(t, tc, bktName, objName, emptyVersion)
}

func TestPutObjectWithWrapReaderDiscardOnError(t *testing.T) {
    tc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-put", "object-for-put"
    createTestBucket(tc, bktName)

    content := make([]byte, 128*1024)
    _, err := rand.Read(content)
    require.NoError(t, err)

    w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
    tc.tp.SetObjectPutError(objName, errors.New("some error"))
    numGoroutineBefore := runtime.NumGoroutine()
    tc.Handler().PutObjectHandler(w, r)
    numGoroutineAfter := runtime.NumGoroutine()
    require.Equal(t, numGoroutineBefore, numGoroutineAfter, "goroutines shouldn't leak during put object")
}

func TestPutObjectWithStreamBodyAWSExample(t *testing.T) {
    hc := prepareHandlerContext(t)

    bktName, objName := "examplebucket", "chunkObject.txt"
    createTestBucket(hc, bktName)

    w, req, chunk := getChunkedRequest(hc.context, t, bktName, objName)
    hc.Handler().PutObjectHandler(w, req)
    assertStatus(t, w, http.StatusOK)

    data := getObjectRange(t, hc, bktName, objName, 0, 66824)
    for i := range chunk {
        require.Equal(t, chunk[i], data[i])
    }
}

func TestPutChunkedTestContentEncoding(t *testing.T) {
    hc := prepareHandlerContext(t)

    bktName, objName := "examplebucket", "chunkObject.txt"
    createTestBucket(hc, bktName)

    w, req, _ := getChunkedRequest(hc.context, t, bktName, objName)
    req.Header.Set(api.ContentEncoding, api.AwsChunked+",gzip")

    hc.Handler().PutObjectHandler(w, req)
    assertStatus(t, w, http.StatusOK)

    resp := headObjectBase(hc, bktName, objName, emptyVersion)
    require.Equal(t, "gzip", resp.Header().Get(api.ContentEncoding))

    w, req, _ = getChunkedRequest(hc.context, t, bktName, objName)
    req.Header.Set(api.ContentEncoding, "gzip")
    hc.Handler().PutObjectHandler(w, req)
    assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrInvalidEncodingMethod))

    hc.config.bypassContentEncodingInChunks = true
    w, req, _ = getChunkedRequest(hc.context, t, bktName, objName)
    req.Header.Set(api.ContentEncoding, "gzip")
    hc.Handler().PutObjectHandler(w, req)
    assertStatus(t, w, http.StatusOK)

    resp = headObjectBase(hc, bktName, objName, emptyVersion)
    require.Equal(t, "gzip", resp.Header().Get(api.ContentEncoding))
}

func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
    chunk := make([]byte, 65*1024)
    for i := range chunk {
        chunk[i] = 'a'
    }
    chunk1 := chunk[:64*1024]
    chunk2 := chunk[64*1024:]

    AWSAccessKeyID := "AKIAIOSFODNN7EXAMPLE"
    AWSSecretAccessKey := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"

    awsCreds := credentials.NewStaticCredentials(AWSAccessKeyID, AWSSecretAccessKey, "")
    signer := v4.NewSigner(awsCreds)

    reqBody := bytes.NewBufferString("10000;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648\r\n")
    _, err := reqBody.Write(chunk1)
    require.NoError(t, err)
    _, err = reqBody.WriteString("\r\n400;chunk-signature=0055627c9e194cb4542bae2aa5492e3c1575bbb81b612b7d234b86a503ef5497\r\n")
    require.NoError(t, err)
    _, err = reqBody.Write(chunk2)
    require.NoError(t, err)
    _, err = reqBody.WriteString("\r\n0;chunk-signature=b6c6ea8a5354eaf15b3cb7646744f4275b71ea724fed81ceb9323e279d449df9\r\n\r\n")
    require.NoError(t, err)

    req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil)
    require.NoError(t, err)
    req.Header.Set("content-encoding", "aws-chunked")
    req.Header.Set("content-length", "66824")
    req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
    req.Header.Set("x-amz-decoded-content-length", "66560")
    req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")

    signTime, err := time.Parse("20060102T150405Z", "20130524T000000Z")
    require.NoError(t, err)

    _, err = signer.Sign(req, nil, "s3", "us-east-1", signTime)
    require.NoError(t, err)

    req.Body = io.NopCloser(reqBody)

    w := httptest.NewRecorder()
    reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName})
    req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
    req = req.WithContext(middleware.SetClientTime(req.Context(), signTime))
    req = req.WithContext(middleware.SetAuthHeaders(req.Context(), &middleware.AuthHeader{
        AccessKeyID: AWSAccessKeyID,
        SignatureV4: "4f232c4386841ef735655705268965c44a0e4690baa4adea153f7db9fa80a0a9",
        Region: "us-east-1",
    }))
    req = req.WithContext(middleware.SetBoxData(req.Context(), &accessbox.Box{
        Gate: &accessbox.GateData{
            AccessKey: AWSSecretAccessKey,
        },
    }))

    return w, req, chunk
}
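
// The body assembled by getChunkedRequest above follows the aws-chunked
// framing that the gateway decodes on PUT. Each frame on the wire is
//
//     <size-in-hex>;chunk-signature=<64-hex-char HMAC>\r\n<payload>\r\n
//
// and the stream ends with a zero-sized frame ("0;chunk-signature=...\r\n\r\n").
// Here "10000" is 64 KiB and "400" is 1 KiB, so the decoded payload is
// 66560 bytes (matching x-amz-decoded-content-length) while the framed body
// on the wire is 66824 bytes (matching content-length).
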
func TestCreateBucket(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
bktName := "bkt-name"
|
||||
|
||||
box, _ := createAccessBox(t)
|
||||
createBucket(t, hc, bktName, box)
|
||||
createBucketAssertS3Error(hc, bktName, box, s3errors.ErrBucketAlreadyOwnedByYou)
|
||||
|
||||
box2, _ := createAccessBox(t)
|
||||
createBucketAssertS3Error(hc, bktName, box2, s3errors.ErrBucketAlreadyExists)
|
||||
}
|
||||
|
||||
func TestPutObjectClientCut(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
bktName, objName1, objName2 := "bkt-name", "obj-name1", "obj-name2"
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
putObject(hc, bktName, objName1)
|
||||
obj1 := getObjectFromLayer(hc, objName1)[0]
|
||||
require.Empty(t, getObjectAttribute(obj1, "s3-client-cut"))
|
||||
|
||||
hc.layerFeatures.SetClientCut(true)
|
||||
putObject(hc, bktName, objName2)
|
||||
obj2 := getObjectFromLayer(hc, objName2)[0]
|
||||
require.Equal(t, "true", getObjectAttribute(obj2, "s3-client-cut"))
|
||||
}
|
||||
|
||||
func getObjectFromLayer(hc *handlerContext, objName string) []*object.Object {
|
||||
var res []*object.Object
|
||||
for _, o := range hc.tp.Objects() {
|
||||
if objName == getObjectAttribute(o, object.AttributeFilePath) {
|
||||
res = append(res, o)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func getObjectAttribute(obj *object.Object, attrName string) string {
|
||||
for _, attr := range obj.Attributes() {
|
||||
if attr.Key() == attrName {
|
||||
return attr.Value()
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
|
|
@ -104,7 +104,7 @@ type Object struct {
|
|||
Key string
|
||||
LastModified string // time string of format "2006-01-02T15:04:05.000Z"
|
||||
ETag string `xml:"ETag,omitempty"`
|
||||
Size int64
|
||||
Size uint64
|
||||
|
||||
// Owner of the object.
|
||||
Owner *Owner `xml:"Owner,omitempty"`
|
||||
|
@ -120,7 +120,7 @@ type ObjectVersionResponse struct {
|
|||
Key string `xml:"Key"`
|
||||
LastModified string `xml:"LastModified"`
|
||||
Owner Owner `xml:"Owner"`
|
||||
Size int64 `xml:"Size"`
|
||||
Size uint64 `xml:"Size"`
|
||||
StorageClass string `xml:"StorageClass,omitempty"` // is empty!!
|
||||
VersionID string `xml:"VersionId"`
|
||||
}
|
||||
|
|
220
api/handler/s3reader.go
Normal file
220
api/handler/s3reader.go
Normal file
|
@ -0,0 +1,220 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
|
||||
errs "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
)
|
||||
|
||||
const (
|
||||
chunkSignatureHeader = "chunk-signature="
|
||||
maxChunkSize = 16 << 20
|
||||
)
|
||||
|
||||
type (
|
||||
s3ChunkReader struct {
|
||||
reader *bufio.Reader
|
||||
streamSigner *v4.StreamSigner
|
||||
|
||||
requestTime time.Time
|
||||
buffer []byte
|
||||
offset int
|
||||
err error
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
errGiantChunk = errors.New("chunk too big: choose chunk size <= 16MiB")
|
||||
errMalformedChunkedEncoding = errors.New("malformed chunked encoding")
|
||||
)
|
||||
|
||||
func (c *s3ChunkReader) Close() (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *s3ChunkReader) Read(buf []byte) (num int, err error) {
|
||||
if c.offset > 0 {
|
||||
num = copy(buf, c.buffer[c.offset:])
|
||||
if num == len(buf) {
|
||||
c.offset += num
|
||||
return num, nil
|
||||
}
|
||||
c.offset = 0
|
||||
buf = buf[num:]
|
||||
}
|
||||
|
||||
var size int
|
||||
for {
|
||||
b, err := c.reader.ReadByte()
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return num, c.err
|
||||
}
|
||||
if b == ';' { // separating character
|
||||
break
|
||||
}
|
||||
|
||||
// Manually deserialize the size since AWS specified
|
||||
// the chunk size to be of variable width. In particular,
|
||||
// a size of 16 is encoded as `10` while a size of 64 KB
|
||||
// is `10000`.
|
||||
switch {
|
||||
case b >= '0' && b <= '9':
|
||||
size = size<<4 | int(b-'0')
|
||||
case b >= 'a' && b <= 'f':
|
||||
size = size<<4 | int(b-('a'-10))
|
||||
case b >= 'A' && b <= 'F':
|
||||
size = size<<4 | int(b-('A'-10))
|
||||
default:
|
||||
c.err = errMalformedChunkedEncoding
|
||||
return num, c.err
|
||||
}
|
||||
if size > maxChunkSize {
|
||||
c.err = errGiantChunk
|
||||
return num, c.err
|
||||
}
|
||||
}
|
||||
|
||||
// Now, we read the signature of the following payload and expect:
|
||||
// chunk-signature=" + <signature-as-hex> + "\r\n"
|
||||
//
|
||||
// The signature is 64 bytes long (hex-encoded SHA256 hash) and
|
||||
// starts with a 16 byte header: len("chunk-signature=") + 64 == 80.
|
||||
var signature [80]byte
|
||||
_, err = io.ReadFull(c.reader, signature[:])
|
||||
if err == io.EOF {
|
||||
		err = io.ErrUnexpectedEOF
	}
	if err != nil {
		c.err = err
		return num, c.err
	}
	if !bytes.HasPrefix(signature[:], []byte(chunkSignatureHeader)) {
		c.err = errMalformedChunkedEncoding
		return num, c.err
	}
	b, err := c.reader.ReadByte()
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	if err != nil {
		c.err = err
		return num, c.err
	}
	if b != '\r' {
		c.err = errMalformedChunkedEncoding
		return num, c.err
	}
	b, err = c.reader.ReadByte()
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	if err != nil {
		c.err = err
		return num, c.err
	}
	if b != '\n' {
		c.err = errMalformedChunkedEncoding
		return num, c.err
	}

	if cap(c.buffer) < size {
		c.buffer = make([]byte, size)
	} else {
		c.buffer = c.buffer[:size]
	}

	// Now, we read the payload and compute its SHA-256 hash.
	_, err = io.ReadFull(c.reader, c.buffer)
	if err == io.EOF && size != 0 {
		err = io.ErrUnexpectedEOF
	}
	if err != nil && err != io.EOF {
		c.err = err
		return num, c.err
	}
	b, err = c.reader.ReadByte()
	if b != '\r' || err != nil {
		c.err = errMalformedChunkedEncoding
		return num, c.err
	}
	b, err = c.reader.ReadByte()
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	if err != nil {
		c.err = err
		return num, c.err
	}
	if b != '\n' {
		c.err = errMalformedChunkedEncoding
		return num, c.err
	}

	// Once we have read the entire chunk successfully, we verify
	// that the received signature matches our computed signature.

	calculatedSignature, err := c.streamSigner.GetSignature(nil, c.buffer, c.requestTime)
	if err != nil {
		c.err = err
		return num, c.err
	}
	if string(signature[16:]) != hex.EncodeToString(calculatedSignature) {
		c.err = errs.GetAPIError(errs.ErrSignatureDoesNotMatch)
		return num, c.err
	}

	// If the chunk size is zero we return io.EOF. As specified by AWS,
	// only the last chunk is zero-sized.
	if size == 0 {
		c.err = io.EOF
		return num, c.err
	}

	c.offset = copy(buf, c.buffer)
	num += c.offset
	return num, err
}

func newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, error) {
	box, err := middleware.GetBoxData(req.Context())
	if err != nil {
		return nil, errs.GetAPIError(errs.ErrAuthorizationHeaderMalformed)
	}

	authHeaders, err := middleware.GetAuthHeaders(req.Context())
	if err != nil {
		return nil, errs.GetAPIError(errs.ErrAuthorizationHeaderMalformed)
	}

	currentCredentials := credentials.NewStaticCredentials(authHeaders.AccessKeyID, box.Gate.AccessKey, "")
	seed, err := hex.DecodeString(authHeaders.SignatureV4)
	if err != nil {
		return nil, errs.GetAPIError(errs.ErrSignatureDoesNotMatch)
	}

	reqTime, err := middleware.GetClientTime(req.Context())
	if err != nil {
		return nil, errs.GetAPIError(errs.ErrMalformedDate)
	}
	newStreamSigner := v4.NewStreamSigner(authHeaders.Region, "s3", seed, currentCredentials)

	return &s3ChunkReader{
		reader:       bufio.NewReader(req.Body),
		streamSigner: newStreamSigner,
		requestTime:  reqTime,
		buffer:       make([]byte, 64*1024),
	}, nil
}
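Note: the Read tail above consumes AWS SigV4 streaming framing ("aws-chunked"): each frame on the wire is `<hex-size>;chunk-signature=<hex>\r\n<payload>\r\n`, and the stream terminates with a zero-sized chunk whose signature is still verified. The sketch below is not part of this patch; the helper name and standalone main are illustrative, shown only to make the frame layout concrete.

package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// parseChunkHeader reads one aws-chunked frame header of the form
// "<hex-size>;chunk-signature=<hex>\r\n" and returns the declared payload
// size plus the hex-encoded signature. Hypothetical helper.
func parseChunkHeader(r *bufio.Reader) (int64, string, error) {
	line, err := r.ReadString('\n')
	if err != nil {
		return 0, "", err
	}
	line = strings.TrimRight(line, "\r\n")
	sizePart, sig, ok := strings.Cut(line, ";chunk-signature=")
	if !ok {
		return 0, "", fmt.Errorf("malformed chunk header: %q", line)
	}
	size, err := strconv.ParseInt(sizePart, 16, 64) // the chunk size is hex-encoded
	if err != nil {
		return 0, "", fmt.Errorf("bad chunk size: %w", err)
	}
	return size, sig, nil
}

func main() {
	frame := "400;chunk-signature=0123deadbeef\r\n"
	size, sig, err := parseChunkHeader(bufio.NewReader(strings.NewReader(frame)))
	fmt.Println(size, sig, err) // 1024 0123deadbeef <nil>
}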
@@ -8,10 +8,12 @@ import (
 	"strings"
 	"unicode"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"go.uber.org/zap"
 )
@@ -24,7 +26,8 @@ const (
 )

 func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	ctx := r.Context()
+	reqInfo := middleware.GetReqInfo(ctx)

 	tagSet, err := readTagSet(r.Body)
 	if err != nil {

@@ -46,7 +49,7 @@ func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
 		},
 		TagSet: tagSet,
 	}
-	nodeVersion, err := h.obj.PutObjectTagging(r.Context(), tagPrm)
+	nodeVersion, err := h.obj.PutObjectTagging(ctx, tagPrm)
 	if err != nil {
 		h.logAndSendError(w, "could not put object tagging", reqInfo, err)
 		return

@@ -63,15 +66,15 @@ func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
 		BktInfo: bktInfo,
 		ReqInfo: reqInfo,
 	}
-	if err = h.sendNotifications(r.Context(), s); err != nil {
-		h.log.Error("couldn't send notification: %w", zap.Error(err))
+	if err = h.sendNotifications(ctx, s); err != nil {
+		h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
 	}

 	w.WriteHeader(http.StatusOK)
 }

 func (h *handler) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {

@@ -102,13 +105,14 @@ func (h *handler) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
 	if settings.VersioningEnabled() {
 		w.Header().Set(api.AmzVersionID, versionID)
 	}
-	if err = api.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
+	if err = middleware.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
 		h.logAndSendError(w, "something went wrong", reqInfo, err)
 	}
 }

 func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	ctx := r.Context()
+	reqInfo := middleware.GetReqInfo(ctx)

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {

@@ -122,7 +126,7 @@ func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
 		VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
 	}

-	nodeVersion, err := h.obj.DeleteObjectTagging(r.Context(), p)
+	nodeVersion, err := h.obj.DeleteObjectTagging(ctx, p)
 	if err != nil {
 		h.logAndSendError(w, "could not delete object tagging", reqInfo, err)
 		return

@@ -139,15 +143,15 @@ func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
 		BktInfo: bktInfo,
 		ReqInfo: reqInfo,
 	}
-	if err = h.sendNotifications(r.Context(), s); err != nil {
-		h.log.Error("couldn't send notification: %w", zap.Error(err))
+	if err = h.sendNotifications(ctx, s); err != nil {
+		h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
 	}

 	w.WriteHeader(http.StatusNoContent)
 }

 func (h *handler) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())

 	tagSet, err := readTagSet(r.Body)
 	if err != nil {

@@ -168,7 +172,7 @@ func (h *handler) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
 }

 func (h *handler) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {

@@ -182,14 +186,14 @@ func (h *handler) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	if err = api.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
+	if err = middleware.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
 		h.logAndSendError(w, "something went wrong", reqInfo, err)
 		return
 	}
 }

 func (h *handler) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
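For context, these handlers implement the standard S3 tagging API, so any S3 SDK can drive them. A minimal client-side sketch using aws-sdk-go v1 (endpoint, bucket, key and tag values are placeholders for a local gateway):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region:           aws.String("us-east-1"),
		Endpoint:         aws.String("http://localhost:8080"), // hypothetical gateway address
		S3ForcePathStyle: aws.Bool(true),
	}))
	svc := s3.New(sess)

	// Exercises PutObjectTaggingHandler above.
	_, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("my-object"),
		Tagging: &s3.Tagging{TagSet: []*s3.Tag{
			{Key: aws.String("env"), Value: aws.String("dev")},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}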
@@ -3,58 +3,58 @@ package handler
 import (
 	"net/http"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 )

 func (h *handler) SelectObjectContentHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) GetBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) GetBucketAccelerateHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) GetBucketRequestPaymentHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) GetBucketLoggingHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) GetBucketReplicationHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) DeleteBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }
@@ -2,21 +2,33 @@ package handler

 import (
 	"context"
-	errorsStd "errors"
+	"errors"
 	"net/http"
 	"strconv"
 	"strings"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
-	"github.com/TrueCloudLab/frostfs-sdk-go/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	frosterrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+	"go.opentelemetry.io/otel/trace"
 	"go.uber.org/zap"
 )

-func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo *api.ReqInfo, err error, additional ...zap.Field) {
-	code := api.WriteErrorResponse(w, reqInfo, transformToS3Error(err))
+func (h *handler) reqLogger(ctx context.Context) *zap.Logger {
+	reqLogger := middleware.GetReqLog(ctx)
+	if reqLogger != nil {
+		return reqLogger
+	}
+	return h.log
+}
+
+func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo *middleware.ReqInfo, err error, additional ...zap.Field) {
+	code := middleware.WriteErrorResponse(w, reqInfo, transformToS3Error(err))
 	fields := []zap.Field{
 		zap.Int("status", code),
 		zap.String("request_id", reqInfo.RequestID),
@@ -26,20 +38,44 @@ func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo *middleware.ReqInfo, err error, additional ...zap.Field) {
 		zap.String("description", logText),
 		zap.Error(err)}
 	fields = append(fields, additional...)
-	h.log.Error("call method", fields...)
+	if traceID, err := trace.TraceIDFromHex(reqInfo.TraceID); err == nil && traceID.IsValid() {
+		fields = append(fields, zap.String("trace_id", reqInfo.TraceID))
+	}
+	h.log.Error(logs.RequestFailed, fields...) // consider using h.reqLogger (it requires accept context.Context or http.Request)
 }

+func (h *handler) logAndSendErrorNoHeader(w http.ResponseWriter, logText string, reqInfo *middleware.ReqInfo, err error, additional ...zap.Field) {
+	middleware.WriteErrorResponseNoHeader(w, reqInfo, transformToS3Error(err))
+	fields := []zap.Field{
+		zap.String("request_id", reqInfo.RequestID),
+		zap.String("method", reqInfo.API),
+		zap.String("bucket", reqInfo.BucketName),
+		zap.String("object", reqInfo.ObjectName),
+		zap.String("description", logText),
+		zap.Error(err)}
+	fields = append(fields, additional...)
+	if traceID, err := trace.TraceIDFromHex(reqInfo.TraceID); err == nil && traceID.IsValid() {
+		fields = append(fields, zap.String("trace_id", reqInfo.TraceID))
+	}
+	h.log.Error(logs.RequestFailed, fields...) // consider using h.reqLogger (it requires accept context.Context or http.Request)
+}
+
 func transformToS3Error(err error) error {
-	if _, ok := err.(errors.Error); ok {
+	err = frosterrors.UnwrapErr(err) // this wouldn't work with errors.Join
+	if _, ok := err.(s3errors.Error); ok {
 		return err
 	}

-	if errorsStd.Is(err, layer.ErrAccessDenied) ||
-		errorsStd.Is(err, layer.ErrNodeAccessDenied) {
-		return errors.GetAPIError(errors.ErrAccessDenied)
+	if errors.Is(err, layer.ErrAccessDenied) ||
+		errors.Is(err, layer.ErrNodeAccessDenied) {
+		return s3errors.GetAPIError(s3errors.ErrAccessDenied)
 	}

-	return errors.GetAPIError(errors.ErrInternalError)
+	if errors.Is(err, layer.ErrGatewayTimeout) {
+		return s3errors.GetAPIError(s3errors.ErrGatewayTimeout)
+	}
+
+	return s3errors.GetAPIError(s3errors.ErrInternalError)
 }

 func (h *handler) ResolveBucket(ctx context.Context, bucket string) (*data.BucketInfo, error) {
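The switch from a type assertion on the raw error to unwrap-then-match works because the layer now wraps sentinel errors with fmt.Errorf("%w: ..."), which keeps the error chain intact. A standalone sketch of the mechanism (errAccessDenied stands in for layer.ErrAccessDenied):

package main

import (
	"errors"
	"fmt"
)

var errAccessDenied = errors.New("access denied") // stand-in for layer.ErrAccessDenied

func main() {
	wrapped := fmt.Errorf("tree service: %w", errAccessDenied)
	fmt.Println(errors.Is(wrapped, errAccessDenied)) // true: %w keeps the chain intact
	fmt.Println(errors.Is(errors.New("access denied"), errAccessDenied)) // false: same text, different value
}

As the in-code comment notes, unwrapping a single chain does not cover errors.Join, which can carry several parallel causes.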
@@ -74,26 +110,26 @@ func parseRange(s string) (*layer.RangeParams, error) {
 	prefix := "bytes="

 	if !strings.HasPrefix(s, prefix) {
-		return nil, errors.GetAPIError(errors.ErrInvalidRange)
+		return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
 	}

 	s = strings.TrimPrefix(s, prefix)

 	valuesStr := strings.Split(s, "-")
 	if len(valuesStr) != 2 {
-		return nil, errors.GetAPIError(errors.ErrInvalidRange)
+		return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
 	}

 	values := make([]uint64, 0, len(valuesStr))
 	for _, v := range valuesStr {
 		num, err := strconv.ParseUint(v, 10, 64)
 		if err != nil {
-			return nil, errors.GetAPIError(errors.ErrInvalidRange)
+			return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
 		}
 		values = append(values, num)
 	}
 	if values[0] > values[1] {
-		return nil, errors.GetAPIError(errors.ErrInvalidRange)
+		return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
 	}

 	return &layer.RangeParams{
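Note that parseRange accepts only the fully bounded "bytes=start-end" form: open-ended ranges fail the ParseUint step and reversed ranges fail the ordering check. A sketch of table-driven cases mirroring that logic (not part of the repository; the Start/End field names of layer.RangeParams are assumed from how the values are used above):

package handler

import "testing"

func TestParseRangeSketch(t *testing.T) {
	for _, tc := range []struct {
		in         string
		start, end uint64
		wantErr    bool
	}{
		{in: "bytes=0-499", start: 0, end: 499},
		{in: "bytes=500-999", start: 500, end: 999},
		{in: "bytes=500-", wantErr: true}, // open-ended form is rejected
		{in: "bytes=9-5", wantErr: true},  // start greater than end
	} {
		params, err := parseRange(tc.in)
		if tc.wantErr {
			if err == nil {
				t.Errorf("%q: expected error", tc.in)
			}
			continue
		}
		if err != nil || params.Start != tc.start || params.End != tc.end {
			t.Errorf("%q: got %+v, %v", tc.in, params, err)
		}
	}
}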
@@ -103,13 +139,13 @@ func parseRange(s string) (*layer.RangeParams, error) {
 }

 func getSessionTokenSetEACL(ctx context.Context) (*session.Container, error) {
-	boxData, err := layer.GetBoxData(ctx)
+	boxData, err := middleware.GetBoxData(ctx)
 	if err != nil {
 		return nil, err
 	}
 	sessionToken := boxData.Gate.SessionTokenForSetEACL()
 	if sessionToken == nil {
-		return nil, errors.GetAPIError(errors.ErrAccessDenied)
+		return nil, s3errors.GetAPIError(s3errors.ErrAccessDenied)
 	}

 	return sessionToken, nil
api/handler/util_test.go (new file, 64 lines)
@@ -0,0 +1,64 @@
package handler

import (
	"errors"
	"fmt"
	"testing"

	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"github.com/stretchr/testify/require"
)

func TestTransformS3Errors(t *testing.T) {
	for _, tc := range []struct {
		name     string
		err      error
		expected s3errors.ErrorCode
	}{
		{
			name:     "simple std error to internal error",
			err:      errors.New("some error"),
			expected: s3errors.ErrInternalError,
		},
		{
			name:     "layer access denied error to s3 access denied error",
			err:      layer.ErrAccessDenied,
			expected: s3errors.ErrAccessDenied,
		},
		{
			name:     "wrapped layer access denied error to s3 access denied error",
			err:      fmt.Errorf("wrap: %w", layer.ErrAccessDenied),
			expected: s3errors.ErrAccessDenied,
		},
		{
			name:     "layer node access denied error to s3 access denied error",
			err:      layer.ErrNodeAccessDenied,
			expected: s3errors.ErrAccessDenied,
		},
		{
			name:     "layer gateway timeout error to s3 gateway timeout error",
			err:      layer.ErrGatewayTimeout,
			expected: s3errors.ErrGatewayTimeout,
		},
		{
			name:     "s3 error to s3 error",
			err:      s3errors.GetAPIError(s3errors.ErrInvalidPart),
			expected: s3errors.ErrInvalidPart,
		},
		{
			name:     "wrapped s3 error to s3 error",
			err:      fmt.Errorf("wrap: %w", s3errors.GetAPIError(s3errors.ErrInvalidPart)),
			expected: s3errors.ErrInvalidPart,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			err := transformToS3Error(tc.err)
			s3err, ok := err.(s3errors.Error)
			require.True(t, ok, "error must be s3 error")
			require.Equalf(t, tc.expected, s3err.ErrCode,
				"expected: '%s', got: '%s'",
				s3errors.GetAPIError(tc.expected).Code, s3errors.GetAPIError(s3err.ErrCode).Code)
		})
	}
}
@@ -4,14 +4,14 @@ import (
 	"encoding/xml"
 	"net/http"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 )

 func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())

 	configuration := new(VersioningConfiguration)
 	if err := xml.NewDecoder(r.Body).Decode(configuration); err != nil {

@@ -57,7 +57,7 @@ func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {

 // GetBucketVersioningHandler implements bucket versioning getter handler.
 func (h *handler) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := api.GetReqInfo(r.Context())
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {

@@ -71,7 +71,7 @@ func (h *handler) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	if err = api.EncodeToResponse(w, formVersioningConfiguration(settings)); err != nil {
+	if err = middleware.EncodeToResponse(w, formVersioningConfiguration(settings)); err != nil {
 		h.logAndSendError(w, "something went wrong", reqInfo, err)
 	}
 }
@@ -1,5 +1,7 @@
 package api

+import "net/http"
+
 // Standard S3 HTTP request/response constants.
 const (
 	MetadataPrefix = "X-Amz-Meta-"

@@ -39,11 +41,13 @@
 	IfMatch     = "If-Match"
 	IfNoneMatch = "If-None-Match"

+	AmzContentSha256         = "X-Amz-Content-Sha256"
 	AmzCopyIfModifiedSince   = "X-Amz-Copy-Source-If-Modified-Since"
 	AmzCopyIfUnmodifiedSince = "X-Amz-Copy-Source-If-Unmodified-Since"
 	AmzCopyIfMatch           = "X-Amz-Copy-Source-If-Match"
 	AmzCopyIfNoneMatch       = "X-Amz-Copy-Source-If-None-Match"
 	AmzACL                   = "X-Amz-Acl"
+	AmzDecodedContentLength  = "X-Amz-Decoded-Content-Length"
 	AmzGrantFullControl      = "X-Amz-Grant-Full-Control"
 	AmzGrantRead             = "X-Amz-Grant-Read"
 	AmzGrantWrite            = "X-Amz-Grant-Write"

@@ -62,6 +66,7 @@
 	AmzServerSideEncryptionCustomerKey    = "x-amz-server-side-encryption-customer-key"
 	AmzServerSideEncryptionCustomerKeyMD5 = "x-amz-server-side-encryption-customer-key-MD5"

 	OwnerID       = "X-Owner-Id"
 	ContainerID   = "X-Container-Id"
 	ContainerName = "X-Container-Name"
 	ContainerZone = "X-Container-Zone"

@@ -77,9 +82,13 @@
 	AccessControlRequestMethod  = "Access-Control-Request-Method"
 	AccessControlRequestHeaders = "Access-Control-Request-Headers"

+	AwsChunked = "aws-chunked"
+
 	Vary = "Vary"

+	DefaultLocationConstraint = "default"
+
+	StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
 )

 // S3 request query params.
@@ -106,3 +115,8 @@ var SystemMetadata = map[string]struct{}{
 	LastModified: {},
 	ETag:         {},
 }
+
+func IsSignedStreamingV4(r *http.Request) bool {
+	return r.Header.Get(AmzContentSha256) == StreamingContentSHA256 &&
+		r.Method == http.MethodPut
+}
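Clients opt into the streaming flow with exactly this header combination; the sketch below builds such a request and re-applies the same predicate as IsSignedStreamingV4 (host, key and lengths are placeholders):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	body := strings.NewReader("...aws-chunked frames...")
	req, _ := http.NewRequest(http.MethodPut, "http://s3.example.test/bucket/key", body)
	req.Header.Set("X-Amz-Content-Sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
	req.Header.Set("Content-Encoding", "aws-chunked")
	req.Header.Set("X-Amz-Decoded-Content-Length", "1048576") // size of the raw payload

	// Same check as IsSignedStreamingV4; when true, the gateway wraps
	// req.Body in the signed chunked reader shown earlier.
	streaming := req.Header.Get("X-Amz-Content-Sha256") == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" &&
		req.Method == http.MethodPut
	fmt.Println(streaming) // true
}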
api/host_bucket_router.go (new file, 71 lines)
@@ -0,0 +1,71 @@
package api

import (
	"net/http"
	"strings"

	"github.com/go-chi/chi/v5"
)

type HostBucketRouter struct {
	routes        map[string]chi.Router
	bktParam      string
	defaultRouter chi.Router
}

func NewHostBucketRouter(bktParam string) HostBucketRouter {
	return HostBucketRouter{
		routes:   make(map[string]chi.Router),
		bktParam: bktParam,
	}
}

func (hr *HostBucketRouter) Default(router chi.Router) {
	hr.defaultRouter = router
}

func (hr HostBucketRouter) Map(host string, h chi.Router) {
	hr.routes[strings.ToLower(host)] = h
}

func (hr HostBucketRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	bucket, domain := getBucketDomain(getHost(r))
	router, ok := hr.routes[strings.ToLower(domain)]
	if !ok {
		router = hr.defaultRouter
		if router == nil {
			http.Error(w, http.StatusText(404), 404)
			return
		}
	}

	if rctx := chi.RouteContext(r.Context()); rctx != nil && bucket != "" {
		rctx.URLParams.Add(hr.bktParam, bucket)
	}

	router.ServeHTTP(w, r)
}

func getBucketDomain(host string) (bucket string, domain string) {
	parts := strings.Split(host, ".")
	if len(parts) > 1 {
		return parts[0], strings.Join(parts[1:], ".")
	}
	return "", host
}

// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
	host := r.Host
	if r.URL.IsAbs() {
		host = r.URL.Host
	}

	if i := strings.Index(host, ":"); i != -1 {
		host = host[:i]
	}

	return host
}
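A hypothetical wiring of this router (hostnames and port are examples): requests to foo.s3.example.test are dispatched to the mapped router with "foo" injected as the bucket URL parameter, while any other host falls back to the default (path-style) router.

package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
)

func main() {
	vhostRouter := chi.NewRouter() // virtual-hosted-style: foo.s3.example.test/key
	pathRouter := chi.NewRouter()  // path-style: any-host/bucket/key

	hr := api.NewHostBucketRouter("bucket")
	hr.Map("s3.example.test", vhostRouter) // subdomain becomes the "bucket" URL param
	hr.Default(pathRouter)

	_ = http.ListenAndServe(":8080", hr)
}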
@@ -1,11 +1,12 @@
 package layer

 import (
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/cache"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/user"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"go.uber.org/zap"
 )
@@ -61,7 +62,7 @@ func (c *Cache) GetBucket(name string) *data.BucketInfo {

 func (c *Cache) PutBucket(bktInfo *data.BucketInfo) {
 	if err := c.bucketCache.Put(bktInfo); err != nil {
-		c.logger.Warn("couldn't put bucket info into cache",
+		c.logger.Warn(logs.CouldntPutBucketInfoIntoCache,
 			zap.String("bucket name", bktInfo.Name),
 			zap.Stringer("bucket cid", bktInfo.CID),
 			zap.Error(err))

@@ -104,13 +105,13 @@ func (c *Cache) GetLastObject(owner user.ID, bktName, objName string) *data.ExtendedObjectInfo {

 func (c *Cache) PutObject(owner user.ID, extObjInfo *data.ExtendedObjectInfo) {
 	if err := c.objCache.PutObject(extObjInfo); err != nil {
-		c.logger.Warn("couldn't add object to cache", zap.Error(err),
+		c.logger.Warn(logs.CouldntAddObjectToCache, zap.Error(err),
 			zap.String("object_name", extObjInfo.ObjectInfo.Name), zap.String("bucket_name", extObjInfo.ObjectInfo.Bucket),
 			zap.String("cid", extObjInfo.ObjectInfo.CID.EncodeToString()), zap.String("oid", extObjInfo.ObjectInfo.ID.EncodeToString()))
 	}

 	if err := c.accessCache.Put(owner, extObjInfo.ObjectInfo.Address().EncodeToString()); err != nil {
-		c.logger.Warn("couldn't cache access control operation", zap.Error(err))
+		c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
 	}
 }

@@ -118,7 +119,7 @@ func (c *Cache) PutObjectWithName(owner user.ID, extObjInfo *data.ExtendedObjectInfo) {
 	c.PutObject(owner, extObjInfo)

 	if err := c.namesCache.Put(extObjInfo.ObjectInfo.NiceName(), extObjInfo.ObjectInfo.Address()); err != nil {
-		c.logger.Warn("couldn't put obj address to name cache",
+		c.logger.Warn(logs.CouldntPutObjAddressToNameCache,
 			zap.String("obj nice name", extObjInfo.ObjectInfo.NiceName()),
 			zap.Error(err))
 	}

@@ -134,11 +135,11 @@ func (c *Cache) GetList(owner user.ID, key cache.ObjectsListKey) []*data.NodeVersion {

 func (c *Cache) PutList(owner user.ID, key cache.ObjectsListKey, list []*data.NodeVersion) {
 	if err := c.listsCache.PutVersions(key, list); err != nil {
-		c.logger.Warn("couldn't cache list of objects", zap.Error(err))
+		c.logger.Warn(logs.CouldntCacheListOfObjects, zap.Error(err))
 	}

 	if err := c.accessCache.Put(owner, key.String()); err != nil {
-		c.logger.Warn("couldn't cache access control operation", zap.Error(err))
+		c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
 	}
 }

@@ -152,11 +153,11 @@ func (c *Cache) GetTagging(owner user.ID, key string) map[string]string {

 func (c *Cache) PutTagging(owner user.ID, key string, tags map[string]string) {
 	if err := c.systemCache.PutTagging(key, tags); err != nil {
-		c.logger.Error("couldn't cache tags", zap.Error(err))
+		c.logger.Error(logs.CouldntCacheTags, zap.Error(err))
 	}

 	if err := c.accessCache.Put(owner, key); err != nil {
-		c.logger.Warn("couldn't cache access control operation", zap.Error(err))
+		c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
 	}
 }

@@ -174,11 +175,11 @@ func (c *Cache) GetLockInfo(owner user.ID, key string) *data.LockInfo {

 func (c *Cache) PutLockInfo(owner user.ID, key string, lockInfo *data.LockInfo) {
 	if err := c.systemCache.PutLockInfo(key, lockInfo); err != nil {
-		c.logger.Error("couldn't cache lock info", zap.Error(err))
+		c.logger.Error(logs.CouldntCacheLockInfo, zap.Error(err))
 	}

 	if err := c.accessCache.Put(owner, key); err != nil {
-		c.logger.Warn("couldn't cache access control operation", zap.Error(err))
+		c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
 	}
 }

@@ -195,11 +196,11 @@ func (c *Cache) GetSettings(owner user.ID, bktInfo *data.BucketInfo) *data.BucketSettings {
 func (c *Cache) PutSettings(owner user.ID, bktInfo *data.BucketInfo, settings *data.BucketSettings) {
 	key := bktInfo.Name + bktInfo.SettingsObjectName()
 	if err := c.systemCache.PutSettings(key, settings); err != nil {
-		c.logger.Warn("couldn't cache bucket settings", zap.String("bucket", bktInfo.Name), zap.Error(err))
+		c.logger.Warn(logs.CouldntCacheBucketSettings, zap.String("bucket", bktInfo.Name), zap.Error(err))
 	}

 	if err := c.accessCache.Put(owner, key); err != nil {
-		c.logger.Warn("couldn't cache access control operation", zap.Error(err))
+		c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
 	}
 }

@@ -217,11 +218,11 @@ func (c *Cache) PutCORS(owner user.ID, bkt *data.BucketInfo, cors *data.CORSConfiguration) {
 	key := bkt.Name + bkt.CORSObjectName()

 	if err := c.systemCache.PutCORS(key, cors); err != nil {
-		c.logger.Warn("couldn't cache cors", zap.String("bucket", bkt.Name), zap.Error(err))
+		c.logger.Warn(logs.CouldntCacheCors, zap.String("bucket", bkt.Name), zap.Error(err))
 	}

 	if err := c.accessCache.Put(owner, key); err != nil {
-		c.logger.Warn("couldn't cache access control operation", zap.Error(err))
+		c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
 	}
 }

@@ -242,10 +243,10 @@ func (c *Cache) GetNotificationConfiguration(owner user.ID, bktInfo *data.BucketInfo) *data.NotificationConfiguration {
 func (c *Cache) PutNotificationConfiguration(owner user.ID, bktInfo *data.BucketInfo, configuration *data.NotificationConfiguration) {
 	key := bktInfo.Name + bktInfo.NotificationConfigurationObjectName()
 	if err := c.systemCache.PutNotificationConfiguration(key, configuration); err != nil {
-		c.logger.Warn("couldn't cache notification configuration", zap.String("bucket", bktInfo.Name), zap.Error(err))
+		c.logger.Warn(logs.CouldntCacheNotificationConfiguration, zap.String("bucket", bktInfo.Name), zap.Error(err))
 	}

 	if err := c.accessCache.Put(owner, key); err != nil {
-		c.logger.Warn("couldn't cache access control operation", zap.Error(err))
+		c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
 	}
 }
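A pattern worth noting in every Put method above: the data cache write is always paired with an accessCache entry keyed by the requesting owner, so reads only serve entries that the same owner populated. A toy standalone model of that idea (types and key encoding are illustrative, not the gateway's):

package main

import "fmt"

// ownerScopedCache pairs each data-cache write with an access record keyed by
// (owner, key); reads only serve data the same owner previously stored.
type ownerScopedCache struct {
	data   map[string]string
	access map[string]bool // "owner|key" -> true
}

func (c *ownerScopedCache) put(owner, key, value string) {
	c.data[key] = value
	c.access[owner+"|"+key] = true
}

func (c *ownerScopedCache) get(owner, key string) (string, bool) {
	if !c.access[owner+"|"+key] {
		return "", false // miss for this owner, even though the data exists
	}
	v, ok := c.data[key]
	return v, ok
}

func main() {
	c := &ownerScopedCache{data: map[string]string{}, access: map[string]bool{}}
	c.put("alice", "bkt/settings", "versioning=on")
	fmt.Println(c.get("alice", "bkt/settings")) // versioning=on true
	fmt.Println(c.get("bob", "bkt/settings"))   // "" false: bob never cached it
}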
@@ -2,15 +2,16 @@ package layer

 import (
 	"context"
-	errorsStd "errors"
+	"errors"
 	"fmt"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 )

 func (n *layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *ObjectVersion, nodeVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error) {
 	var err error
-	owner := n.Owner(ctx)
+	owner := n.BearerOwner(ctx)

 	tags := n.cache.GetTagging(owner, objectTaggingCacheKey(objVersion))
 	lockInfo := n.cache.GetLockInfo(owner, lockObjectKey(objVersion))

@@ -28,8 +29,8 @@ func (n *layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *ObjectVersion, nodeVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error) {

 	tags, lockInfo, err = n.treeService.GetObjectTaggingAndLock(ctx, objVersion.BktInfo, nodeVersion)
 	if err != nil {
-		if errorsStd.Is(err, ErrNodeNotFound) {
-			return nil, nil, errors.GetAPIError(errors.ErrNoSuchKey)
+		if errors.Is(err, ErrNodeNotFound) {
+			return nil, nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
 		}
 		return nil, nil, err
 	}
@@ -5,15 +5,16 @@ import (
 	"fmt"
 	"strconv"

-	v2container "github.com/TrueCloudLab/frostfs-api-go/v2/container"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-sdk-go/client"
-	"github.com/TrueCloudLab/frostfs-sdk-go/container"
-	cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/eacl"
-	"github.com/TrueCloudLab/frostfs-sdk-go/session"
+	v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"go.uber.org/zap"
 )

@@ -34,8 +35,7 @@ func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketInfo, error) {
 	var (
 		err error
 		res *container.Container
-		rid = api.GetRequestID(ctx)
-		log = n.log.With(zap.Stringer("cid", idCnr), zap.String("request_id", rid))
+		log = n.reqLogger(ctx).With(zap.Stringer("cid", idCnr))

 		info = &data.BucketInfo{
 			CID: idCnr,

@@ -44,10 +44,8 @@ func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketInfo, error) {
 	)
 	res, err = n.frostFS.Container(ctx, idCnr)
 	if err != nil {
-		log.Error("could not fetch container", zap.Error(err))
-
 		if client.IsErrContainerNotFound(err) {
-			return nil, errors.GetAPIError(errors.ErrNoSuchBucket)
+			return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchBucket), err.Error())
 		}
 		return nil, fmt.Errorf("get frostfs container: %w", err)
 	}

@@ -66,7 +64,7 @@ func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketInfo, error) {
 	if len(attrLockEnabled) > 0 {
 		info.ObjectLockEnabled, err = strconv.ParseBool(attrLockEnabled)
 		if err != nil {
-			log.Error("could not parse container object lock enabled attribute",
+			log.Error(logs.CouldNotParseContainerObjectLockEnabledAttribute,
 				zap.String("lock_enabled", attrLockEnabled),
 				zap.Error(err),
 			)

@@ -79,17 +77,9 @@ func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketInfo, error) {
 }

 func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
-	var (
-		err error
-		own = n.Owner(ctx)
-		res []cid.ID
-		rid = api.GetRequestID(ctx)
-	)
-	res, err = n.frostFS.UserContainers(ctx, own)
+	res, err := n.frostFS.UserContainers(ctx, n.BearerOwner(ctx))
 	if err != nil {
-		n.log.Error("could not list user containers",
-			zap.String("request_id", rid),
-			zap.Error(err))
+		n.reqLogger(ctx).Error(logs.CouldNotListUserContainers, zap.Error(err))
 		return nil, err
 	}

@@ -97,9 +87,7 @@ func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
 	for i := range res {
 		info, err := n.containerInfo(ctx, res[i])
 		if err != nil {
-			n.log.Error("could not fetch container info",
-				zap.String("request_id", rid),
-				zap.Error(err))
+			n.reqLogger(ctx).Error(logs.CouldNotFetchContainerInfo, zap.Error(err))
 			continue
 		}

@@ -110,14 +98,13 @@ func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
 }

 func (n *layer) createContainer(ctx context.Context, p *CreateBucketParams) (*data.BucketInfo, error) {
-	ownerID := n.Owner(ctx)
 	if p.LocationConstraint == "" {
 		p.LocationConstraint = api.DefaultLocationConstraint // s3tests_boto3.functional.test_s3:test_bucket_get_location
 	}
 	bktInfo := &data.BucketInfo{
 		Name:               p.Name,
 		Zone:               v2container.SysAttributeZoneDefault,
-		Owner:              ownerID,
+		Owner:              n.BearerOwner(ctx),
 		Created:            TimeNow(ctx),
 		LocationConstraint: p.LocationConstraint,
 		ObjectLockEnabled:  p.ObjectLockEnabled,
@@ -8,8 +8,9 @@ import (
 	"fmt"
 	"io"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"go.uber.org/zap"
 )

@@ -38,14 +39,13 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {

 	prm := PrmObjectCreate{
 		Container:    p.BktInfo.CID,
-		Creator:      p.BktInfo.Owner,
 		Payload:      &buf,
 		Filepath:     p.BktInfo.CORSObjectName(),
 		CreationTime: TimeNow(ctx),
-		CopiesNumber: p.CopiesNumber,
+		CopiesNumber: p.CopiesNumbers,
 	}

-	objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+	_, objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
 	if err != nil {
 		return fmt.Errorf("put system object: %w", err)
 	}

@@ -58,14 +58,13 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {

 	if !objIDToDeleteNotFound {
 		if err = n.objectDelete(ctx, p.BktInfo, objIDToDelete); err != nil {
-			n.log.Error("couldn't delete cors object", zap.Error(err),
+			n.reqLogger(ctx).Error(logs.CouldntDeleteCorsObject, zap.Error(err),
 				zap.String("cnrID", p.BktInfo.CID.EncodeToString()),
 				zap.String("bucket name", p.BktInfo.Name),
 				zap.String("objID", objIDToDelete.EncodeToString()))
 		}
 	}

-	n.cache.PutCORS(n.Owner(ctx), p.BktInfo, cors)
+	n.cache.PutCORS(n.BearerOwner(ctx), p.BktInfo, cors)

 	return nil
 }

@@ -74,7 +73,7 @@ func (n *layer) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (*data.CORSConfiguration, error) {
 	cors, err := n.getCORS(ctx, bktInfo)
 	if err != nil {
 		if errorsStd.Is(err, ErrNodeNotFound) {
-			return nil, errors.GetAPIError(errors.ErrNoSuchCORSConfiguration)
+			return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchCORSConfiguration), err.Error())
 		}
 		return nil, err
 	}
@@ -7,16 +7,16 @@ import (
 	"io"
 	"time"

-	"github.com/TrueCloudLab/frostfs-sdk-go/bearer"
-	"github.com/TrueCloudLab/frostfs-sdk-go/container"
-	"github.com/TrueCloudLab/frostfs-sdk-go/container/acl"
-	cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/eacl"
-	"github.com/TrueCloudLab/frostfs-sdk-go/netmap"
-	"github.com/TrueCloudLab/frostfs-sdk-go/object"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/session"
-	"github.com/TrueCloudLab/frostfs-sdk-go/user"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 )

 // PrmContainerCreate groups parameters of FrostFS.CreateContainer operation.
@@ -91,9 +91,6 @@ type PrmObjectCreate struct {
 	// Container to store the object.
 	Container cid.ID

-	// FrostFS identifier of the object creator.
-	Creator user.ID
-
 	// Key-value object attributes.
 	Attributes [][2]string

@@ -113,7 +110,10 @@ type PrmObjectCreate struct {
 	Payload io.Reader

 	// Number of object copies that is enough to consider put successful.
-	CopiesNumber uint32
+	CopiesNumber []uint32
+
+	// Enables client side object preparing.
+	ClientCut bool
 }

 // PrmObjectDelete groups parameters of FrostFS.DeleteObject operation.
@@ -128,8 +128,29 @@ type PrmObjectDelete struct {
 	Object oid.ID
 }

+// PrmObjectSearch groups parameters of FrostFS.SearchObjects operation.
+type PrmObjectSearch struct {
+	// Authentication parameters.
+	PrmAuth
+
+	// Container to select the objects from.
+	Container cid.ID
+
+	// Key-value object attribute which should be
+	// presented in selected objects. Optional, empty key means any.
+	ExactAttribute [2]string
+
+	// File prefix of the selected objects. Optional, empty value means any.
+	FilePrefix string
+}
+
+var (
 	// ErrAccessDenied is returned from FrostFS in case of access violation.
-var ErrAccessDenied = errors.New("access denied")
+	ErrAccessDenied = errors.New("access denied")
+
+	// ErrGatewayTimeout is returned from FrostFS in case of timeout, deadline exceeded etc.
+	ErrGatewayTimeout = errors.New("gateway timeout")
+)

 // FrostFS represents virtual connection to FrostFS network.
 type FrostFS interface {

@@ -210,6 +231,15 @@ type FrostFS interface {
 	// It returns any error encountered which prevented the removal request from being sent.
 	DeleteObject(context.Context, PrmObjectDelete) error

+	// SearchObjects performs object search from the NeoFS container according
+	// to the specified parameters. It searches user's objects only.
+	//
+	// It returns ErrAccessDenied on selection access violation.
+	//
+	// It returns exactly one non-nil value. It returns any error encountered which
+	// prevented the objects from being selected.
+	SearchObjects(context.Context, PrmObjectSearch) ([]oid.ID, error)
+
 	// TimeToEpoch computes current epoch and the epoch that corresponds to the provided now and future time.
 	// Note:
 	// * future time must be after the now
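A hypothetical call site for the new search API: select the IDs of objects in a container whose file path starts with a prefix. The helper name and package are illustrative; only the interface and parameter struct come from the diff above.

package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// listByPrefix sketches a FrostFS.SearchObjects call: it returns the IDs of
// objects in cnr whose file prefix matches. Empty fields mean "match any".
func listByPrefix(ctx context.Context, fs layer.FrostFS, cnr cid.ID, prefix string) ([]oid.ID, error) {
	return fs.SearchObjects(ctx, layer.PrmObjectSearch{
		Container:  cnr,
		FilePrefix: prefix,
	})
}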
@@ -10,34 +10,53 @@ import (
 	"io"
 	"time"

-	objectv2 "github.com/TrueCloudLab/frostfs-api-go/v2/object"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-	"github.com/TrueCloudLab/frostfs-sdk-go/bearer"
-	"github.com/TrueCloudLab/frostfs-sdk-go/checksum"
-	"github.com/TrueCloudLab/frostfs-sdk-go/container"
-	cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/eacl"
-	"github.com/TrueCloudLab/frostfs-sdk-go/object"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/session"
-	"github.com/TrueCloudLab/frostfs-sdk-go/user"
+	objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 )

+type FeatureSettingsMock struct {
+	clientCut bool
+}
+
+func (k *FeatureSettingsMock) ClientCut() bool {
+	return k.clientCut
+}
+
+func (k *FeatureSettingsMock) SetClientCut(clientCut bool) {
+	k.clientCut = clientCut
+}
+
 type TestFrostFS struct {
 	FrostFS

 	objects         map[string]*object.Object
 	objectErrors    map[string]error
+	objectPutErrors map[string]error
 	containers      map[string]*container.Container
 	eaclTables      map[string]*eacl.Table
 	currentEpoch    uint64
+	key             *keys.PrivateKey
 }

-func NewTestFrostFS() *TestFrostFS {
+func NewTestFrostFS(key *keys.PrivateKey) *TestFrostFS {
 	return &TestFrostFS{
 		objects:         make(map[string]*object.Object),
 		objectErrors:    make(map[string]error),
+		objectPutErrors: make(map[string]error),
 		containers:      make(map[string]*container.Container),
 		eaclTables:      make(map[string]*eacl.Table),
+		key:             key,
 	}
 }

@@ -45,6 +64,22 @@ func (t *TestFrostFS) CurrentEpoch() uint64 {
 	return t.currentEpoch
 }

+func (t *TestFrostFS) SetObjectError(addr oid.Address, err error) {
+	if err == nil {
+		delete(t.objectErrors, addr.EncodeToString())
+	} else {
+		t.objectErrors[addr.EncodeToString()] = err
+	}
+}
+
+func (t *TestFrostFS) SetObjectPutError(fileName string, err error) {
+	if err == nil {
+		delete(t.objectPutErrors, fileName)
+	} else {
+		t.objectPutErrors[fileName] = err
+	}
+}
+
 func (t *TestFrostFS) Objects() []*object.Object {
 	res := make([]*object.Object, 0, len(t.objects))

@@ -55,6 +90,16 @@ func (t *TestFrostFS) Objects() []*object.Object {
 	return res
 }

+func (t *TestFrostFS) ObjectExists(objID oid.ID) bool {
+	for _, obj := range t.objects {
+		if id, _ := obj.ID(); id.Equals(objID) {
+			return true
+		}
+	}
+
+	return false
+}
+
 func (t *TestFrostFS) AddObject(key string, obj *object.Object) {
 	t.objects[key] = obj
 }
@@ -142,9 +187,13 @@ func (t *TestFrostFS) ReadObject(ctx context.Context, prm PrmObjectRead) (*ObjectPart, error) {

 	sAddr := addr.EncodeToString()

+	if err := t.objectErrors[sAddr]; err != nil {
+		return nil, err
+	}
+
 	if obj, ok := t.objects[sAddr]; ok {
-		owner := getOwner(ctx)
-		if !obj.OwnerID().Equals(owner) && !t.isPublicRead(prm.Container) {
+		owner := getBearerOwner(ctx)
+		if !t.checkAccess(prm.Container, owner, eacl.OperationGet) {
 			return nil, ErrAccessDenied
 		}
@@ -161,10 +210,10 @@ func (t *TestFrostFS) ReadObject(ctx context.Context, prm PrmObjectRead) (*ObjectPart, error) {
 		}, nil
 	}

-	return nil, fmt.Errorf("object not found %s", addr)
+	return nil, fmt.Errorf("%w: %s", &apistatus.ObjectNotFound{}, addr)
 }

-func (t *TestFrostFS) CreateObject(ctx context.Context, prm PrmObjectCreate) (oid.ID, error) {
+func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) {
 	b := make([]byte, 32)
 	if _, err := io.ReadFull(rand.Reader, b); err != nil {
 		return oid.ID{}, err

@@ -174,6 +223,10 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) {

 	attrs := make([]object.Attribute, 0)

+	if err := t.objectPutErrors[prm.Filepath]; err != nil {
+		return oid.ID{}, err
+	}
+
 	if prm.Filepath != "" {
 		a := object.NewAttribute()
 		a.SetKey(object.AttributeFilePath)

@@ -181,6 +234,13 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) {
 		attrs = append(attrs, *a)
 	}

+	if prm.ClientCut {
+		a := object.NewAttribute()
+		a.SetKey("s3-client-cut")
+		a.SetValue("true")
+		attrs = append(attrs, *a)
+	}
+
 	for i := range prm.Attributes {
 		a := object.NewAttribute()
 		a.SetKey(prm.Attributes[i][0])

@@ -188,13 +248,16 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) {
 		attrs = append(attrs, *a)
 	}

+	var owner user.ID
+	user.IDFromKey(&owner, t.key.PrivateKey.PublicKey)
+
 	obj := object.New()
 	obj.SetContainerID(prm.Container)
 	obj.SetID(id)
 	obj.SetPayloadSize(prm.PayloadSize)
 	obj.SetAttributes(attrs...)
 	obj.SetCreationEpoch(t.currentEpoch)
-	obj.SetOwnerID(&prm.Creator)
+	obj.SetOwnerID(&owner)
 	t.currentEpoch++

 	if len(prm.Locks) > 0 {
@@ -228,9 +291,13 @@ func (t *TestFrostFS) DeleteObject(ctx context.Context, prm PrmObjectDelete) error {
 	addr.SetContainer(prm.Container)
 	addr.SetObject(prm.Object)

-	if obj, ok := t.objects[addr.EncodeToString()]; ok {
-		owner := getOwner(ctx)
-		if !obj.OwnerID().Equals(owner) {
+	if err := t.objectErrors[addr.EncodeToString()]; err != nil {
+		return err
+	}
+
+	if _, ok := t.objects[addr.EncodeToString()]; ok {
+		owner := getBearerOwner(ctx)
+		if !t.checkAccess(prm.Container, owner, eacl.OperationDelete) {
 			return ErrAccessDenied
 		}

@@ -282,27 +349,43 @@ func (t *TestFrostFS) ContainerEACL(_ context.Context, cnrID cid.ID) (*eacl.Table, error) {
 	return table, nil
 }

-func (t *TestFrostFS) isPublicRead(cnrID cid.ID) bool {
-	table, ok := t.eaclTables[cnrID.EncodeToString()]
+func (t *TestFrostFS) checkAccess(cnrID cid.ID, owner user.ID, op eacl.Operation) bool {
+	cnr, ok := t.containers[cnrID.EncodeToString()]
 	if !ok {
 		return false
 	}

+	if !cnr.BasicACL().Extendable() {
+		return cnr.Owner().Equals(owner)
+	}
+
+	table, ok := t.eaclTables[cnrID.EncodeToString()]
+	if !ok {
+		return true
+	}
+
 	for _, rec := range table.Records() {
-		if rec.Operation() == eacl.OperationGet && len(rec.Filters()) == 0 {
+		if rec.Operation() == op && len(rec.Filters()) == 0 {
 			for _, trgt := range rec.Targets() {
 				if trgt.Role() == eacl.RoleOthers {
 					return rec.Action() == eacl.ActionAllow
 				}
+				var targetOwner user.ID
+				for _, pk := range eacl.TargetECDSAKeys(&trgt) {
+					user.IDFromKey(&targetOwner, *pk)
+					if targetOwner.Equals(owner) {
+						return rec.Action() == eacl.ActionAllow
+					}
+				}
 			}
 		}
 	}

-	return false
-}
-
-func getOwner(ctx context.Context) user.ID {
-	if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
+	return true
+}
+
+func getBearerOwner(ctx context.Context) user.ID {
+	if bd, err := middleware.GetBoxData(ctx); err == nil && bd.Gate.BearerToken != nil {
 		return bearer.ResolveIssuer(*bd.Gate.BearerToken)
 	}
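The new SetObjectError/SetObjectPutError hooks let tests force read, delete and put failures without touching the happy path. A sketch of how a test might drive them (hypothetical helper; the address and file path are illustrative, and errors are cleared by passing nil):

package example

import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

func failureInjectionSketch(addr oid.Address) (*layer.TestFrostFS, error) {
	key, err := keys.NewPrivateKey()
	if err != nil {
		return nil, err
	}
	mock := layer.NewTestFrostFS(key) // the mock now signs created objects with this key

	mock.SetObjectError(addr, layer.ErrGatewayTimeout)               // ReadObject/DeleteObject on addr now fail
	mock.SetObjectPutError("docs/report.txt", layer.ErrAccessDenied) // CreateObject with this FilePath fails
	mock.SetObjectError(addr, nil)                                   // clears the injected read error
	return mock, nil
}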
@@ -11,18 +11,20 @@ import (
 	"strings"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
-	"github.com/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-	"github.com/TrueCloudLab/frostfs-sdk-go/bearer"
-	cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/eacl"
-	"github.com/TrueCloudLab/frostfs-sdk-go/netmap"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/session"
-	"github.com/TrueCloudLab/frostfs-sdk-go/user"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"github.com/nats-io/nats.go"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"go.uber.org/zap"
@@ -44,22 +46,30 @@ type (
 		Resolve(ctx context.Context, name string) (cid.ID, error)
 	}

+	FeatureSettings interface {
+		ClientCut() bool
+	}
+
 	layer struct {
 		frostFS     FrostFS
+		gateOwner   user.ID
 		log         *zap.Logger
 		anonKey     AnonymousKey
 		resolver    BucketResolver
 		ncontroller EventListener
 		cache       *Cache
 		treeService TreeService
+		features    FeatureSettings
 	}

 	Config struct {
+		GateOwner    user.ID
 		ChainAddress string
 		Caches       *CachesConfig
 		AnonKey      AnonymousKey
 		Resolver     BucketResolver
 		TreeService  TreeService
+		Features     FeatureSettings
 	}

 	// AnonymousKey contains data for anonymous requests.
@@ -72,7 +82,7 @@ type (
 		Range      *RangeParams
 		ObjectInfo *data.ObjectInfo
 		BucketInfo *data.BucketInfo
-		Writer     io.Writer
+		Versioned  bool
 		Encryption encryption.Params
 	}

@@ -101,12 +111,21 @@ type (
 	PutObjectParams struct {
 		BktInfo       *data.BucketInfo
 		Object        string
-		Size          int64
+		Size          uint64
 		Reader        io.Reader
 		Header        map[string]string
 		Lock          *data.ObjectLock
 		Encryption    encryption.Params
-		CopiesNumber  uint32
+		CopiesNumbers []uint32
 	}

+	PutCombinedObjectParams struct {
+		BktInfo    *data.BucketInfo
+		Object     string
+		Size       uint64
+		Header     map[string]string
+		Lock       *data.ObjectLock
+		Encryption encryption.Params
+	}
+
 	DeleteObjectParams struct {
@@ -125,21 +144,22 @@ type (
 	PutCORSParams struct {
 		BktInfo       *data.BucketInfo
 		Reader        io.Reader
-		CopiesNumber  uint32
+		CopiesNumbers []uint32
 	}

 	// CopyObjectParams stores object copy request parameters.
 	CopyObjectParams struct {
+		SrcVersioned  bool
 		SrcObject     *data.ObjectInfo
 		ScrBktInfo    *data.BucketInfo
 		DstBktInfo    *data.BucketInfo
 		DstObject     string
-		SrcSize       int64
+		SrcSize       uint64
 		Header        map[string]string
 		Range         *RangeParams
 		Lock          *data.ObjectLock
 		Encryption    encryption.Params
-		CopiesNuber   uint32
+		CopiesNumbers []uint32
 	}
 	// CreateBucketParams stores bucket create request parameters.
 	CreateBucketParams struct {
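The copies parameter changes from a single uint32 to a []uint32 across these structs (note the change also fixes the old "CopiesNuber" field-name typo). A construction sketch is below; treating each slice element as the copies number for one placement vector of the container's storage policy is an assumption drawn from the rename, not something this diff states.

package example

import (
	"bytes"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
)

// putParamsSketch builds the new-style parameters; object name and values
// are illustrative.
func putParamsSketch(bktInfo *data.BucketInfo, payload []byte) layer.PutObjectParams {
	return layer.PutObjectParams{
		BktInfo:       bktInfo,
		Object:        "docs/report.txt",
		Size:          uint64(len(payload)), // Size is now unsigned
		Reader:        bytes.NewReader(payload),
		CopiesNumbers: []uint32{2, 1}, // assumed: per-placement-vector copies
	}
}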
@@ -183,6 +203,13 @@ type (
 		Error error
 	}

+	ObjectPayload struct {
+		r            io.Reader
+		params       getParams
+		encrypted    bool
+		decryptedLen uint64
+	}
+
 	// Client provides S3 API client interface.
 	Client interface {
 		Initialize(ctx context.Context, c EventListener) error

@@ -202,7 +229,7 @@ type (
 	CreateBucket(ctx context.Context, p *CreateBucketParams) (*data.BucketInfo, error)
 	DeleteBucket(ctx context.Context, p *DeleteBucketParams) error

-	GetObject(ctx context.Context, p *GetObjectParams) error
+	GetObject(ctx context.Context, p *GetObjectParams) (*ObjectPayload, error)
 	GetObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.ObjectInfo, error)
 	GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.ExtendedObjectInfo, error)
@ -266,16 +293,22 @@ func (f MsgHandlerFunc) HandleMessage(ctx context.Context, msg *nats.Msg) error
|
|||
return f(ctx, msg)
|
||||
}
|
||||
|
||||
func (p HeadObjectParams) Versioned() bool {
|
||||
return len(p.VersionID) > 0
|
||||
}
|
||||
|
||||
// NewLayer creates an instance of a layer. It checks credentials
|
||||
// and establishes gRPC connection with the node.
|
||||
func NewLayer(log *zap.Logger, frostFS FrostFS, config *Config) Client {
|
||||
return &layer{
|
||||
frostFS: frostFS,
|
||||
log: log,
|
||||
gateOwner: config.GateOwner,
|
||||
anonKey: config.AnonKey,
|
||||
resolver: config.Resolver,
|
||||
cache: NewCache(config.Caches),
|
||||
treeService: config.TreeService,
|
||||
features: config.Features,
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -302,22 +335,22 @@ func (n *layer) IsNotificationEnabled() bool {

 // IsAuthenticatedRequest checks if access box exists in the current request.
 func IsAuthenticatedRequest(ctx context.Context) bool {
-	_, ok := ctx.Value(api.BoxData).(*accessbox.Box)
-	return ok
+	_, err := middleware.GetBoxData(ctx)
+	return err == nil
 }

 // TimeNow returns client time from request or time.Now().
 func TimeNow(ctx context.Context) time.Time {
-	if now, ok := ctx.Value(api.ClientTime).(time.Time); ok {
+	if now, err := middleware.GetClientTime(ctx); err == nil {
 		return now
 	}

 	return time.Now()
 }

-// Owner returns owner id from BearerToken (context) or from client owner.
-func (n *layer) Owner(ctx context.Context) user.ID {
-	if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
+// BearerOwner returns owner id from BearerToken (context) or from client owner.
+func (n *layer) BearerOwner(ctx context.Context) user.ID {
+	if bd, err := middleware.GetBoxData(ctx); err == nil && bd.Gate.BearerToken != nil {
 		return bearer.ResolveIssuer(*bd.Gate.BearerToken)
 	}

@@ -327,9 +360,17 @@ func (n *layer) BearerOwner(ctx context.Context) user.ID {
 	return ownerID
 }

+func (n *layer) reqLogger(ctx context.Context) *zap.Logger {
+	reqLogger := middleware.GetReqLog(ctx)
+	if reqLogger != nil {
+		return reqLogger
+	}
+	return n.log
+}
+
 func (n *layer) prepareAuthParameters(ctx context.Context, prm *PrmAuth, bktOwner user.ID) {
-	if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
-		if bktOwner.Equals(bearer.ResolveIssuer(*bd.Gate.BearerToken)) {
+	if bd, err := middleware.GetBoxData(ctx); err == nil && bd.Gate.BearerToken != nil {
+		if bd.Gate.BearerToken.Impersonate() || bktOwner.Equals(bearer.ResolveIssuer(*bd.Gate.BearerToken)) {
 			prm.BearerToken = bd.Gate.BearerToken
 			return
 		}

@@ -351,8 +392,10 @@ func (n *layer) GetBucketInfo(ctx context.Context, name string) (*data.BucketInf

 	containerID, err := n.ResolveBucket(ctx, name)
 	if err != nil {
-		n.log.Debug("bucket not found", zap.Error(err))
-		return nil, errors.GetAPIError(errors.ErrNoSuchBucket)
+		if strings.Contains(err.Error(), "not found") {
+			return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucket), err.Error())
+		}
+		return nil, err
 	}

 	return n.containerInfo(ctx, containerID)

@@ -383,10 +426,10 @@ func (n *layer) ListBuckets(ctx context.Context) ([]*data.BucketInfo, error) {
 }

 // GetObject from storage.
-func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
+func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) (*ObjectPayload, error) {
 	var params getParams

-	params.oid = p.ObjectInfo.ID
+	params.objInfo = p.ObjectInfo
 	params.bktInfo = p.BucketInfo

 	var decReader *encryption.Decrypter

@@ -394,7 +437,7 @@ func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
 		var err error
 		decReader, err = getDecrypter(p)
 		if err != nil {
-			return fmt.Errorf("creating decrypter: %w", err)
+			return nil, fmt.Errorf("creating decrypter: %w", err)
 		}
 		params.off = decReader.EncryptedOffset()
 		params.ln = decReader.EncryptedLength()

@@ -408,32 +451,58 @@ func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
 		}
 	}

-	payload, err := n.initObjectPayloadReader(ctx, params)
+	r, err := n.initObjectPayloadReader(ctx, params)
 	if err != nil {
-		return fmt.Errorf("init object payload reader: %w", err)
+		if client.IsErrObjectNotFound(err) {
+			if p.Versioned {
+				err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchVersion), err.Error())
+			} else {
+				err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchKey), err.Error())
+			}
+		}
+
+		return nil, fmt.Errorf("init object payload reader: %w", err)
 	}

+	var decryptedLen uint64
+	if decReader != nil {
+		if err = decReader.SetReader(r); err != nil {
+			return nil, fmt.Errorf("set reader to decrypter: %w", err)
+		}
+		r = io.LimitReader(decReader, int64(decReader.DecryptedLength()))
+		decryptedLen = decReader.DecryptedLength()
+	}
+
+	return &ObjectPayload{
+		r:            r,
+		params:       params,
+		encrypted:    decReader != nil,
+		decryptedLen: decryptedLen,
+	}, nil
+}
+
+// Read implements io.Reader. If you want to use ObjectPayload as io.Reader
+// you must not use ObjectPayload.StreamTo method and vice versa.
+func (o *ObjectPayload) Read(p []byte) (int, error) {
+	return o.r.Read(p)
+}
+
+// StreamTo reads all payload to provided writer.
+// If you want to use this method you must not use ObjectPayload.Read and vice versa.
+func (o *ObjectPayload) StreamTo(w io.Writer) error {
 	bufSize := uint64(32 * 1024) // configure?
-	if params.ln != 0 && params.ln < bufSize {
-		bufSize = params.ln
+	if o.params.ln != 0 && o.params.ln < bufSize {
+		bufSize = o.params.ln
 	}

 	// alloc buffer for copying
 	buf := make([]byte, bufSize) // sync-pool it?

-	r := payload
-	if decReader != nil {
-		if err = decReader.SetReader(payload); err != nil {
-			return fmt.Errorf("set reader to decrypter: %w", err)
-		}
-		r = io.LimitReader(decReader, int64(decReader.DecryptedLength()))
-	}
-
 	// copy full payload
-	written, err := io.CopyBuffer(p.Writer, r, buf)
+	written, err := io.CopyBuffer(w, o.r, buf)
 	if err != nil {
-		if decReader != nil {
-			return fmt.Errorf("copy object payload written: '%d', decLength: '%d', params.ln: '%d' : %w", written, decReader.DecryptedLength(), params.ln, err)
+		if o.encrypted {
+			return fmt.Errorf("copy object payload written: '%d', decLength: '%d', params.ln: '%d' : %w", written, o.decryptedLen, o.params.ln, err)
 		}
 		return fmt.Errorf("copy object payload written: '%d': %w", written, err)
 	}
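GetObject now hands the payload to the caller instead of writing it into GetObjectParams.Writer. A minimal caller sketch (hypothetical code, not part of this change set; the imports and surrounding plumbing are assumed):

	// Hypothetical caller of the reworked GetObject; layer is
	// git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer.
	func writeObject(ctx context.Context, n layer.Client, objInfo *data.ObjectInfo, bktInfo *data.BucketInfo, w io.Writer) error {
		objPayload, err := n.GetObject(ctx, &layer.GetObjectParams{
			ObjectInfo: objInfo,
			BucketInfo: bktInfo,
			Versioned:  true, // new field: a miss now maps to NoSuchVersion instead of NoSuchKey
		})
		if err != nil {
			return err
		}

		// Stream the whole payload into w; alternatively consume objPayload as an
		// io.Reader via Read/io.ReadAll — but never both, since Read and StreamTo
		// share the same underlying reader (see the doc comments above).
		return objPayload.StreamTo(w)
	}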
@@ -485,21 +554,17 @@ func (n *layer) GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams)
 	var objInfo *data.ExtendedObjectInfo
 	var err error

-	if len(p.VersionID) == 0 {
-		objInfo, err = n.headLastVersionIfNotDeleted(ctx, p.BktInfo, p.Object)
-	} else {
+	if p.Versioned() {
 		objInfo, err = n.headVersion(ctx, p.BktInfo, p)
+	} else {
+		objInfo, err = n.headLastVersionIfNotDeleted(ctx, p.BktInfo, p.Object)
 	}
 	if err != nil {
 		return nil, err
 	}

-	reqInfo := api.GetReqInfo(ctx)
-	n.log.Debug("get object",
-		zap.String("reqId", reqInfo.RequestID),
-		zap.String("bucket", p.BktInfo.Name),
+	n.reqLogger(ctx).Debug(logs.GetObject,
 		zap.Stringer("cid", p.BktInfo.CID),
 		zap.String("object", objInfo.ObjectInfo.Name),
 		zap.Stringer("oid", objInfo.ObjectInfo.ID))

 	return objInfo, nil

@@ -507,30 +572,25 @@ func (n *layer) GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams)

 // CopyObject from one bucket into another bucket.
 func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*data.ExtendedObjectInfo, error) {
-	pr, pw := io.Pipe()
-
-	go func() {
-		err := n.GetObject(ctx, &GetObjectParams{
-			ObjectInfo: p.SrcObject,
-			Writer:     pw,
-			Range:      p.Range,
-			BucketInfo: p.ScrBktInfo,
-			Encryption: p.Encryption,
-		})
-
-		if err = pw.CloseWithError(err); err != nil {
-			n.log.Error("could not get object", zap.Error(err))
-		}
-	}()
+	objPayload, err := n.GetObject(ctx, &GetObjectParams{
+		ObjectInfo: p.SrcObject,
+		Versioned:  p.SrcVersioned,
+		Range:      p.Range,
+		BucketInfo: p.ScrBktInfo,
+		Encryption: p.Encryption,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("get object to copy: %w", err)
+	}

 	return n.PutObject(ctx, &PutObjectParams{
 		BktInfo:    p.DstBktInfo,
 		Object:     p.DstObject,
 		Size:       p.SrcSize,
-		Reader:     pr,
+		Reader:     objPayload,
 		Header:     p.Header,
 		Encryption: p.Encryption,
-		CopiesNumber:  p.CopiesNuber,
+		CopiesNumbers: p.CopiesNumbers,
 	})
 }

@@ -549,11 +609,11 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
 	if len(obj.VersionID) != 0 || settings.Unversioned() {
 		var nodeVersion *data.NodeVersion
 		if nodeVersion, obj.Error = n.getNodeVersionToDelete(ctx, bkt, obj); obj.Error != nil {
-			return dismissNotFoundError(obj)
+			return n.handleNotFoundError(bkt, obj)
 		}

 		if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nodeVersion, obj); obj.Error != nil {
-			return obj
+			return n.handleObjectDeleteErrors(ctx, bkt, obj, nodeVersion.ID)
 		}

 		obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeVersion.ID)

@@ -561,21 +621,38 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
 		return obj
 	}

-	var newVersion *data.NodeVersion
+	lastVersion, err := n.getLastNodeVersion(ctx, bkt, obj)
+	if err != nil {
+		obj.Error = err
+		return n.handleNotFoundError(bkt, obj)
+	}

 	if settings.VersioningSuspended() {
 		obj.VersionID = data.UnversionedObjectVersionID

-		var nodeVersion *data.NodeVersion
-		if nodeVersion, obj.Error = n.getNodeVersionToDelete(ctx, bkt, obj); obj.Error != nil {
-			return dismissNotFoundError(obj)
-		}
-
-		if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nodeVersion, obj); obj.Error != nil {
-			return obj
+		var nullVersionToDelete *data.NodeVersion
+		if lastVersion.IsUnversioned {
+			if !lastVersion.IsDeleteMarker() {
+				nullVersionToDelete = lastVersion
+			}
+		} else if nullVersionToDelete, obj.Error = n.getNodeVersionToDelete(ctx, bkt, obj); obj.Error != nil {
+			if !isNotFoundError(obj.Error) {
+				return obj
+			}
+		}
+
+		if nullVersionToDelete != nil {
+			if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nullVersionToDelete, obj); obj.Error != nil {
+				return n.handleObjectDeleteErrors(ctx, bkt, obj, nullVersionToDelete.ID)
+			}
 		}
 	}

+	if lastVersion.IsDeleteMarker() {
+		obj.DeleteMarkVersion = lastVersion.OID.EncodeToString()
+		return obj
+	}
+
 	randOID, err := getRandomOID()
 	if err != nil {
 		obj.Error = fmt.Errorf("couldn't get random oid: %w", err)

@@ -584,14 +661,14 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings

 	obj.DeleteMarkVersion = randOID.EncodeToString()

-	newVersion = &data.NodeVersion{
+	newVersion := &data.NodeVersion{
 		BaseNodeVersion: data.BaseNodeVersion{
 			OID:      randOID,
 			FilePath: obj.Name,
 		},
 		DeleteMarker: &data.DeleteMarkerInfo{
 			Created: TimeNow(ctx),
-			Owner:   n.Owner(ctx),
+			Owner:   n.gateOwner,
 		},
 		IsUnversioned: settings.VersioningSuspended(),
 	}

@@ -605,15 +682,46 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
 	return obj
 }

-func dismissNotFoundError(obj *VersionedObject) *VersionedObject {
-	if errors.IsS3Error(obj.Error, errors.ErrNoSuchKey) ||
-		errors.IsS3Error(obj.Error, errors.ErrNoSuchVersion) {
+func (n *layer) handleNotFoundError(bkt *data.BucketInfo, obj *VersionedObject) *VersionedObject {
+	if isNotFoundError(obj.Error) {
 		obj.Error = nil
+		n.cache.CleanListCacheEntriesContainingObject(obj.Name, bkt.CID)
+		n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
 	}

 	return obj
 }

+func (n *layer) handleObjectDeleteErrors(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject, nodeID uint64) *VersionedObject {
+	if client.IsErrObjectAlreadyRemoved(obj.Error) {
+		n.reqLogger(ctx).Debug(logs.ObjectAlreadyRemoved,
+			zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID))
+
+		obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeID)
+		if obj.Error != nil {
+			return obj
+		}
+
+		n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
+	}
+
+	if client.IsErrObjectNotFound(obj.Error) {
+		n.reqLogger(ctx).Debug(logs.ObjectNotFound,
+			zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID))
+
+		obj.Error = nil
+
+		n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
+	}
+
+	return obj
+}
+
+func isNotFoundError(err error) bool {
+	return errors.IsS3Error(err, errors.ErrNoSuchKey) ||
+		errors.IsS3Error(err, errors.ErrNoSuchVersion)
+}
+
 func (n *layer) getNodeVersionToDelete(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject) (*data.NodeVersion, error) {
 	objVersion := &ObjectVersion{
 		BktInfo: bkt,

@@ -625,6 +733,17 @@ func (n *layer) getNodeVersionToDelete(ctx context.Context, bkt *data.BucketInfo
 	return n.getNodeVersion(ctx, objVersion)
 }

+func (n *layer) getLastNodeVersion(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject) (*data.NodeVersion, error) {
+	objVersion := &ObjectVersion{
+		BktInfo:               bkt,
+		ObjectName:            obj.Name,
+		VersionID:             "",
+		NoErrorOnDeleteMarker: true,
+	}
+
+	return n.getNodeVersion(ctx, objVersion)
+}
+
 func (n *layer) removeOldVersion(ctx context.Context, bkt *data.BucketInfo, nodeVersion *data.NodeVersion, obj *VersionedObject) (string, error) {
 	if nodeVersion.IsDeleteMarker() {
 		return obj.VersionID, nil

@@ -665,15 +784,14 @@ func (n *layer) ResolveBucket(ctx context.Context, name string) (cid.ID, error)
 		return cid.ID{}, err
 	}

-	reqInfo := api.GetReqInfo(ctx)
-	n.log.Info("resolve bucket", zap.String("reqId", reqInfo.RequestID), zap.String("bucket", name), zap.Stringer("cid", cnrID))
+		n.reqLogger(ctx).Info(logs.ResolveBucket, zap.Stringer("cid", cnrID))
 	}

 	return cnrID, nil
 }

 func (n *layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
-	nodeVersions, err := n.bucketNodeVersions(ctx, p.BktInfo, "")
+	nodeVersions, err := n.getAllObjectsVersions(ctx, p.BktInfo, "", "")
 	if err != nil {
 		return err
 	}
@@ -4,7 +4,8 @@ import (
 	"testing"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	"github.com/stretchr/testify/require"
 )

@@ -29,7 +30,7 @@ func TestObjectLockAttributes(t *testing.T) {
 				Until: time.Now(),
 			},
 		},
-		CopiesNumber: 0,
+		CopiesNumbers: []uint32{0},
 	}

 	err = tc.layer.PutLockInfo(tc.ctx, p)

@@ -43,10 +44,10 @@ func TestObjectLockAttributes(t *testing.T) {

 	expEpoch := false
 	for _, attr := range lockObj.Attributes() {
-		if attr.Key() == AttributeExpirationEpoch {
+		if attr.Key() == object.SysAttributeExpEpoch {
 			expEpoch = true
 		}
 	}

-	require.Truef(t, expEpoch, "system header __NEOFS__EXPIRATION_EPOCH presence")
+	require.Truef(t, expEpoch, "system header __SYSTEM__EXPIRATION_EPOCH presence")
 }
api/layer/multi_object_reader.go (new file, 156 lines)
@@ -0,0 +1,156 @@
package layer

import (
	"context"
	"errors"
	"fmt"
	"io"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

type partObj struct {
	OID  oid.ID
	Size uint64
}

type readerInitiator interface {
	initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFSParams) (io.Reader, error)
}

// implements io.Reader of payloads of the object list stored in the FrostFS network.
type multiObjectReader struct {
	ctx context.Context

	layer readerInitiator

	startPartOffset uint64
	endPartLength   uint64

	prm getFrostFSParams

	curIndex  int
	curReader io.Reader

	parts []partObj
}

type multiObjectReaderConfig struct {
	layer readerInitiator

	// the offset of complete object and total size to read
	off, ln uint64

	bktInfo *data.BucketInfo
	parts   []partObj
}

var (
	errOffsetIsOutOfRange = errors.New("offset is out of payload range")
	errLengthIsOutOfRange = errors.New("length is out of payload range")
	errEmptyPartsList     = errors.New("empty parts list")
	errorZeroRangeLength  = errors.New("zero range length")
)

func newMultiObjectReader(ctx context.Context, cfg multiObjectReaderConfig) (*multiObjectReader, error) {
	if len(cfg.parts) == 0 {
		return nil, errEmptyPartsList
	}

	r := &multiObjectReader{
		ctx:   ctx,
		layer: cfg.layer,
		prm: getFrostFSParams{
			bktInfo: cfg.bktInfo,
		},
		parts: cfg.parts,
	}

	if cfg.off+cfg.ln == 0 {
		return r, nil
	}

	if cfg.off > 0 && cfg.ln == 0 {
		return nil, errorZeroRangeLength
	}

	startPartIndex, startPartOffset := findStartPart(cfg)
	if startPartIndex == -1 {
		return nil, errOffsetIsOutOfRange
	}
	r.startPartOffset = startPartOffset

	endPartIndex, endPartLength := findEndPart(cfg)
	if endPartIndex == -1 {
		return nil, errLengthIsOutOfRange
	}
	r.endPartLength = endPartLength

	r.parts = cfg.parts[startPartIndex : endPartIndex+1]

	return r, nil
}

func findStartPart(cfg multiObjectReaderConfig) (index int, offset uint64) {
	position := cfg.off
	for i, part := range cfg.parts {
		// Strict inequality when searching for start position to avoid reading zero length part.
		if position < part.Size {
			return i, position
		}
		position -= part.Size
	}

	return -1, 0
}

func findEndPart(cfg multiObjectReaderConfig) (index int, length uint64) {
	position := cfg.off + cfg.ln
	for i, part := range cfg.parts {
		// Non-strict inequality when searching for end position to avoid out of payload range error.
		if position <= part.Size {
			return i, position
		}
		position -= part.Size
	}

	return -1, 0
}

func (x *multiObjectReader) Read(p []byte) (n int, err error) {
	if x.curReader != nil {
		n, err = x.curReader.Read(p)
		if !errors.Is(err, io.EOF) {
			return n, err
		}
		x.curIndex++
	}

	if x.curIndex == len(x.parts) {
		return n, io.EOF
	}

	x.prm.oid = x.parts[x.curIndex].OID

	if x.curIndex == 0 {
		x.prm.off = x.startPartOffset
		x.prm.ln = x.parts[x.curIndex].Size - x.startPartOffset
	}

	if x.curIndex == len(x.parts)-1 {
		x.prm.ln = x.endPartLength - x.prm.off
	}

	x.curReader, err = x.layer.initFrostFSObjectPayloadReader(x.ctx, x.prm)
	if err != nil {
		return n, fmt.Errorf("init payload reader for the next part: %w", err)
	}

	x.prm.off = 0
	x.prm.ln = 0

	next, err := x.Read(p[n:])

	return n + next, err
}
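A worked example of the start/end search above (a standalone sketch that mirrors findStartPart/findEndPart rather than importing the package; the sizes and range are made-up values): for parts of 12, 13 and 12 bytes, the range off=8, ln=8 starts in part 0 at offset 8 and ends in part 1 after 4 bytes.

	package main

	import "fmt"

	func main() {
		// Part sizes and the requested byte range are illustrative.
		parts := []uint64{12, 13, 12}
		off, ln := uint64(8), uint64(8)

		// Locate the part containing the first byte of the range.
		startIdx, startOff := -1, uint64(0)
		pos := off
		for i, size := range parts {
			if pos < size { // strict: avoid starting at a zero-length tail
				startIdx, startOff = i, pos
				break
			}
			pos -= size
		}

		// Locate the part containing the last byte of the range.
		endIdx, endLen := -1, off+ln
		for i, size := range parts {
			if endLen <= size { // non-strict: the range may end exactly on a part boundary
				endIdx = i
				break
			}
			endLen -= size
		}

		// Prints "0 8 1 4": the reader serves part0[8:12] followed by part1[0:4].
		fmt.Println(startIdx, startOff, endIdx, endLen)
	}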
api/layer/multi_object_reader_test.go (new file, 137 lines)
@@ -0,0 +1,137 @@
package layer

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"testing"

	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	"github.com/stretchr/testify/require"
)

type readerInitiatorMock struct {
	parts map[oid.ID][]byte
}

func (r *readerInitiatorMock) initFrostFSObjectPayloadReader(_ context.Context, p getFrostFSParams) (io.Reader, error) {
	partPayload, ok := r.parts[p.oid]
	if !ok {
		return nil, errors.New("part not found")
	}

	if p.off+p.ln == 0 {
		return bytes.NewReader(partPayload), nil
	}

	if p.off > uint64(len(partPayload)-1) {
		return nil, fmt.Errorf("invalid offset: %d/%d", p.off, len(partPayload))
	}

	if p.off+p.ln > uint64(len(partPayload)) {
		return nil, fmt.Errorf("invalid range: %d-%d/%d", p.off, p.off+p.ln, len(partPayload))
	}

	return bytes.NewReader(partPayload[p.off : p.off+p.ln]), nil
}

func prepareDataReader() ([]byte, []partObj, *readerInitiatorMock) {
	mockInitReader := &readerInitiatorMock{
		parts: map[oid.ID][]byte{
			oidtest.ID(): []byte("first part 1"),
			oidtest.ID(): []byte("second part 2"),
			oidtest.ID(): []byte("third part 3"),
		},
	}

	var fullPayload []byte
	parts := make([]partObj, 0, len(mockInitReader.parts))
	for id, payload := range mockInitReader.parts {
		parts = append(parts, partObj{OID: id, Size: uint64(len(payload))})
		fullPayload = append(fullPayload, payload...)
	}

	return fullPayload, parts, mockInitReader
}

func TestMultiReader(t *testing.T) {
	ctx := context.Background()

	fullPayload, parts, mockInitReader := prepareDataReader()

	for _, tc := range []struct {
		name string
		off  uint64
		ln   uint64
		err  error
	}{
		{
			name: "simple read all",
		},
		{
			name: "simple read with length",
			ln:   uint64(len(fullPayload)),
		},
		{
			name: "middle of parts",
			off:  parts[0].Size + 2,
			ln:   4,
		},
		{
			name: "first and second",
			off:  parts[0].Size - 4,
			ln:   8,
		},
		{
			name: "first and third",
			off:  parts[0].Size - 4,
			ln:   parts[1].Size + 8,
		},
		{
			name: "second part",
			off:  parts[0].Size,
			ln:   parts[1].Size,
		},
		{
			name: "second and third",
			off:  parts[0].Size,
			ln:   parts[1].Size + parts[2].Size,
		},
		{
			name: "offset out of range",
			off:  uint64(len(fullPayload) + 1),
			ln:   1,
			err:  errOffsetIsOutOfRange,
		},
		{
			name: "zero length",
			off:  parts[1].Size + 1,
			ln:   0,
			err:  errorZeroRangeLength,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			multiReader, err := newMultiObjectReader(ctx, multiObjectReaderConfig{
				layer: mockInitReader,
				parts: parts,
				off:   tc.off,
				ln:    tc.ln,
			})
			require.ErrorIs(t, err, tc.err)

			if tc.err == nil {
				off := tc.off
				ln := tc.ln
				if off+ln == 0 {
					ln = uint64(len(fullPayload))
				}
				data, err := io.ReadAll(multiReader)
				require.NoError(t, err)
				require.Equal(t, fullPayload[off:off+ln], data)
			}
		})
	}
}
@@ -1,9 +1,11 @@
 package layer

 import (
+	"bytes"
 	"context"
 	"encoding/hex"
-	stderrors "errors"
+	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"sort"

@@ -11,12 +13,12 @@ import (
 	"strings"
 	"time"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/user"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"github.com/minio/sio"
 	"go.uber.org/zap"
 )

@@ -26,6 +28,10 @@ const (
 	UploadPartNumberAttributeName = "S3-Upload-Part-Number"
 	UploadCompletedParts          = "S3-Completed-Parts"

+	// MultipartObjectSize contains the real object size if object is combined (payload contains list of parts).
+	// This header is used to determine if object is combined.
+	MultipartObjectSize = "S3-Multipart-Object-Size"
+
 	metaPrefix = "meta-"
 	aclPrefix  = "acl-"

@@ -33,8 +39,8 @@ const (
 	MaxSizePartsList    = 1000
 	UploadMinPartNumber = 1
 	UploadMaxPartNumber = 10000
-	uploadMinSize       = 5 * 1048576    // 5MB
-	uploadMaxSize       = 5 * 1073741824 // 5GB
+	UploadMinSize       = 5 * 1024 * 1024      // 5MB
+	UploadMaxSize       = 1024 * UploadMinSize // 5GB
 )

@@ -49,7 +55,7 @@ type (
 		Info   *UploadInfoParams
 		Header map[string]string
 		Data   *UploadData
-		CopiesNumber  uint32
+		CopiesNumbers []uint32
 	}

 	UploadData struct {

@@ -60,11 +66,12 @@ type (
 	UploadPartParams struct {
 		Info       *UploadInfoParams
 		PartNumber int
-		Size       int64
+		Size       uint64
 		Reader     io.Reader
 	}

 	UploadCopyParams struct {
+		Versioned  bool
 		Info       *UploadInfoParams
 		SrcObjInfo *data.ObjectInfo
 		SrcBktInfo *data.BucketInfo

@@ -91,7 +98,7 @@ type (
 		ETag         string
 		LastModified string
 		PartNumber   int
-		Size         int64
+		Size         uint64
 	}

 	ListMultipartUploadsParams struct {

@@ -143,10 +150,10 @@ func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
 	info := &data.MultipartInfo{
 		Key:      p.Info.Key,
 		UploadID: p.Info.UploadID,
-		Owner:    n.Owner(ctx),
+		Owner:    n.gateOwner,
 		Created:  TimeNow(ctx),
 		Meta:     make(map[string]string, metaSize),
-		CopiesNumber:  p.CopiesNumber,
+		CopiesNumbers: p.CopiesNumbers,
 	}

 	for key, val := range p.Header {

@@ -175,14 +182,14 @@ func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
 func (n *layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, error) {
 	multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID)
 	if err != nil {
-		if stderrors.Is(err, ErrNodeNotFound) {
-			return "", errors.GetAPIError(errors.ErrNoSuchUpload)
+		if errors.Is(err, ErrNodeNotFound) {
+			return "", fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
 		}
 		return "", err
 	}

-	if p.Size > uploadMaxSize {
-		return "", errors.GetAPIError(errors.ErrEntityTooLarge)
+	if p.Size > UploadMaxSize {
+		return "", fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), p.Size, UploadMaxSize)
 	}

 	objInfo, err := n.uploadPart(ctx, multipartInfo, p)

@@ -196,67 +203,65 @@ func (n *layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, er
 func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInfo, p *UploadPartParams) (*data.ObjectInfo, error) {
 	encInfo := FormEncryptionInfo(multipartInfo.Meta)
 	if err := p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
-		n.log.Warn("mismatched obj encryptionInfo", zap.Error(err))
-		return nil, errors.GetAPIError(errors.ErrInvalidEncryptionParameters)
+		n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err))
+		return nil, s3errors.GetAPIError(s3errors.ErrInvalidEncryptionParameters)
 	}

 	bktInfo := p.Info.Bkt
 	prm := PrmObjectCreate{
 		Container:    bktInfo.CID,
-		Creator:      bktInfo.Owner,
 		Attributes:   make([][2]string, 2),
 		Payload:      p.Reader,
 		CreationTime: TimeNow(ctx),
-		CopiesNumber: multipartInfo.CopiesNumber,
+		CopiesNumber: multipartInfo.CopiesNumbers,
 	}

 	decSize := p.Size
 	if p.Info.Encryption.Enabled() {
-		r, encSize, err := encryptionReader(p.Reader, uint64(p.Size), p.Info.Encryption.Key())
+		r, encSize, err := encryptionReader(p.Reader, p.Size, p.Info.Encryption.Key())
 		if err != nil {
 			return nil, fmt.Errorf("failed to create ecnrypted reader: %w", err)
 		}
-		prm.Attributes = append(prm.Attributes, [2]string{AttributeDecryptedSize, strconv.FormatInt(p.Size, 10)})
+		prm.Attributes = append(prm.Attributes, [2]string{AttributeDecryptedSize, strconv.FormatUint(p.Size, 10)})
 		prm.Payload = r
-		p.Size = int64(encSize)
+		p.Size = encSize
 	}

 	prm.Attributes[0][0], prm.Attributes[0][1] = UploadIDAttributeName, p.Info.UploadID
 	prm.Attributes[1][0], prm.Attributes[1][1] = UploadPartNumberAttributeName, strconv.Itoa(p.PartNumber)

-	id, hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
+	size, id, hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
 	if err != nil {
 		return nil, err
 	}
+	if p.Info.Encryption.Enabled() {
+		size = decSize
+	}

-	reqInfo := api.GetReqInfo(ctx)
-	n.log.Debug("upload part",
-		zap.String("reqId", reqInfo.RequestID),
-		zap.String("bucket", bktInfo.Name), zap.Stringer("cid", bktInfo.CID),
-		zap.String("multipart upload", p.Info.UploadID),
-		zap.Int("part number", p.PartNumber), zap.String("object", p.Info.Key), zap.Stringer("oid", id))
+	n.reqLogger(ctx).Debug(logs.UploadPart,
+		zap.String("multipart upload", p.Info.UploadID), zap.Int("part number", p.PartNumber),
+		zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))

 	partInfo := &data.PartInfo{
 		Key:      p.Info.Key,
 		UploadID: p.Info.UploadID,
 		Number:   p.PartNumber,
 		OID:      id,
-		Size:     decSize,
+		Size:     size,
 		ETag:     hex.EncodeToString(hash),
 		Created:  prm.CreationTime,
 	}

 	oldPartID, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo)
-	oldPartIDNotFound := stderrors.Is(err, ErrNoNodeToRemove)
+	oldPartIDNotFound := errors.Is(err, ErrNoNodeToRemove)
 	if err != nil && !oldPartIDNotFound {
 		return nil, err
 	}
 	if !oldPartIDNotFound {
 		if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil {
-			n.log.Error("couldn't delete old part object", zap.Error(err),
-				zap.String("cnrID", bktInfo.CID.EncodeToString()),
-				zap.String("bucket name", bktInfo.Name),
-				zap.String("objID", oldPartID.EncodeToString()))
+			n.reqLogger(ctx).Error(logs.CouldntDeleteOldPartObject, zap.Error(err),
+				zap.String("cid", bktInfo.CID.EncodeToString()),
+				zap.String("oid", oldPartID.EncodeToString()))
 		}
 	}

@@ -277,91 +282,53 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
 func (n *layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.ObjectInfo, error) {
 	multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID)
 	if err != nil {
-		if stderrors.Is(err, ErrNodeNotFound) {
-			return nil, errors.GetAPIError(errors.ErrNoSuchUpload)
+		if errors.Is(err, ErrNodeNotFound) {
+			return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
 		}
 		return nil, err
 	}

 	size := p.SrcObjInfo.Size
+	srcObjectSize := p.SrcObjInfo.Size
+
+	if objSize, err := GetObjectSize(p.SrcObjInfo); err == nil {
+		srcObjectSize = objSize
+	}

 	if p.Range != nil {
-		size = int64(p.Range.End - p.Range.Start + 1)
-		if p.Range.End > uint64(p.SrcObjInfo.Size) {
-			return nil, errors.GetAPIError(errors.ErrInvalidCopyPartRangeSource)
+		size = p.Range.End - p.Range.Start + 1
+		if p.Range.End > srcObjectSize {
+			return nil, fmt.Errorf("%w: %d-%d/%d", s3errors.GetAPIError(s3errors.ErrInvalidCopyPartRangeSource), p.Range.Start, p.Range.End, srcObjectSize)
 		}
 	}
-	if size > uploadMaxSize {
-		return nil, errors.GetAPIError(errors.ErrEntityTooLarge)
+	if size > UploadMaxSize {
+		return nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), size, UploadMaxSize)
 	}

-	pr, pw := io.Pipe()
-
-	go func() {
-		err = n.GetObject(ctx, &GetObjectParams{
-			ObjectInfo: p.SrcObjInfo,
-			Writer:     pw,
-			Range:      p.Range,
-			BucketInfo: p.SrcBktInfo,
-		})
-
-		if err = pw.CloseWithError(err); err != nil {
-			n.log.Error("could not get object", zap.Error(err))
-		}
-	}()
+	objPayload, err := n.GetObject(ctx, &GetObjectParams{
+		ObjectInfo: p.SrcObjInfo,
+		Versioned:  p.Versioned,
+		Range:      p.Range,
+		BucketInfo: p.SrcBktInfo,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("get object to upload copy: %w", err)
+	}

 	params := &UploadPartParams{
 		Info:       p.Info,
 		PartNumber: p.PartNumber,
 		Size:       size,
-		Reader:     pr,
+		Reader:     objPayload,
 	}

 	return n.uploadPart(ctx, multipartInfo, params)
 }

-// implements io.Reader of payloads of the object list stored in the FrostFS network.
-type multiObjectReader struct {
-	ctx context.Context
-
-	layer *layer
-
-	prm getParams
-
-	curReader io.Reader
-
-	parts []*data.PartInfo
-}
-
-func (x *multiObjectReader) Read(p []byte) (n int, err error) {
-	if x.curReader != nil {
-		n, err = x.curReader.Read(p)
-		if !stderrors.Is(err, io.EOF) {
-			return n, err
-		}
-	}
-
-	if len(x.parts) == 0 {
-		return n, io.EOF
-	}
-
-	x.prm.oid = x.parts[0].OID
-
-	x.curReader, err = x.layer.initObjectPayloadReader(x.ctx, x.prm)
-	if err != nil {
-		return n, fmt.Errorf("init payload reader for the next part: %w", err)
-	}
-
-	x.parts = x.parts[1:]
-
-	next, err := x.Read(p[n:])
-
-	return n + next, err
-}
-
 func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipartParams) (*UploadData, *data.ExtendedObjectInfo, error) {
 	for i := 1; i < len(p.Parts); i++ {
 		if p.Parts[i].PartNumber <= p.Parts[i-1].PartNumber {
-			return nil, nil, errors.GetAPIError(errors.ErrInvalidPartOrder)
+			return nil, nil, s3errors.GetAPIError(s3errors.ErrInvalidPartOrder)
 		}
 	}

@@ -372,10 +339,10 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
 	encInfo := FormEncryptionInfo(multipartInfo.Meta)

 	if len(partsInfo) < len(p.Parts) {
-		return nil, nil, errors.GetAPIError(errors.ErrInvalidPart)
+		return nil, nil, fmt.Errorf("%w: found %d parts, need %d", s3errors.GetAPIError(s3errors.ErrInvalidPart), len(partsInfo), len(p.Parts))
 	}

-	var multipartObjetSize int64
+	var multipartObjetSize uint64
 	var encMultipartObjectSize uint64
 	parts := make([]*data.PartInfo, 0, len(p.Parts))

@@ -383,17 +350,19 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
 	for i, part := range p.Parts {
 		partInfo := partsInfo[part.PartNumber]
 		if partInfo == nil || part.ETag != partInfo.ETag {
-			return nil, nil, errors.GetAPIError(errors.ErrInvalidPart)
+			return nil, nil, fmt.Errorf("%w: unknown part %d or etag mismatched", s3errors.GetAPIError(s3errors.ErrInvalidPart), part.PartNumber)
 		}
+		delete(partsInfo, part.PartNumber)

 		// for the last part we have no minimum size limit
-		if i != len(p.Parts)-1 && partInfo.Size < uploadMinSize {
-			return nil, nil, errors.GetAPIError(errors.ErrEntityTooSmall)
+		if i != len(p.Parts)-1 && partInfo.Size < UploadMinSize {
+			return nil, nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooSmall), partInfo.Size, UploadMinSize)
 		}
 		parts = append(parts, partInfo)
 		multipartObjetSize += partInfo.Size // even if encryption is enabled size is actual (decrypted)

 		if encInfo.Enabled {
-			encPartSize, err := sio.EncryptedSize(uint64(partInfo.Size))
+			encPartSize, err := sio.EncryptedSize(partInfo.Size)
 			if err != nil {
 				return nil, nil, fmt.Errorf("compute encrypted size: %w", err)
 			}

@@ -411,6 +380,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar

 	initMetadata := make(map[string]string, len(multipartInfo.Meta)+1)
 	initMetadata[UploadCompletedParts] = completedPartsHeader.String()
+	initMetadata[MultipartObjectSize] = strconv.FormatUint(multipartObjetSize, 10)

 	uploadData := &UploadData{
 		TagSet: make(map[string]string),

@@ -430,43 +400,39 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
 		initMetadata[AttributeEncryptionAlgorithm] = encInfo.Algorithm
 		initMetadata[AttributeHMACKey] = encInfo.HMACKey
 		initMetadata[AttributeHMACSalt] = encInfo.HMACSalt
-		initMetadata[AttributeDecryptedSize] = strconv.FormatInt(multipartObjetSize, 10)
-		multipartObjetSize = int64(encMultipartObjectSize)
+		initMetadata[AttributeDecryptedSize] = strconv.FormatUint(multipartObjetSize, 10)
+		multipartObjetSize = encMultipartObjectSize
 	}

-	r := &multiObjectReader{
-		ctx:   ctx,
-		layer: n,
-		parts: parts,
+	partsData, err := json.Marshal(parts)
+	if err != nil {
+		return nil, nil, fmt.Errorf("marshal parst for combined object: %w", err)
 	}

-	r.prm.bktInfo = p.Info.Bkt
-
 	extObjInfo, err := n.PutObject(ctx, &PutObjectParams{
 		BktInfo:    p.Info.Bkt,
 		Object:     p.Info.Key,
-		Reader:     r,
+		Reader:     bytes.NewReader(partsData),
 		Header:     initMetadata,
+		Size:       multipartObjetSize,
 		Encryption: p.Info.Encryption,
-		CopiesNumber: multipartInfo.CopiesNumber,
+		CopiesNumbers: multipartInfo.CopiesNumbers,
 	})
 	if err != nil {
-		n.log.Error("could not put a completed object (multipart upload)",
+		n.reqLogger(ctx).Error(logs.CouldNotPutCompletedObject,
 			zap.String("uploadID", p.Info.UploadID),
 			zap.String("uploadKey", p.Info.Key),
 			zap.Error(err))

-		return nil, nil, errors.GetAPIError(errors.ErrInternalError)
+		return nil, nil, s3errors.GetAPIError(s3errors.ErrInternalError)
 	}

 	var addr oid.Address
 	addr.SetContainer(p.Info.Bkt.CID)
 	for _, partInfo := range partsInfo {
 		if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil {
-			n.log.Warn("could not delete upload part",
-				zap.Stringer("object id", &partInfo.OID),
-				zap.Stringer("bucket id", p.Info.Bkt.CID),
+			n.reqLogger(ctx).Warn(logs.CouldNotDeleteUploadPart,
				zap.Stringer("cid", p.Info.Bkt.CID), zap.Stringer("oid", &partInfo.OID),
 				zap.Error(err))
 		}
 		addr.SetObject(partInfo.OID)
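With this change a completed multipart object stores the JSON-encoded part list as its payload, while the logical size travels in the S3-Multipart-Object-Size attribute. A standalone sketch of what such a payload might look like (the stand-in struct and its JSON keys are illustrative; the real encoding is whatever data.PartInfo marshals to):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// Illustrative stand-in for data.PartInfo, with only enough fields
	// to show the shape of a combined object's payload.
	type partInfo struct {
		OID    string `json:"oid"`
		Size   uint64 `json:"size"`
		Number int    `json:"number"`
		ETag   string `json:"etag"`
	}

	func main() {
		parts := []partInfo{
			{OID: "6CB3...", Size: 5 * 1024 * 1024, Number: 1, ETag: "a1b2..."},
			{OID: "9F0D...", Size: 3 * 1024 * 1024, Number: 2, ETag: "c3d4..."},
		}

		payload, _ := json.Marshal(parts)
		// This JSON array is what bytes.NewReader(partsData) feeds into
		// PutObject; readers detect the combined object via the
		// MultipartObjectSize header and stitch the listed parts back
		// together with multiObjectReader.
		fmt.Println(string(payload))
	}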
@@ -544,7 +510,7 @@ func (n *layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) e

 	for _, info := range parts {
 		if err = n.objectDelete(ctx, p.Bkt, info.OID); err != nil {
-			n.log.Warn("couldn't delete part", zap.String("cid", p.Bkt.CID.EncodeToString()),
+			n.reqLogger(ctx).Warn(logs.CouldntDeletePart, zap.String("cid", p.Bkt.CID.EncodeToString()),
 				zap.String("oid", info.OID.EncodeToString()), zap.Int("part number", info.Number), zap.Error(err))
 		}
 	}

@@ -561,8 +527,8 @@ func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn

 	encInfo := FormEncryptionInfo(multipartInfo.Meta)
 	if err = p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
-		n.log.Warn("mismatched obj encryptionInfo", zap.Error(err))
-		return nil, errors.GetAPIError(errors.ErrInvalidEncryptionParameters)
+		n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err))
+		return nil, s3errors.GetAPIError(s3errors.ErrInvalidEncryptionParameters)
 	}

 	res.Owner = multipartInfo.Owner

@@ -582,6 +548,10 @@ func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
 		return parts[i].PartNumber < parts[j].PartNumber
 	})

+	if len(parts) == 0 || p.PartNumberMarker >= parts[len(parts)-1].PartNumber {
+		res.Parts = make([]*Part, 0)
+		return &res, nil
+	}
 	if p.PartNumberMarker != 0 {
 		for i, part := range parts {
 			if part.PartNumber > p.PartNumberMarker {

@@ -605,8 +575,8 @@ func (n *layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.
 	multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Bkt, p.Key, p.UploadID)
 	if err != nil {
-		if stderrors.Is(err, ErrNodeNotFound) {
-			return nil, nil, errors.GetAPIError(errors.ErrNoSuchUpload)
+		if errors.Is(err, ErrNodeNotFound) {
+			return nil, nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
 		}
 		return nil, nil, err
 	}

@@ -625,12 +595,8 @@ func (n *layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.
 		oids[i] = part.OID.EncodeToString()
 	}

-	reqInfo := api.GetReqInfo(ctx)
-	n.log.Debug("part details",
-		zap.String("reqId", reqInfo.RequestID),
-		zap.String("bucket", p.Bkt.Name),
+	n.reqLogger(ctx).Debug(logs.PartDetails,
 		zap.Stringer("cid", p.Bkt.CID),
 		zap.String("object", p.Key),
 		zap.String("upload id", p.UploadID),
 		zap.Ints("part numbers", partsNumbers),
 		zap.Strings("oids", oids))
@@ -7,16 +7,17 @@ import (
 	errorsStd "errors"
 	"fmt"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"go.uber.org/zap"
 )

 type PutBucketNotificationConfigurationParams struct {
-	RequestInfo   *api.ReqInfo
+	RequestInfo   *middleware.ReqInfo
 	BktInfo       *data.BucketInfo
 	Configuration *data.NotificationConfiguration
-	CopiesNumber  uint32
+	CopiesNumbers []uint32
 }

 func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBucketNotificationConfigurationParams) error {

@@ -27,14 +28,13 @@ func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBu

 	prm := PrmObjectCreate{
 		Container:    p.BktInfo.CID,
-		Creator:      p.BktInfo.Owner,
 		Payload:      bytes.NewReader(confXML),
 		Filepath:     p.BktInfo.NotificationConfigurationObjectName(),
 		CreationTime: TimeNow(ctx),
-		CopiesNumber: p.CopiesNumber,
+		CopiesNumber: p.CopiesNumbers,
 	}

-	objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+	_, objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
 	if err != nil {
 		return err
 	}

@@ -47,20 +47,19 @@ func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBu

 	if !objIDToDeleteNotFound {
 		if err = n.objectDelete(ctx, p.BktInfo, objIDToDelete); err != nil {
-			n.log.Error("couldn't delete notification configuration object", zap.Error(err),
-				zap.String("cnrID", p.BktInfo.CID.EncodeToString()),
-				zap.String("bucket name", p.BktInfo.Name),
-				zap.String("objID", objIDToDelete.EncodeToString()))
+			n.reqLogger(ctx).Error(logs.CouldntDeleteNotificationConfigurationObject, zap.Error(err),
+				zap.String("cid", p.BktInfo.CID.EncodeToString()),
+				zap.String("oid", objIDToDelete.EncodeToString()))
 		}
 	}

-	n.cache.PutNotificationConfiguration(n.Owner(ctx), p.BktInfo, p.Configuration)
+	n.cache.PutNotificationConfiguration(n.BearerOwner(ctx), p.BktInfo, p.Configuration)

 	return nil
 }

 func (n *layer) GetBucketNotificationConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (*data.NotificationConfiguration, error) {
-	owner := n.Owner(ctx)
+	owner := n.BearerOwner(ctx)
 	if conf := n.cache.GetNotificationConfiguration(owner, bktInfo); conf != nil {
 		return conf, nil
 	}
@@ -4,6 +4,7 @@ import (
 	"context"
 	"crypto/sha256"
 	"encoding/hex"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"

@@ -14,14 +15,15 @@ import (
 	"strings"
 	"sync"

-	"github.com/TrueCloudLab/frostfs-s3-gw/api"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/cache"
-	"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-	apiErrors "github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"github.com/TrueCloudLab/frostfs-sdk-go/client"
-	cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
-	"github.com/TrueCloudLab/frostfs-sdk-go/object"
-	oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/minio/sio"
 	"github.com/panjf2000/ants/v2"
 	"go.uber.org/zap"

@@ -32,6 +34,14 @@ type (
 		// payload range
 		off, ln uint64

+		objInfo *data.ObjectInfo
 		bktInfo *data.BucketInfo
 	}

+	getFrostFSParams struct {
+		// payload range
+		off, ln uint64
+
+		oid     oid.ID
+		bktInfo *data.BucketInfo
+	}

@@ -98,9 +108,54 @@ func (n *layer) objectHead(ctx context.Context, bktInfo *data.BucketInfo, idObj
 	return res.Head, nil
 }

+func (n *layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Reader, error) {
+	if _, isCombined := p.objInfo.Headers[MultipartObjectSize]; !isCombined {
+		return n.initFrostFSObjectPayloadReader(ctx, getFrostFSParams{
+			off:     p.off,
+			ln:      p.ln,
+			oid:     p.objInfo.ID,
+			bktInfo: p.bktInfo,
+		})
+	}
+
+	combinedObj, err := n.objectGet(ctx, p.bktInfo, p.objInfo.ID)
+	if err != nil {
+		return nil, fmt.Errorf("get combined object '%s': %w", p.objInfo.ID.EncodeToString(), err)
+	}
+
+	var parts []*data.PartInfo
+	if err = json.Unmarshal(combinedObj.Payload(), &parts); err != nil {
+		return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
+	}
+
+	isEncrypted := FormEncryptionInfo(p.objInfo.Headers).Enabled
+	objParts := make([]partObj, len(parts))
+	for i, part := range parts {
+		size := part.Size
+		if isEncrypted {
+			if size, err = sio.EncryptedSize(part.Size); err != nil {
+				return nil, fmt.Errorf("compute encrypted size: %w", err)
+			}
+		}
+
+		objParts[i] = partObj{
+			OID:  part.OID,
+			Size: size,
+		}
+	}
+
+	return newMultiObjectReader(ctx, multiObjectReaderConfig{
+		layer:   n,
+		off:     p.off,
+		ln:      p.ln,
+		parts:   objParts,
+		bktInfo: p.bktInfo,
+	})
+}
+
 // initializes payload reader of the FrostFS object.
 // Zero range corresponds to full payload (panics if only offset is set).
-func (n *layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Reader, error) {
+func (n *layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFSParams) (io.Reader, error) {
 	prm := PrmObjectRead{
 		Container: p.bktInfo.CID,
 		Object:    p.oid,
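The part sizes recorded in the combined object's payload are decrypted sizes, so for encrypted objects the loop above recomputes each part's stored size via sio.EncryptedSize before handing the list to multiObjectReader. A quick standalone illustration of that mapping:

	package main

	import (
		"fmt"

		"github.com/minio/sio"
	)

	func main() {
		const plain = 5 * 1024 * 1024 // one 5 MiB part, decrypted size

		enc, err := sio.EncryptedSize(plain)
		if err != nil {
			panic(err)
		}

		// DARE adds fixed per-package overhead, so the stored (encrypted)
		// part is slightly larger than the plaintext; range math over the
		// parts must use this size, not the decrypted one.
		fmt.Printf("decrypted: %d bytes, encrypted: %d bytes\n", plain, enc)
	}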
@ -170,7 +225,7 @@ func ParseCompletedPartHeader(hdr string) (*Part, error) {
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid completed part number '%s': %w", partInfo[0], err)
|
||||
}
|
||||
size, err := strconv.Atoi(partInfo[1])
|
||||
size, err := strconv.ParseUint(partInfo[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid completed part size '%s': %w", partInfo[1], err)
|
||||
}
|
||||
|
@ -178,39 +233,29 @@ func ParseCompletedPartHeader(hdr string) (*Part, error) {
|
|||
return &Part{
|
||||
ETag: partInfo[2],
|
||||
PartNumber: num,
|
||||
Size: int64(size),
|
||||
Size: size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PutObject stores object into FrostFS, took payload from io.Reader.
|
||||
func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.ExtendedObjectInfo, error) {
|
||||
owner := n.Owner(ctx)
|
||||
|
||||
bktSettings, err := n.GetBucketSettings(ctx, p.BktInfo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't get versioning settings object: %w", err)
|
||||
}
|
||||
|
||||
newVersion := &data.NodeVersion{
|
||||
BaseNodeVersion: data.BaseNodeVersion{
|
||||
FilePath: p.Object,
|
||||
Size: p.Size,
|
||||
},
|
||||
IsUnversioned: !bktSettings.VersioningEnabled(),
|
||||
}
|
||||
|
||||
r := p.Reader
|
||||
if p.Encryption.Enabled() {
|
||||
p.Header[AttributeDecryptedSize] = strconv.FormatInt(p.Size, 10)
|
||||
p.Header[AttributeDecryptedSize] = strconv.FormatUint(p.Size, 10)
|
||||
if err = addEncryptionHeaders(p.Header, p.Encryption); err != nil {
|
||||
return nil, fmt.Errorf("add encryption header: %w", err)
|
||||
}
|
||||
|
||||
var encSize uint64
|
||||
if r, encSize, err = encryptionReader(p.Reader, uint64(p.Size), p.Encryption.Key()); err != nil {
|
||||
if r, encSize, err = encryptionReader(p.Reader, p.Size, p.Encryption.Key()); err != nil {
|
||||
return nil, fmt.Errorf("create encrypter: %w", err)
|
||||
}
|
||||
p.Size = int64(encSize)
|
||||
p.Size = encSize
|
||||
}
|
||||
|
||||
if r != nil {
|
||||
|
@ -229,12 +274,11 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
|
||||
prm := PrmObjectCreate{
|
||||
Container: p.BktInfo.CID,
|
||||
Creator: owner,
|
||||
PayloadSize: uint64(p.Size),
|
||||
PayloadSize: p.Size,
|
||||
Filepath: p.Object,
|
||||
Payload: r,
|
||||
CreationTime: TimeNow(ctx),
|
||||
CopiesNumber: p.CopiesNumber,
|
||||
CopiesNumber: p.CopiesNumbers,
|
||||
}
|
||||
|
||||
prm.Attributes = make([][2]string, 0, len(p.Header))
|
||||
|
@ -243,19 +287,24 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
prm.Attributes = append(prm.Attributes, [2]string{k, v})
|
||||
}
|
||||
|
||||
id, hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
|
||||
size, id, hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
n.log.Debug("put object",
|
||||
zap.String("reqId", reqInfo.RequestID),
|
||||
zap.String("bucket", p.BktInfo.Name), zap.Stringer("cid", p.BktInfo.CID),
|
||||
zap.String("object", p.Object), zap.Stringer("oid", id))
|
||||
n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
|
||||
|
||||
newVersion := &data.NodeVersion{
|
||||
BaseNodeVersion: data.BaseNodeVersion{
|
||||
OID: id,
|
||||
ETag: hex.EncodeToString(hash),
|
||||
FilePath: p.Object,
|
||||
Size: size,
|
||||
},
|
||||
IsUnversioned: !bktSettings.VersioningEnabled(),
|
||||
IsCombined: p.Header[MultipartObjectSize] != "",
|
||||
}
|
||||
|
||||
newVersion.OID = id
|
||||
newVersion.ETag = hex.EncodeToString(hash)
|
||||
if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
|
||||
return nil, fmt.Errorf("couldn't add new verion to tree service: %w", err)
|
||||
}
|
||||
|
@ -268,7 +317,7 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
VersionID: id.EncodeToString(),
|
||||
},
|
||||
NewLock: p.Lock,
|
||||
CopiesNumber: p.CopiesNumber,
|
||||
CopiesNumbers: p.CopiesNumbers,
|
||||
NodeVersion: newVersion, // provide new version to make one less tree service call in PutLockInfo
|
||||
}
|
||||
|
||||
|
@ -283,10 +332,10 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
ID: id,
|
||||
CID: p.BktInfo.CID,
|
||||
|
||||
Owner: owner,
|
||||
Owner: n.gateOwner,
|
||||
Bucket: p.BktInfo.Name,
|
||||
Name: p.Object,
|
||||
Size: p.Size,
|
||||
Size: size,
|
||||
Created: prm.CreationTime,
|
||||
Headers: p.Header,
|
||||
ContentType: p.Header[api.ContentType],
|
||||
|
@ -298,13 +347,13 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
NodeVersion: newVersion,
|
||||
}
|
||||
|
||||
n.cache.PutObjectWithName(owner, extendedObjInfo)
|
||||
n.cache.PutObjectWithName(n.BearerOwner(ctx), extendedObjInfo)
|
||||
|
||||
return extendedObjInfo, nil
|
||||
}
|
||||
|
||||
func (n *layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.BucketInfo, objectName string) (*data.ExtendedObjectInfo, error) {
|
||||
owner := n.Owner(ctx)
|
||||
owner := n.BearerOwner(ctx)
|
||||
if extObjInfo := n.cache.GetLastObject(owner, bkt.Name, objectName); extObjInfo != nil {
|
||||
return extObjInfo, nil
|
||||
}
|
||||
|
@@ -312,17 +361,20 @@ func (n *layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.Bucke
     node, err := n.treeService.GetLatestVersion(ctx, bkt, objectName)
     if err != nil {
         if errors.Is(err, ErrNodeNotFound) {
-            return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchKey)
+            return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error())
         }
         return nil, err
     }

     if node.IsDeleteMarker() {
-        return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchKey)
+        return nil, fmt.Errorf("%w: found version is delete marker", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey))
     }

     meta, err := n.objectHead(ctx, bkt, node.OID)
     if err != nil {
+        if client.IsErrObjectNotFound(err) {
+            return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error())
+        }
         return nil, err
     }
     objInfo := objectInfoFromMeta(bkt, meta)
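Editorial note on the error handling introduced above: wrapping the canned S3 error with fmt.Errorf("%w: ...") keeps the API error matchable by errors.Is while the message now carries the underlying cause. A minimal, self-contained sketch of the idea (apiError and errNoSuchKey are illustrative stand-ins, not the gateway's actual types):

package main

import (
    "errors"
    "fmt"
)

// apiError stands in for the gateway's API error type.
type apiError struct{ code string }

func (e apiError) Error() string { return e.code }

var errNoSuchKey = apiError{code: "NoSuchKey"}

func main() {
    cause := errors.New("node not found in tree service")

    // Wrap with %w: the API error stays matchable, the cause stays visible.
    err := fmt.Errorf("%w: %s", errNoSuchKey, cause.Error())

    fmt.Println(err)                          // NoSuchKey: node not found in tree service
    fmt.Println(errors.Is(err, errNoSuchKey)) // true, so handlers can still map it to an S3 response
}

The same pattern recurs throughout the rest of this diff wherever a bare GetAPIError return used to discard the original error.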
@@ -344,7 +396,7 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
         foundVersion, err = n.treeService.GetUnversioned(ctx, bkt, p.Object)
         if err != nil {
             if errors.Is(err, ErrNodeNotFound) {
-                return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion)
+                return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
             }
             return nil, err
         }
@@ -361,11 +413,11 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
             }
         }
         if foundVersion == nil {
-            return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion)
+            return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion))
         }
     }

-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     if extObjInfo := n.cache.GetObject(owner, newAddress(bkt.CID, foundVersion.OID)); extObjInfo != nil {
         return extObjInfo, nil
     }
@@ -373,7 +425,7 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
     meta, err := n.objectHead(ctx, bkt, foundVersion.OID)
     if err != nil {
         if client.IsErrObjectNotFound(err) {
-            return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion)
+            return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
         }
         return nil, err
     }
@@ -405,17 +457,24 @@ func (n *layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idOb

 // objectPutAndHash prepare auth parameters and invoke frostfs.CreateObject.
 // Returns object ID and payload sha256 hash.
-func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (oid.ID, []byte, error) {
+func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, error) {
     n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
+    prm.ClientCut = n.features.ClientCut()
+    var size uint64
     hash := sha256.New()
     prm.Payload = wrapReader(prm.Payload, 64*1024, func(buf []byte) {
+        size += uint64(len(buf))
        hash.Write(buf)
     })
     id, err := n.frostFS.CreateObject(ctx, prm)
     if err != nil {
-        return oid.ID{}, nil, err
+        if _, errDiscard := io.Copy(io.Discard, prm.Payload); errDiscard != nil {
+            n.reqLogger(ctx).Warn(logs.FailedToDiscardPutPayloadProbablyGoroutineLeaks, zap.Error(errDiscard))
+        }
+        return 0, oid.ID{}, nil, err
     }
-    return id, hash.Sum(nil), nil
+    return size, id, hash.Sum(nil), nil
 }

 // ListObjectsV1 returns objects in a bucket for requests of Version 1.
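Editorial note: objectPutAndHash now derives the stored size as a side effect of streaming, since the payload reader is wrapped so that every chunk passing through is both hashed and counted. The sketch below shows one way such a wrapper can be built; it is an illustration under assumed names (observedReader), not the gateway's wrapReader implementation.

package main

import (
    "bytes"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
)

// observedReader invokes observe on every chunk read through it.
type observedReader struct {
    r       io.Reader
    observe func([]byte)
}

func (o *observedReader) Read(p []byte) (int, error) {
    n, err := o.r.Read(p)
    if n > 0 {
        o.observe(p[:n])
    }
    return n, err
}

func main() {
    payload := bytes.NewReader([]byte("example payload"))

    var size uint64
    hash := sha256.New()
    wrapped := &observedReader{r: payload, observe: func(buf []byte) {
        size += uint64(len(buf)) // count bytes exactly as they stream by
        hash.Write(buf)          // and hash them in the same pass
    }}

    // Stand-in for frostFS.CreateObject consuming the payload.
    if _, err := io.Copy(io.Discard, wrapped); err != nil {
        panic(err)
    }

    fmt.Println(size, hex.EncodeToString(hash.Sum(nil)))
}

The io.Copy(io.Discard, prm.Payload) in the error branch serves a different purpose: it drains the wrapped reader so that whatever goroutine feeds it can finish instead of blocking forever, which is why the log message mentions probable goroutine leaks.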
@@ -486,7 +545,7 @@ func (n *layer) getLatestObjectsVersions(ctx context.Context, p allObjectParams)
         return nil, nil, nil
     }

-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     cacheKey := cache.CreateObjectsListCacheKey(p.Bucket.CID, p.Prefix, true)
     nodeVersions := n.cache.GetList(owner, cacheKey)

@@ -560,7 +619,8 @@ func nodesGenerator(ctx context.Context, p allObjectParams, nodeVersions []*data
 }

 func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams, input <-chan *data.NodeVersion) (<-chan *data.ObjectInfo, error) {
-    pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{n.log}))
+    reqLog := n.reqLogger(ctx)
+    pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
     if err != nil {
         return nil, fmt.Errorf("coudln't init go pool for listing: %w", err)
     }
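Editorial note: ants.WithLogger takes an ants.Logger, a single-method Printf interface, so zap has to be adapted; logWrapper is presumably that adapter, now built around the per-request logger so that pool messages carry request-scoped fields. A plausible sketch assuming the ants v2 and zap APIs (the gateway's real logWrapper may differ):

package main

import (
    "fmt"

    "github.com/panjf2000/ants/v2"
    "go.uber.org/zap"
)

// logWrapper adapts a zap logger to the ants.Logger interface.
type logWrapper struct {
    log *zap.Logger
}

func (l *logWrapper) Printf(format string, args ...interface{}) {
    l.log.Info(fmt.Sprintf(format, args...))
}

func main() {
    log, _ := zap.NewDevelopment()

    pool, err := ants.NewPool(10, ants.WithLogger(&logWrapper{log: log}))
    if err != nil {
        panic(err)
    }
    defer pool.Release()

    _ = pool.Submit(func() { log.Info("task ran on the pool") })
}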
@@ -587,8 +647,8 @@ func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams,
             if oi == nil {
                 // try to get object again
                 if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter); oi == nil {
-                    // form object info with data that the tree node contains
-                    oi = getPartialObjectInfo(p.Bucket, node)
+                    // do not process object which are definitely missing in object service
+                    return
                 }
             }
             select {
@@ -598,7 +658,7 @@ func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams,
             })
             if err != nil {
                 wg.Done()
-                n.log.Warn("failed to submit task to pool", zap.Error(err))
+                reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
             }
         }(node)
     }
@@ -610,22 +670,10 @@ func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams,
     return objCh, nil
 }

-// getPartialObjectInfo form data.ObjectInfo using data available in data.NodeVersion.
-func getPartialObjectInfo(bktInfo *data.BucketInfo, node *data.NodeVersion) *data.ObjectInfo {
-    return &data.ObjectInfo{
-        ID:      node.OID,
-        CID:     bktInfo.CID,
-        Bucket:  bktInfo.Name,
-        Name:    node.FilePath,
-        Size:    node.Size,
-        HashSum: node.ETag,
-    }
-}
-
 func (n *layer) bucketNodeVersions(ctx context.Context, bkt *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
     var err error

-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     cacheKey := cache.CreateObjectsListCacheKey(bkt.CID, prefix, false)
     nodeVersions := n.cache.GetList(owner, cacheKey)

@@ -745,14 +793,14 @@ func (n *layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo
         return oiDir
     }

-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     if extInfo := n.cache.GetObject(owner, newAddress(bktInfo.CID, node.OID)); extInfo != nil {
         return extInfo.ObjectInfo
     }

     meta, err := n.objectHead(ctx, bktInfo, node.OID)
     if err != nil {
-        n.log.Warn("could not fetch object meta", zap.Error(err))
+        n.reqLogger(ctx).Warn(logs.CouldNotFetchObjectMeta, zap.Error(err))
         return nil
     }

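Editorial note: the recurring n.Owner(ctx) to n.BearerOwner(ctx) substitution in these files keys every cache lookup (objects, listings, locks, settings, tagging) by the bearer-token issuer rather than by the gate's own identity, so different users behind one gateway cannot observe each other's cached entries. A toy model of the lookup rule; every type below is a stand-in, not the gateway's:

package main

import "fmt"

// Stand-ins for user.ID, the request's access box, and the layer.
type userID string

type box struct{ bearerIssuer *userID }

type layer struct{ gateOwner userID }

// bearerOwner picks the cache-partition key: the bearer-token issuer when a
// token is present, otherwise the gate's own identity.
func (n *layer) bearerOwner(b *box) userID {
    if b != nil && b.bearerIssuer != nil {
        return *b.bearerIssuer
    }
    return n.gateOwner
}

func main() {
    n := &layer{gateOwner: "gate"}
    alice := userID("alice")

    fmt.Println(n.bearerOwner(&box{bearerIssuer: &alice})) // alice: per-user partition
    fmt.Println(n.bearerOwner(nil))                        // gate: anonymous requests share one partition
}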

@@ -9,20 +9,20 @@ import (
     "strconv"
     "time"

-    "github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-    "github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-    oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
+    "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 )

 const (
     AttributeComplianceMode  = ".s3-compliance-mode"
-    AttributeExpirationEpoch = "__NEOFS__EXPIRATION_EPOCH"
 )

 type PutLockInfoParams struct {
     ObjVersion  *ObjectVersion
     NewLock     *data.ObjectLock
-    CopiesNumber uint32
+    CopiesNumbers []uint32
     NodeVersion *data.NodeVersion // optional
 }
@@ -68,7 +68,7 @@ func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
         }
     }
     lock := &data.ObjectLock{Retention: newLock.Retention}
-    retentionOID, err := n.putLockObject(ctx, p.ObjVersion.BktInfo, versionNode.OID, lock, p.CopiesNumber)
+    retentionOID, err := n.putLockObject(ctx, p.ObjVersion.BktInfo, versionNode.OID, lock, p.CopiesNumbers)
     if err != nil {
         return err
     }
@@ -78,7 +78,7 @@ func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
     if newLock.LegalHold != nil {
         if newLock.LegalHold.Enabled && !lockInfo.IsLegalHoldSet() {
             lock := &data.ObjectLock{LegalHold: newLock.LegalHold}
-            legalHoldOID, err := n.putLockObject(ctx, p.ObjVersion.BktInfo, versionNode.OID, lock, p.CopiesNumber)
+            legalHoldOID, err := n.putLockObject(ctx, p.ObjVersion.BktInfo, versionNode.OID, lock, p.CopiesNumbers)
             if err != nil {
                 return err
             }
@@ -95,14 +95,14 @@ func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
         return fmt.Errorf("couldn't put lock into tree: %w", err)
     }

-    n.cache.PutLockInfo(n.Owner(ctx), lockObjectKey(p.ObjVersion), lockInfo)
+    n.cache.PutLockInfo(n.BearerOwner(ctx), lockObjectKey(p.ObjVersion), lockInfo)

     return nil
 }

 func (n *layer) getNodeVersionFromCacheOrFrostfs(ctx context.Context, objVersion *ObjectVersion) (nodeVersion *data.NodeVersion, err error) {
     // check cache if node version is stored inside extendedObjectVersion
-    nodeVersion = n.getNodeVersionFromCache(n.Owner(ctx), objVersion)
+    nodeVersion = n.getNodeVersionFromCache(n.BearerOwner(ctx), objVersion)
     if nodeVersion == nil {
         // else get node version from tree service
         return n.getNodeVersion(ctx, objVersion)
@@ -111,10 +111,9 @@ func (n *layer) getNodeVersionFromCacheOrFrostfs(ctx context.Context, objVersion
     return nodeVersion, nil
 }

-func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, lock *data.ObjectLock, copiesNumber uint32) (oid.ID, error) {
+func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, lock *data.ObjectLock, copiesNumber []uint32) (oid.ID, error) {
     prm := PrmObjectCreate{
         Container:    bktInfo.CID,
-        Creator:      bktInfo.Owner,
         Locks:        []oid.ID{objID},
         CreationTime: TimeNow(ctx),
         CopiesNumber: copiesNumber,
@@ -126,12 +125,12 @@ func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, obj
         return oid.ID{}, err
     }

-    id, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
+    _, id, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
     return id, err
 }

 func (n *layer) GetLockInfo(ctx context.Context, objVersion *ObjectVersion) (*data.LockInfo, error) {
-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     if lockInfo := n.cache.GetLockInfo(owner, lockObjectKey(objVersion)); lockInfo != nil {
         return lockInfo, nil
     }
@@ -155,7 +154,7 @@ func (n *layer) GetLockInfo(ctx context.Context, objVersion *ObjectVersion) (*da
 }

 func (n *layer) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSConfiguration, error) {
-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     if cors := n.cache.GetCORS(owner, bkt); cors != nil {
         return cors, nil
     }
@@ -167,7 +166,7 @@ func (n *layer) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSCo
     }

     if objIDNotFound {
-        return nil, errors.GetAPIError(errors.ErrNoSuchCORSConfiguration)
+        return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchCORSConfiguration), err.Error())
     }

     obj, err := n.objectGet(ctx, bkt, objID)
@@ -192,7 +191,7 @@ func lockObjectKey(objVersion *ObjectVersion) string {
 }

 func (n *layer) GetBucketSettings(ctx context.Context, bktInfo *data.BucketInfo) (*data.BucketSettings, error) {
-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     if settings := n.cache.GetSettings(owner, bktInfo); settings != nil {
         return settings, nil
     }
@@ -215,7 +214,7 @@ func (n *layer) PutBucketSettings(ctx context.Context, p *PutSettingsParams) err
         return fmt.Errorf("failed to get settings node: %w", err)
     }

-    n.cache.PutSettings(n.Owner(ctx), p.BktInfo, p.Settings)
+    n.cache.PutSettings(n.BearerOwner(ctx), p.BktInfo, p.Settings)

     return nil
 }
@@ -238,7 +237,7 @@ func (n *layer) attributesFromLock(ctx context.Context, lock *data.ObjectLock) (
     }

     if lock.LegalHold != nil && lock.LegalHold.Enabled {
-        // todo: (@KirillovDenis) reconsider this when FrostFS will support Legal Hold https://github.com/TrueCloudLab/frostfs-contract/issues/2
+        // todo: (@KirillovDenis) reconsider this when FrostFS will support Legal Hold https://git.frostfs.info/TrueCloudLab/frostfs-contract/issues/2
         // Currently lock object must have an expiration epoch.
         // Besides we need to override retention expiration epoch since legal hold cannot be deleted yet.
         expEpoch = math.MaxUint64
@@ -246,7 +245,7 @@ func (n *layer) attributesFromLock(ctx context.Context, lock *data.ObjectLock) (

     if expEpoch != 0 {
         result = append(result, [2]string{
-            AttributeExpirationEpoch, strconv.FormatUint(expEpoch, 10),
+            object.SysAttributeExpEpoch, strconv.FormatUint(expEpoch, 10),
         })
     }

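Editorial note: the hunk above replaces the local "__NEOFS__EXPIRATION_EPOCH" literal with the SDK-provided constant, so the attribute name tracks whatever the platform currently defines. The diff's own import of git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object supplies object.SysAttributeExpEpoch; a short sketch of forming the attribute pair the same way:

package main

import (
    "fmt"
    "math"
    "strconv"

    "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
)

func main() {
    var result [][2]string

    // Legal hold has no deletable retention yet, so the lock object is given
    // an effectively infinite expiration epoch, mirroring attributesFromLock.
    expEpoch := uint64(math.MaxUint64)
    result = append(result, [2]string{
        object.SysAttributeExpEpoch, strconv.FormatUint(expEpoch, 10),
    })

    fmt.Println(result)
}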

@@ -2,14 +2,15 @@ package layer

 import (
     "context"
-    errorsStd "errors"
+    "errors"
     "fmt"

-    "github.com/TrueCloudLab/frostfs-s3-gw/api"
-    "github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-    "github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
-    cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
-    oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
-    "github.com/TrueCloudLab/frostfs-sdk-go/user"
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+    s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
     "go.uber.org/zap"
 )
@@ -30,7 +31,7 @@ type PutObjectTaggingParams struct {

 func (n *layer) GetObjectTagging(ctx context.Context, p *GetObjectTaggingParams) (string, map[string]string, error) {
     var err error
-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)

     if len(p.ObjectVersion.VersionID) != 0 && p.ObjectVersion.VersionID != data.UnversionedObjectVersionID {
         if tags := n.cache.GetTagging(owner, objectTaggingCacheKey(p.ObjectVersion)); tags != nil {
|
@ -53,8 +54,8 @@ func (n *layer) GetObjectTagging(ctx context.Context, p *GetObjectTaggingParams)
|
|||
|
||||
tags, err := n.treeService.GetObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion)
|
||||
if err != nil {
|
||||
if errorsStd.Is(err, ErrNodeNotFound) {
|
||||
return "", nil, errors.GetAPIError(errors.ErrNoSuchKey)
|
||||
if errors.Is(err, ErrNodeNotFound) {
|
||||
return "", nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
|
||||
}
|
||||
return "", nil, err
|
||||
}
|
||||
|
@@ -76,13 +77,13 @@ func (n *layer) PutObjectTagging(ctx context.Context, p *PutObjectTaggingParams)

     err = n.treeService.PutObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion, p.TagSet)
     if err != nil {
-        if errorsStd.Is(err, ErrNodeNotFound) {
-            return nil, errors.GetAPIError(errors.ErrNoSuchKey)
+        if errors.Is(err, ErrNodeNotFound) {
+            return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
         }
         return nil, err
     }

-    n.cache.PutTagging(n.Owner(ctx), objectTaggingCacheKey(p.ObjectVersion), p.TagSet)
+    n.cache.PutTagging(n.BearerOwner(ctx), objectTaggingCacheKey(p.ObjectVersion), p.TagSet)

     return nodeVersion, nil
 }
|
@ -95,8 +96,8 @@ func (n *layer) DeleteObjectTagging(ctx context.Context, p *ObjectVersion) (*dat
|
|||
|
||||
err = n.treeService.DeleteObjectTagging(ctx, p.BktInfo, version)
|
||||
if err != nil {
|
||||
if errorsStd.Is(err, ErrNodeNotFound) {
|
||||
return nil, errors.GetAPIError(errors.ErrNoSuchKey)
|
||||
if errors.Is(err, ErrNodeNotFound) {
|
||||
return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -109,14 +110,14 @@ func (n *layer) DeleteObjectTagging(ctx context.Context, p *ObjectVersion) (*dat
 }

 func (n *layer) GetBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) (map[string]string, error) {
-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)

     if tags := n.cache.GetTagging(owner, bucketTaggingCacheKey(bktInfo.CID)); tags != nil {
         return tags, nil
     }

     tags, err := n.treeService.GetBucketTagging(ctx, bktInfo)
-    if err != nil && !errorsStd.Is(err, ErrNodeNotFound) {
+    if err != nil && !errors.Is(err, ErrNodeNotFound) {
         return nil, err
     }

|
@ -130,7 +131,7 @@ func (n *layer) PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo,
|
|||
return err
|
||||
}
|
||||
|
||||
n.cache.PutTagging(n.Owner(ctx), bucketTaggingCacheKey(bktInfo.CID), tagSet)
|
||||
n.cache.PutTagging(n.BearerOwner(ctx), bucketTaggingCacheKey(bktInfo.CID), tagSet)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -169,20 +170,19 @@ func (n *layer) getNodeVersion(ctx context.Context, objVersion *ObjectVersion) (
|
|||
}
|
||||
}
|
||||
if version == nil {
|
||||
err = errors.GetAPIError(errors.ErrNoSuchVersion)
|
||||
err = fmt.Errorf("%w: there isn't tree node with requested version id", s3errors.GetAPIError(s3errors.ErrNoSuchVersion))
|
||||
}
|
||||
}
|
||||
|
||||
if err == nil && version.IsDeleteMarker() && !objVersion.NoErrorOnDeleteMarker || errorsStd.Is(err, ErrNodeNotFound) {
|
||||
return nil, errors.GetAPIError(errors.ErrNoSuchKey)
|
||||
if err == nil && version.IsDeleteMarker() && !objVersion.NoErrorOnDeleteMarker {
|
||||
return nil, fmt.Errorf("%w: found version is delete marker", s3errors.GetAPIError(s3errors.ErrNoSuchKey))
|
||||
} else if errors.Is(err, ErrNodeNotFound) {
|
||||
return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
|
||||
}
|
||||
|
||||
if err == nil && version != nil && !version.IsDeleteMarker() {
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
n.log.Debug("target details",
|
||||
zap.String("reqId", reqInfo.RequestID),
|
||||
zap.String("bucket", objVersion.BktInfo.Name), zap.Stringer("cid", objVersion.BktInfo.CID),
|
||||
zap.String("object", objVersion.ObjectName), zap.Stringer("oid", version.OID))
|
||||
n.reqLogger(ctx).Debug(logs.GetTreeNode,
|
||||
zap.Stringer("cid", objVersion.BktInfo.CID), zap.Stringer("oid", version.OID))
|
||||
}
|
||||
|
||||
return version, err
|
||||

@@ -6,8 +6,8 @@ import (
     "sort"
     "strings"

-    "github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-    oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 )

 type TreeServiceMock struct {
@@ -59,17 +59,17 @@ func (t *TreeServiceMock) DeleteObjectTagging(_ context.Context, bktInfo *data.B
     return nil
 }

-func (t *TreeServiceMock) GetBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) (map[string]string, error) {
+func (t *TreeServiceMock) GetBucketTagging(context.Context, *data.BucketInfo) (map[string]string, error) {
     // TODO implement me
     panic("implement me")
 }

-func (t *TreeServiceMock) PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo, tagSet map[string]string) error {
+func (t *TreeServiceMock) PutBucketTagging(context.Context, *data.BucketInfo, map[string]string) error {
     // TODO implement me
     panic("implement me")
 }

-func (t *TreeServiceMock) DeleteBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) error {
+func (t *TreeServiceMock) DeleteBucketTagging(context.Context, *data.BucketInfo) error {
     // TODO implement me
     panic("implement me")
 }
@@ -100,15 +100,15 @@ func (t *TreeServiceMock) GetSettingsNode(_ context.Context, bktInfo *data.Bucke
     return settings, nil
 }

-func (t *TreeServiceMock) GetNotificationConfigurationNode(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
+func (t *TreeServiceMock) GetNotificationConfigurationNode(context.Context, *data.BucketInfo) (oid.ID, error) {
     panic("implement me")
 }

-func (t *TreeServiceMock) PutNotificationConfigurationNode(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
+func (t *TreeServiceMock) PutNotificationConfigurationNode(context.Context, *data.BucketInfo, oid.ID) (oid.ID, error) {
     panic("implement me")
 }

-func (t *TreeServiceMock) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
+func (t *TreeServiceMock) GetBucketCORS(_ context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
     systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
     if !ok {
         return oid.ID{}, nil
@@ -122,7 +122,7 @@ func (t *TreeServiceMock) GetBucketCORS(ctx context.Context, bktInfo *data.Bucke
     return node.OID, nil
 }

-func (t *TreeServiceMock) PutBucketCORS(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
+func (t *TreeServiceMock) PutBucketCORS(_ context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
     systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
     if !ok {
         systemMap = make(map[string]*data.BaseNodeVersion)
@@ -137,7 +137,7 @@ func (t *TreeServiceMock) PutBucketCORS(ctx context.Context, bktInfo *data.Bucke
     return oid.ID{}, ErrNoNodeToRemove
 }

-func (t *TreeServiceMock) DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
+func (t *TreeServiceMock) DeleteBucketCORS(context.Context, *data.BucketInfo) (oid.ID, error) {
     panic("implement me")
 }

@@ -314,7 +314,7 @@ func (t *TreeServiceMock) CreateMultipartUpload(_ context.Context, bktInfo *data
     return nil
 }

-func (t *TreeServiceMock) GetMultipartUploadsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.MultipartInfo, error) {
+func (t *TreeServiceMock) GetMultipartUploadsByPrefix(context.Context, *data.BucketInfo, string) ([]*data.MultipartInfo, error) {
     panic("implement me")
 }

@@ -407,7 +407,7 @@ LOOP:
     return nil
 }

-func (t *TreeServiceMock) PutLock(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64, lock *data.LockInfo) error {
+func (t *TreeServiceMock) PutLock(_ context.Context, bktInfo *data.BucketInfo, nodeID uint64, lock *data.LockInfo) error {
     cnrLockMap, ok := t.locks[bktInfo.CID.EncodeToString()]
     if !ok {
         t.locks[bktInfo.CID.EncodeToString()] = map[uint64]*data.LockInfo{
@@ -421,7 +421,7 @@ func (t *TreeServiceMock) PutLock(ctx context.Context, bktInfo *data.BucketInfo,
     return nil
 }

-func (t *TreeServiceMock) GetLock(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64) (*data.LockInfo, error) {
+func (t *TreeServiceMock) GetLock(_ context.Context, bktInfo *data.BucketInfo, nodeID uint64) (*data.LockInfo, error) {
     cnrLockMap, ok := t.locks[bktInfo.CID.EncodeToString()]
     if !ok {
         return nil, nil

@@ -4,8 +4,8 @@ import (
     "context"
     "errors"

-    "github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-    oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 )

 // TreeService provide interface to interact with tree service using s3 data models.

@@ -1,7 +1,6 @@
 package layer

 import (
-    "context"
     "encoding/hex"
     "fmt"
     "os"
@@ -9,11 +8,9 @@ import (
     "strings"
     "time"

-    "github.com/TrueCloudLab/frostfs-s3-gw/api"
-    "github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-    "github.com/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
-    "github.com/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-    "github.com/TrueCloudLab/frostfs-sdk-go/object"
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 )

 type (
@@ -74,7 +71,7 @@ func objectInfoFromMeta(bkt *data.BucketInfo, meta *object.Object) *data.ObjectI
         mimeType = contentType
         delete(headers, object.AttributeContentType)
     }
-    if val, ok := headers[object.AttributeTimestamp]; !ok {
+    if val, ok := headers[object.AttributeTimestamp]; !ok { //nolint:revive
         // ignore empty value
     } else if dt, err := strconv.ParseInt(val, 10, 64); err == nil {
         creation = time.Unix(dt, 0)
@@ -94,11 +91,29 @@ func objectInfoFromMeta(bkt *data.BucketInfo, meta *object.Object) *data.ObjectI
         ContentType: mimeType,
         Headers:     headers,
         Owner:       *meta.OwnerID(),
-        Size:        int64(meta.PayloadSize()),
+        Size:          meta.PayloadSize(),
+        CreationEpoch: meta.CreationEpoch(),
         HashSum: hex.EncodeToString(payloadChecksum.Value()),
     }
 }

+func GetObjectSize(objInfo *data.ObjectInfo) (uint64, error) {
+    var err error
+    fullSize := objInfo.Size
+
+    if objInfo.Headers[AttributeDecryptedSize] != "" {
+        if fullSize, err = strconv.ParseUint(objInfo.Headers[AttributeDecryptedSize], 10, 64); err != nil {
+            return 0, fmt.Errorf("invalid decrypted size header: %w", err)
+        }
+    } else if objInfo.Headers[MultipartObjectSize] != "" {
+        if fullSize, err = strconv.ParseUint(objInfo.Headers[MultipartObjectSize], 10, 64); err != nil {
+            return 0, fmt.Errorf("invalid multipart size header: %w", err)
+        }
+    }
+
+    return fullSize, nil
+}
+
 func FormEncryptionInfo(headers map[string]string) encryption.ObjectEncryption {
     algorithm := headers[AttributeEncryptionAlgorithm]
     return encryption.ObjectEncryption{
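Editorial note: GetObjectSize prefers the decrypted-size header, then the multipart-size header, and only then falls back to the stored payload size, because encrypted and multipart-combined objects occupy more bytes in storage than the logical object an S3 client should see. A hedged usage fragment from within the layer package (the header value is invented for illustration):

// Illustrative fragment, not gateway code: an encrypted object whose stored
// payload is larger than its logical size.
objInfo := &data.ObjectInfo{
    Size: 5242880, // stored (encrypted) payload size
    Headers: map[string]string{
        AttributeDecryptedSize: "4194304", // logical size before encryption
    },
}

size, err := GetObjectSize(objInfo)
if err != nil {
    panic(err) // a malformed size header is surfaced, not ignored
}
fmt.Println(size) // prints 4194304, not 5242880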
@@ -136,18 +151,3 @@ func NameFromString(name string) (string, string) {
     ind := strings.LastIndex(name, PathSeparator)
     return name[ind+1:], name[:ind+1]
 }
-
-// GetBoxData extracts accessbox.Box from context.
-func GetBoxData(ctx context.Context) (*accessbox.Box, error) {
-    var boxData *accessbox.Box
-    data, ok := ctx.Value(api.BoxData).(*accessbox.Box)
-    if !ok || data == nil {
-        return nil, fmt.Errorf("couldn't get box data from context")
-    }
-
-    boxData = data
-    if boxData.Gate == nil {
-        boxData.Gate = &accessbox.GateData{}
-    }
-    return boxData, nil
-}

@@ -6,18 +6,18 @@ import (
     "testing"
     "time"

-    "github.com/TrueCloudLab/frostfs-s3-gw/api/data"
-    "github.com/TrueCloudLab/frostfs-sdk-go/checksum"
-    cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
-    oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
-    "github.com/TrueCloudLab/frostfs-sdk-go/user"
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
     "github.com/stretchr/testify/require"
 )

 var (
     defaultTestCreated       = time.Now()
     defaultTestPayload       = []byte("test object payload")
-    defaultTestPayloadLength = int64(len(defaultTestPayload))
+    defaultTestPayloadLength = uint64(len(defaultTestPayload))
     defaultTestContentType   = http.DetectContentType(defaultTestPayload)
 )

@@ -4,7 +4,7 @@ import (
     "context"
     "sort"

-    "github.com/TrueCloudLab/frostfs-s3-gw/api/data"
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 )

 func (n *layer) ListObjectVersions(ctx context.Context, p *ListObjectVersionsParams) (*ListObjectVersionsInfo, error) {
Some files were not shown because too many files have changed in this diff.