forked from TrueCloudLab/frostfs-s3-gw

Compare commits: master...support/v0 (8 commits)

2c2540b134
9c8d629f2b
9c795c8639
3556fec9c1
dfdfb913d5
80fc3b4ac3
459a848c84
2f93844417
195 changed files with 5958 additions and 11699 deletions
@@ -1,7 +1,7 @@
 FROM golang:1.19 as builder
 
 ARG BUILD=now
-ARG REPO=git.frostfs.info/TrueCloudLab/frostfs-s3-gw
+ARG REPO=github.com/nspcc-dev/neofs-s3-gw
 ARG VERSION=dev
 
 WORKDIR /src
@@ -10,12 +10,12 @@ COPY . /src
 RUN make
 
 # Executable image
-FROM alpine AS frostfs-s3-gw
+FROM alpine AS neofs-s3-gw
 RUN apk add --no-cache bash ca-certificates
 
 WORKDIR /
 
-COPY --from=builder /src/bin/frostfs-s3-gw /bin/frostfs-s3-gw
-COPY --from=builder /src/bin/frostfs-s3-authmate /bin/frostfs-s3-authmate
+COPY --from=builder /src/bin/neofs-s3-gw /bin/neofs-s3-gw
+COPY --from=builder /src/bin/neofs-s3-authmate /bin/neofs-s3-authmate
 
-ENTRYPOINT ["/bin/frostfs-s3-gw"]
+ENTRYPOINT ["/bin/neofs-s3-gw"]

@@ -1,9 +1,9 @@
-FROM alpine AS frostfs-s3-gw
+FROM alpine AS neofs-s3-gw
 RUN apk add --no-cache bash ca-certificates
 
 WORKDIR /
 
-COPY /bin/frostfs-s3-gw /bin/frostfs-s3-gw
-COPY /bin/frostfs-s3-authmate /bin/frostfs-s3-authmate
+COPY /bin/neofs-s3-gw /bin/neofs-s3-gw
+COPY /bin/neofs-s3-authmate /bin/neofs-s3-authmate
 
-ENTRYPOINT ["/bin/frostfs-s3-gw"]
+ENTRYPOINT ["/bin/neofs-s3-gw"]

@@ -1,20 +0,0 @@
-on: [pull_request]
-
-jobs:
-  builds:
-    name: Builds
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        go_versions: [ '1.19', '1.20' ]
-      fail-fast: false
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '${{ matrix.go_versions }}'
-
-      - name: Build binary
-        run: make

@@ -1,20 +0,0 @@
-on: [pull_request]
-
-jobs:
-  dco:
-    name: DCO
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-
-      - name: Setup Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '1.20'
-
-      - name: Run commit format checker
-        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v1
-        with:
-          from: 3fbad97a

@@ -1,34 +0,0 @@
-on: [pull_request]
-
-jobs:
-  lint:
-    name: Lint
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: golangci-lint
-        uses: https://github.com/golangci/golangci-lint-action@v2
-        with:
-          version: latest
-
-  tests:
-    name: Tests
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        go_versions: [ '1.19', '1.20' ]
-      fail-fast: false
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '${{ matrix.go_versions }}'
-
-      - name: Update Go modules
-        run: make dep
-
-      - name: Run tests
-        run: make test

@@ -1,21 +0,0 @@
-on: [pull_request]
-
-jobs:
-  vulncheck:
-    name: Vulncheck
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-
-      - name: Setup Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '1.20'
-
-      - name: Install govulncheck
-        run: go install golang.org/x/vuln/cmd/govulncheck@latest
-
-      - name: Run govulncheck
-        run: govulncheck ./...

.github/CODEOWNERS vendored (2 changes)
@@ -1 +1 @@
-* @alexvanin @dkirillov
+* @alexvanin @masterSplinter01 @KirillovDenis

.github/ISSUE_TEMPLATE/bug_report.md vendored (45 deletions)
@@ -1,45 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: community, triage, bug
-assignees: ''
-
----
-
-<!--- Provide a general summary of the issue in the Title above -->
-
-## Expected Behavior
-<!--- If you're describing a bug, tell us what should happen -->
-<!--- If you're suggesting a change/improvement, tell us how it should work -->
-
-## Current Behavior
-<!--- If describing a bug, tell us what happens instead of the expected behavior -->
-<!--- If suggesting a change/improvement, explain the difference from current behavior -->
-
-## Possible Solution
-<!--- Not obligatory -->
-<!--- If no reason/fix/additions for the bug can be suggested, -->
-<!--- uncomment the following phrase: -->
-
-<!--- No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->
-
-## Steps to Reproduce (for bugs)
-<!--- Provide a link to a live example, or an unambiguous set of steps to -->
-<!--- reproduce this bug. -->
-
-1.
-
-## Context
-<!--- How has this issue affected you? What are you trying to accomplish? -->
-<!--- Providing context helps us come up with a solution that is most useful in the real world -->
-
-## Regression
-<!-- Is this issue a regression? (Yes / No) -->
-<!-- If Yes, optionally please include version or commit id or PR# that caused this regression, if you have these details. -->
-
-## Your Environment
-<!--- Include as many relevant details about the environment you experienced the bug in -->
-* Version used:
-* Server setup and configuration:
-* Operating System and version (`uname -a`):

.github/ISSUE_TEMPLATE/config.yml vendored (1 deletion)
@@ -1 +0,0 @@
-blank_issues_enabled: false

.github/ISSUE_TEMPLATE/feature_request.md vendored (20 deletions)
@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: community, triage
-assignees: ''
-
----
-
-## Is your feature request related to a problem? Please describe.
-<!--- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
-
-## Describe the solution you'd like
-<!--- A clear and concise description of what you want to happen. -->
-
-## Describe alternatives you've considered
-<!--- A clear and concise description of any alternative solutions or features you've considered. -->
-
-## Additional context
-<!--- Add any other context or screenshots about the feature request here. -->

.github/logo.svg vendored (70 deletions)
Deleted file: the FrostFS logo (SVG image, 5.5 KiB); its 70 lines of SVG markup and vector path data are not reproduced here.

.github/workflows/builds.yml vendored (new file, 73 additions)
@@ -0,0 +1,73 @@
+name: Builds
+
+on:
+  pull_request:
+    branches:
+      - master
+      - 'support/*'
+    types: [ opened, synchronize ]
+    paths-ignore:
+      - '**/*.md'
+
+jobs:
+  build_cli:
+    name: Build CLI
+    runs-on: ubuntu-20.04
+
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+
+      - name: Setup Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.19
+
+      - name: Restore Go modules from cache
+        uses: actions/cache@v2
+        with:
+          path: /home/runner/go/pkg/mod
+          key: deps-${{ hashFiles('go.sum') }}
+
+      - name: Get tree-service client
+        run: make sync-tree
+
+      - name: Update Go modules
+        run: make dep
+
+      - name: Build CLI
+        run: make
+
+      - name: Check version
+        run: if [[ $(make version) == *"dirty"* ]]; then exit 1; fi
+
+  build_image:
+    needs: build_cli
+    name: Build Docker image
+    runs-on: ubuntu-20.04
+
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+
+      - name: Set up Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.19
+
+      - name: Restore Go modules from cache
+        uses: actions/cache@v2
+        with:
+          path: /home/runner/go/pkg/mod
+          key: deps-${{ hashFiles('go.sum') }}
+
+      - name: Get tree-service client
+        run: make sync-tree
+
+      - name: Update Go modules
+        run: make dep
+
+      - name: Build Docker image
+        run: make image

.github/workflows/codeql-analysis.yml vendored (new file, 67 additions)
@@ -0,0 +1,67 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+  push:
+    branches: [ master, 'support/*' ]
+  pull_request:
+    # The branches below must be a subset of the branches above
+    branches: [ master, 'support/*' ]
+  schedule:
+    - cron: '35 8 * * 1'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'go' ]
+        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
+        # Learn more:
+        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v2
+
+      # Initializes the CodeQL tools for scanning.
+      - name: Initialize CodeQL
+        uses: github/codeql-action/init@v2
+        with:
+          languages: ${{ matrix.language }}
+          # If you wish to specify custom queries, you can do so here or in a config file.
+          # By default, queries listed here will override any specified in a config file.
+          # Prefix the list here with "+" to use these queries and those in the config file.
+          # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+      # If this step fails, then you should remove it and run the build manually (see below)
+      - name: Autobuild
+        uses: github/codeql-action/autobuild@v2
+
+      # ℹ️ Command-line programs to run using the OS shell.
+      # 📚 https://git.io/JvXDl
+
+      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+      #    and modify them (or add more) to build your code if your project
+      #    uses a compiled language
+
+      #- run: |
+      #   make bootstrap
+      #   make release
+
+      - name: Perform CodeQL Analysis
+        uses: github/codeql-action/analyze@v2

.github/workflows/dco.yml vendored (new file, 22 additions)
@@ -0,0 +1,22 @@
+name: DCO check
+
+on:
+  pull_request:
+    branches:
+      - master
+      - 'support/*'
+
+jobs:
+  commits_check_job:
+    runs-on: ubuntu-latest
+    name: Commits Check
+    steps:
+      - name: Get PR Commits
+        id: 'get-pr-commits'
+        uses: tim-actions/get-pr-commits@master
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+      - name: DCO Check
+        uses: tim-actions/dco@master
+        with:
+          commits: ${{ steps.get-pr-commits.outputs.commits }}

.github/workflows/tests.yml vendored (new file, 96 additions)
@@ -0,0 +1,96 @@
+name: Tests
+
+on:
+  pull_request:
+    branches:
+      - master
+      - 'support/*'
+    types: [opened, synchronize]
+    paths-ignore:
+      - '**/*.md'
+
+jobs:
+  lint:
+    name: Lint
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Get tree-service client
+        run: make sync-tree
+
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v2
+        with:
+          version: latest
+
+  cover:
+    name: Coverage
+    runs-on: ubuntu-20.04
+
+    env:
+      CGO_ENABLED: 1
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+
+      - name: Set up Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.19
+
+      - name: Restore Go modules from cache
+        uses: actions/cache@v2
+        with:
+          path: /home/runner/go/pkg/mod
+          key: deps-${{ hashFiles('go.sum') }}
+
+      - name: Get tree-service client
+        run: make sync-tree
+
+      - name: Update Go modules
+        run: make dep
+
+      - name: Test and write coverage profile
+        run: make cover
+
+      - name: Upload coverage results to Codecov
+        uses: codecov/codecov-action@v1
+        with:
+          fail_ci_if_error: false
+          path_to_write_report: ./coverage.txt
+          verbose: true
+
+  tests:
+    name: Tests
+    runs-on: ubuntu-20.04
+    strategy:
+      matrix:
+        go_versions: [ '1.17', '1.18.x', '1.19.x' ]
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+
+      - name: Set up Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: '${{ matrix.go_versions }}'
+
+      - name: Restore Go modules from cache
+        uses: actions/cache@v2
+        with:
+          path: /home/runner/go/pkg/mod
+          key: deps-${{ hashFiles('go.sum') }}
+
+      - name: Get tree-service client
+        run: make sync-tree
+
+      - name: Update Go modules
+        run: make dep
+
+      - name: Run tests
+        run: make test

.gitignore vendored (11 changes)
@@ -2,6 +2,9 @@
 .idea
 .vscode
 
+# Tree service
+internal/neofs/services/tree/
+
 # Vendoring
 vendor
 
@@ -18,10 +21,4 @@ coverage.txt
 coverage.html
 
 # debhelpers
 **/*debhelper*
-# debian package build files
-debian/files
-debian/*.log
-debian/*.substvars
-debian/frostfs-s3-gw/
-**/.debhelper

.gitlint (11 deletions)
@@ -1,11 +0,0 @@
-[general]
-fail-without-commits=True
-regex-style-search=True
-contrib=CC1
-
-[title-match-regex]
-regex=^\[\#[0-9Xx]+\]\s
-
-[ignore-by-title]
-regex=^Release(.*)
-ignore=title-match-regex

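The two regexes in the deleted `.gitlint` above encode the repository's commit-title policy: every title must begin with an issue reference such as `[#123] `, and release commits are exempt. Here is a minimal, self-contained Go sketch of that rule — the sample titles are hypothetical, and the real enforcement was done by gitlint itself, not by code like this:

```go
package main

import (
	"fmt"
	"regexp"
)

// titleRe mirrors [title-match-regex] from the removed .gitlint: a commit
// title must start with "[#<digits or X>] ". releaseRe mirrors the
// [ignore-by-title] exemption for release commits.
var (
	titleRe   = regexp.MustCompile(`^\[#[0-9Xx]+\]\s`)
	releaseRe = regexp.MustCompile(`^Release`)
)

func titleOK(title string) bool {
	return releaseRe.MatchString(title) || titleRe.MatchString(title)
}

func main() {
	for _, t := range []string{
		"[#176] Replace part on re-upload", // accepted
		"[#XX] Fix empty bucket policy",    // accepted: placeholder issue number
		"Release v0.27.0",                  // accepted: exempted by ignore-by-title
		"Fix empty bucket policy",          // rejected: no issue reference
	} {
		fmt.Printf("%-36q %v\n", t, titleOK(t))
	}
}
```
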
@@ -4,7 +4,7 @@
 # options for analysis running
 run:
   # timeout for analysis, e.g. 30s, 5m, default is 1m
-  timeout: 15m
+  timeout: 5m
 
   # include test files or not, default is true
   tests: true
@@ -32,12 +32,15 @@ linters:
     - revive
 
     # some default golangci-lint linters
+    - deadcode
     - errcheck
     - gosimple
    - ineffassign
     - staticcheck
+    - structcheck
     - typecheck
     - unused
+    - varcheck
 
     # extra linters
     - exhaustive

@@ -1,45 +0,0 @@
-ci:
-  autofix_prs: false
-
-repos:
-  - repo: https://github.com/jorisroovers/gitlint
-    rev: v0.19.1
-    hooks:
-      - id: gitlint
-        stages: [commit-msg]
-      - id: gitlint-ci
-
-  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.4.0
-    hooks:
-      - id: check-added-large-files
-      - id: check-case-conflict
-      - id: check-executables-have-shebangs
-      - id: check-shebang-scripts-are-executable
-      - id: check-merge-conflict
-      - id: check-json
-      - id: check-xml
-      - id: check-yaml
-      - id: trailing-whitespace
-        args: [--markdown-linebreak-ext=md]
-      - id: end-of-file-fixer
-        exclude: ".key$"
-
-  - repo: https://github.com/shellcheck-py/shellcheck-py
-    rev: v0.9.0.2
-    hooks:
-      - id: shellcheck
-
-  - repo: https://github.com/golangci/golangci-lint
-    rev: v1.51.2
-    hooks:
-      - id: golangci-lint
-
-  - repo: local
-    hooks:
-      - id: go-unit-tests
-        name: go unit tests
-        entry: make test
-        pass_filenames: false
-        types: [go]
-        language: system

CHANGELOG.md (412 changes)
@@ -4,82 +4,372 @@ This document outlines major changes between releases.
 
 ## [Unreleased]
 
+## [0.25.1] - 2022-10-30
+
 ### Fixed
-- Handle negative `Content-Length` on put (#125)
-- Use `DisableURIPathEscaping` to presign urls (#125)
-- Use specific s3 errors instead of `InternalError` where possible (#143)
-- `grpc` schemas in tree configuration (#166)
-- Return appropriate 404 code when object missed in storage but there is in gate cache (#158)
-- Replace part on re-upload when use multipart upload (#176)
+- Empty bucket policy (#740)
+- Big object removal (#749)
+- Checksum panic (#741)
 
 ### Added
-- Support dump metrics descriptions (#80)
-- Add `copies_numbers` section to `placement_policy` in config file and support vectors of copies numbers (#70, #101)
-- Support impersonate bearer token (#81, #105)
-- Reload default and custom copies numbers on SIGHUP (#104)
-- Tracing support (#84, #140)
-- Return bearer token in `s3-authmate obtain-secret` result (#132)
-- Support multiple version credentials using GSet (#135)
-- Implement chunk uploading (#106)
-- Add new `kludge.bypass_content_encoding_check_in_chunks` config param (#146)
+- Debian packaging (#737)
+- Timeout for individual operations in streaming RPC (#750)
+
+## [0.25.0] - 2022-10-31
+
+### Fixed
+- Legal hold object lock enabling (#709)
+- Errors at object locking (#719)
+- Unrestricted access to not owned objects via cache (#713)
+- Check tree service health (#699)
+- Bucket names in listing (#733)
+
+### Added
+- Config reloading on SIGHUP (#702, #715, #716)
+- Stop pool dial on SIGINT (#712)
 
 ### Changed
-- Update prometheus to v1.15.0 (#94)
-- Update go version to go1.19 (#118)
-- Remove object from tree and reset its cache on object deletion when it is already removed from storage (#78)
-- Finish rebranding (#2)
-- Timeout errors now have code 504 (#103)
-- Use request scope logger (#111)
-- Add `s3-authmate update-secret` command (#131)
-- Use default registerer for app metrics (#155)
-- Use chi router instead of archived gorilla/mux (#149)
-- Complete multipart upload no longer makes an unnecessary copy; the total time of multipart upload was thus reduced by 2 times (#63)
+- GitHub actions update (#710)
+- Makefile help (#725)
+- Optimized object tags setting (#669)
+- Improved logging (#728)
+- Unified unit test names (#617)
+- Improved docs (#732)
 
 ### Removed
-- Drop `tree.service` param (now endpoints from `peers` section are used) (#133)
+- Unused cache methods (#650)
 
-## [0.27.0] - Karpinsky - 2023-07-12
+### Updating from v0.24.0
+New config parameters were added. Make sure the default parameters are appropriate for you.
 
-This is a first FrostFS S3 Gateway release named after
-[Karpinsky glacier](https://en.wikipedia.org/wiki/Karpinsky_Glacier).
+```yaml
+cache:
+  accesscontrol:
+    lifetime: 1m
+    size: 100000
+```
 
-### Fixed
-- Using multiple servers require only one healthy (#12)
-- Renew token before it expires (#20)
-- Add generated deb builder files to .gitignore, and fix typo (#28)
-- Get empty bucket CORS from frostfs (#36)
-- Don't count pool error on client abort (#35)
-- Handle request cancelling (#69)
-- Clean up List and Name caches when object is missing in Tree service (#57)
-- Don't create unnecessary delete-markers (#83)
-- `Too many pings` error (#145)
+## [0.24.0] - 2022-09-14
 
 ### Added
-- Billing metrics (#5, #26, #29)
-- Return container name in `head-bucket` response (#18)
-- Multiple configs support (#21)
-- Bucket name resolving policy (#25)
-- Support string `Action` and `Resource` fields in `bucketPolicy.Statement` (#32)
-- Add new `kludge.use_default_xmlns_for_complete_multipart` config param (#40)
-- Return `X-Owner-Id` in `head-bucket` response (#79)
-- Support multiple tree service endpoints (#74, #110, #114)
+- Exposure of pool metrics (#615, #680)
+- Configuration of `set_copies_number` (#634, #637)
+- Configuration of list of allowed `AccessKeyID` prefixes (#674)
+- Tagging directive for `CopyObject` (#666, #683)
+- Customer encryption (#595)
+- `CopiesNumber` configuration (#634, #637)
 
 ### Changed
-- Repository rebranding (#1)
-- Update neo-go to v0.101.0 (#14)
-- Update viper to v1.15.0 (#14)
-- Update go version to go1.18 (#16)
-- Return error on invalid LocationConstraint (#23)
-- Limit number of objects to delete at one time (#37)
-- CompleteMultipartUpload handler now sends whitespace characters to keep alive client's connection (#60)
-- Support new system attributes (#64)
-- Abstract network communication in TreeClient (#59, #75)
-- Changed values for `frostfs_s3_gw_state_health` metric (#91)
+- Improved wallet configuration via `.yaml` config and environment variables (#607)
+- Update go version for build to 1.19 (#694, #705)
+- Update version calculation (#653, #697)
+- Optimized lock creation (#692)
+- Update way to configure `listen_domains` (#667)
+- Use `FilePath` instead of `FileName` for object keys (#657)
+- Optimize listing (#625, #616)
 
 ### Removed
+- Drop any object search logic (#545)
 
 ### Fixed
+- Responses to `GetObject` and `HeadObject`: removed redundant `VersionID` (#577, #682)
+- Replacement of object tagging in case of overwriting of an object (#645)
+- Using tags cache with empty `versionId` (#643)
+- Fix panic on go1.19 (#678)
+- Fix panic on invalid versioning status (#660)
+- Fix panic on missing decrypt reader (#704)
+- Using multipart uploads with `/` in name (#671)
+- Don't update settings cache when request fails (#661)
+- Fix handling `X-Amz-Copy-Source` header (#672)
+- ACL related problems (#676, #606)
+- Using `ContinuationToken` for "directories" (#684)
+- Fix `connection was closed` error (#656)
+- Fix listing for nested objects (#624)
+- Fix anon requests to tree service (#504, #505)
 
+### Updating from v0.23.0
+Make sure your configuration is valid:
+
+If you configure application using environment variables change:
+* `S3_GW_WALLET` -> `S3_GW_WALLET_PATH`
+* `S3_GW_ADDRESS` -> `S3_GW_WALLET_ADDRESS`
+* `S3_GW_LISTEN_DOMAINS_N` -> `S3_GW_LISTEN_DOMAINS` (use it as array variable)
+
+If you configure application using `.yaml` file change:
+* `wallet` -> `wallet.path`
+* `address` -> `wallet.address`
+* `listen_domains.n` -> `listen_domains` (use it as array param)
+
+## [0.23.0] - 2022-08-01
+
+### Fixed
+- System metadata are filtered now (#619)
+- List objects in corner cases (#612, #627)
+- Correct removal of a deleted object (#610)
+- Bucket creation could lead to "no healthy client" error (#636)
+
+### Added
+- New param to configure pool error threshold (#633)
+
+### Changed
+- Pprof and prometheus metrics configuration (#591)
+- Don't set sticky bit in authmate container (#540)
+- Updated compatibility table (#638)
+- Rely on string sanitizing from zap (#498)
+
+### Updating from v0.22.0
+1. To enable pprof use `pprof.enabled` instead of `pprof` in config.
+   To enable prometheus metrics use `prometheus.enabled` instead of `metrics` in config.
+   If you are using the command line flags you can skip this step.
+
+## [0.22.0] - 2022-07-25
+
+Tree service support
+
+### Fixed
+- Error logging (#450)
+- Default bucket location constraint (#463)
+- Suspended versioning status (#462)
+- CodeQL warnings (#489, #522, #539)
+- Bearer token behaviour with non-owned buckets (#459)
+- ACL issues (#495, #553, #571, #573, #574, #580)
+- Authmate policy parsing (#558)
+
+### Added
+- Public key output in authmate issue-secret command (#482)
+- Support of conditional headers (#484)
+- Cache type cast error logging (#465)
+- `docker/*` target in Makefile (#471)
+- Pre signed requests (#529)
+- Tagging and ACL notifications (#361)
+- AWSv4 signer package to improve compatibility with S3 clients (#528)
+- Extension mimetype detector (#289)
+- Default params documentation (#592)
+- Health metric (#600)
+- Parallel object listing (#525)
+- Tree service (see commit links from #609)
+
+### Changed
+- Reduce number of network requests (#439, #441)
+- Renamed authmate to s3-authmate (#518)
+- Version output (#578)
+- Improved error messages (#539)
+
+### Removed
+- `layer/neofs` package (#438)
+
+## [0.21.1] - 2022-05-16
+
+### Changed
+- Update go version to go1.17 (#427)
+- Set homomorphic hashing disable attribute in container if required (#435)
+
+## [0.21.0] - 2022-05-13
+
+### Added
+- Support of get-object-attributes (#430)
+
+### Fixed
+- Reduced time of bucket creation (#426)
+- Bucket removal (#428)
+- Obtainment of ETag value (#431)
+
+### Changed
+- Authmate doesn't parse session context anymore, now it accepts application defined
+  flexible structure with container ID in human-readable format (#428)
+
+## [0.20.0] - 2022-04-29
+
+### Added
+- Support of object locking (#195)
+- Support of basic notifications (#357, #358, #359)
+
+### Changed
+- Logger behavior: now it writes to stderr instead of stdout, app name and
+  version are always presented and fixed, all user options except of `level` are
+  dropped (#380)
+- Improved docs, added config examples (#396, #398)
+- Updated NeoFS SDK (#365, #409)
+
+### Fixed
+- Added check of `SetEACL` tokens before processing of requests (#347)
+- Authmate: returned lost session tokens when a parameter `--session-token` is
+  omitted (#387)
+- Error when a bucket hasn't a settings file (#389)
+- Response to a request to delete not existing object (#392)
+- Replaced gate key in ACL Grantee by key of bearer token issuer (#395)
+- Missing attach of bearer token to requests to put system object (#399)
+- Deletion of system object while CompleteMultipartUpload (#400)
+- Improved English in docs and comments (#405)
+- Authmate: reconsidered default bearer token rules (#406)
+
+## [0.19.0] - 2022-03-16
+
+### Added
+- Authmate: support placement policy overriding (#343, #364)
+- Managing bucket notification configuration (#340)
+- Unit tests in go1.17 (#265)
+- NATS settings in application config (#341)
+- Support `Expires` and `Cache-Control` headers (#312)
+- Support `%` as delimiter (#313)
+- Support `null` version deletion (#319)
+- Bucket name resolving order (#285)
+- Authmate: added `timeout` flag (#290)
+- MinIO results in s3 compatibility tables (#304)
+- Support overriding response headers (#310)
+
+### Changed
+- Authmate: check parameters before container creation (#372)
+- Unify cache invalidation on deletion (#368)
+- Updated NeoFS SDK to v1.0.0-rc.3 (#297, #333, #346, #376)
+- Authmate: changed session token rules handling (#329, #336, #338, #352)
+- Changed status code for some failed requests (#308)
+- GetBucketLocation returns policy name used at bucket creation (#301)
+
+### Fixed
+- Waiting for bucket to be deleted (#366)
+- Authmate: changed error message for session context building (#348)
+- Authmate: fixed access key parsing in `obtain-secret` command (#295)
+- Distinguishing `BucketAlreadyExists` errors (#354)
+- Incorrect panic if handler not found (#305)
+- Authmate: use container friendly name as system name (#299, #324)
+- Use UTC `Last-Modified` timestamps (#331)
+- Don't return object system metadata (#307)
+- Handling empty post policy (#306)
+- Use `X-Amz-Verion-Id` in `CompleteMulipartUpload` (#318)
+
+### Removed
+- Drop MinIO related errors (#316)
+
+## [0.18.0] - 2021-12-16
+
+### Added
+- Support for MultipartUpload (#186, #187)
+- CORS support (#217)
+- Authmate supports setting of tokens lifetime in a more convenient format (duration) (#258)
+- Generation of a random key for `--no-sign-request` (#276)
+
+### Changed
+- Bucket name resolving mechanism from listing owner's containers to using DNS (#219)
+
+### Removed
+- Deprecated golint, replaced by revive (#272)
+
+## 0.17.0 (24 Sep 2021)
+With this release we introduce [ceph-based](https://github.com/ceph/s3-tests) S3 compatibility results.
+
+### Added
+* Versioning support (#122, #242, #263)
+* Ceph S3 compatibility results (#150, #249, #266)
+* Handling `X-Amz-Expected-Bucket-Owner` header (#216)
+* `X-Container-Id` header for `HeadBucket` response (#220)
+* Basic ACL support (#49, #213)
+* Caching (#179, #206, #231, #236, #253)
+* Metadata directive when copying (#191)
+* Bucket name checking (#189)
+* Continuation token support (#112, #154, #180)
+* Mapping `LocationConstraint` to `PlacementPolicy` (#89)
+* Tagging support (#196)
+* POST uploading support (#190)
+* Delete marker support (#248)
+* Expiration for access box (#255)
+* AWS CLI credential generating by authmate (#241)
+
+### Changed
+* Default placement policy is now configurable (#218)
+* README is split into different files (#210)
+* Unified error handling (#89, #149, #184)
+* Authmate issue-secret response contains container id (#163)
+* Removed "github.com/nspcc-dev/neofs-node" dependency (#234)
+* Removed GitHub workflow of image publishing (#243)
+* Changed license to AGPLv3 (#264)
+
+### Fixed
+* ListObjects results are now the same for different users (#230)
+* Error response for invalid authentication header is now correct (#199)
+* Saving object metadata (#198)
+* Range header handling (#194)
+* Correct status codes (#118, #262)
+* HeadObject for "directories" (#160)
+* Fetch-owner parameter support (#159)
+
+## 0.16.0 (16 Jul 2021)
+
+With this release we publish S3 gateway source code. It includes various S3
+compatibility improvements, support of bucket management, unified secp256r1
+cryptography with NEP-6 wallet support.
+
+### Fixed
+* Allowed no-sign request (#65)
+* Bearer token attached to all requests (#84)
+* Time format in responses (#133)
+* Max-keys checked in ListObjects (#135)
+* Lost metadata in the objects (#131)
+* Unique bucket name check (#125)
+
+### Added
+* Bucket management operations (#47, #72)
+* Node-specific owner IDs in bearer tokens (#83)
+* AWS CLI usage section in README (#77)
+* List object paging (#97)
+* Lifetime for the tokens in auth-mate (#108)
+* Support of range in GetObject request (#96)
+* Support of NEP-6 wallets instead of binary encoded keys (#92)
+* Support of JSON encoded rules in auth-mate (#71)
+* Support of delimiters in ListObjects (#98)
+* Support of object ETag (#93)
+* Support of time-based conditional CopyObject and GetObject (#94)
+
+### Changed
+* Accesskey format: now `0` used as a delimiter between container ID and object
+  ID instead of `_` (#164)
+* Accessbox is encoded in protobuf format (#48)
+* Authentication uses secp256r1 instead of ed25519 (#75)
+* Improved integration with NeoFS SDK and NeoFS API Go (#78, #88)
+* Optimized object put execution (#155)
+
+### Removed
+* GRPC keepalive options (#73)
+
+## 0.15.0 (10 Jun 2021)
+
+This release brings S3 gateway to the current state of NeoFS and fixes some
+bugs, no new significant features introduced (other than moving here already
+existing authmate component).
+
+New features:
+* authmate was moved into this repository and is now built along with the
+  gateway itself (#46)
+
+Behavior changes:
+* neofs-s3-gate was renamed to neofs-s3-gw (#50)
+
+Improvements:
+* better Makefile (#43, #45, #55)
+* stricter linters (#45)
+* removed non-standard errors package from dependencies (#54)
+* refactoring, reusing new sdk-go component (#60, #62, #63)
+* updated neofs-api-go for compatibility with current NeoFS node 0.21.0 (#60, #68)
+* extended README (#67, #76)
+
+Bugs fixed:
+* wrong (as per AWS specification) access key ID generated (#64)
 
 ## Older versions
 
-This project is a fork of [NeoFS S3 Gateway](https://github.com/nspcc-dev/neofs-s3-gw) from version v0.26.0.
-To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-s3-gw/blob/master/CHANGELOG.md.
+Please refer to [Github
+releases](https://github.com/nspcc-dev/neofs-s3-gw/releases/) for older
+releases.
 
-[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/b2148cc3...v0.27.0
-[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.27.0...master
+[0.18.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.17.0...v0.18.0
+[0.19.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.18.0...v0.19.0
+[0.20.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.19.0...v0.20.0
+[0.21.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.20.0...v0.21.0
+[0.21.1]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.21.0...v0.21.1
+[0.22.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.21.1...v0.22.0
+[0.23.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.22.0...v0.23.0
+[0.24.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.23.0...v0.24.0
+[0.25.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.24.0...v0.25.0
+[0.25.1]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.25.0...v0.25.1
+[Unreleased]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.25.1...master

@@ -3,8 +3,8 @@
 First, thank you for contributing! We love and encourage pull requests from
 everyone. Please follow the guidelines:
 
-- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/issues) and
-  [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pulls) for existing
+- Check the open [issues](https://github.com/nspcc-dev/neofs-s3-gw/issues) and
+  [pull requests](https://github.com/nspcc-dev/neofs-s3-gw/pulls) for existing
   discussions.
 
 - Open an issue first, to discuss a new feature or enhancement.
@@ -23,24 +23,24 @@ everyone. Please follow the guidelines:
 
 ## Development Workflow
 
-Start by forking the `frostfs-s3-gw` repository, make changes in a branch and then
+Start by forking the `neofs-s3-gw` repository, make changes in a branch and then
 send a pull request. We encourage pull requests to discuss code changes. Here
 are the steps in details:
 
-### Set up your git repository
-Fork [FrostFS S3 Gateway
-upstream](https://git.frostfs.info/repo/fork/15) source repository
+### Set up your GitHub Repository
+Fork [NeoFS S3 Gateway
+upstream](https://github.com/nspcc-dev/neofs-s3-gw/fork) source repository
 to your own personal repository. Copy the URL of your fork (you will need it for
 the `git clone` command below).
 
 ```sh
-$ git clone https://git.frostfs.info/<username>/frostfs-s3-gw.git
+$ git clone https://github.com/nspcc-dev/neofs-s3-gw
 ```
 
 ### Set up git remote as ``upstream``
 ```sh
-$ cd frostfs-s3-gw
-$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw.git
+$ cd neofs-s3-gw
+$ git remote add upstream https://github.com/nspcc-dev/neofs-s3-gw
 $ git fetch upstream
 $ git merge upstream/master
 ...
@@ -90,8 +90,8 @@ $ git push origin feature/123-something_awesome
 ```
 
 ### Create a Pull Request
-Pull requests can be created via Forgejo. Refer to [this
-document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
+Pull requests can be created via GitHub. Refer to [this
+document](https://help.github.com/articles/creating-a-pull-request/) for
 detailed steps on how to create a pull request. After a Pull Request gets peer
 reviewed and approved, it will be merged.
@@ -107,7 +107,7 @@ contributors".
 To sign your work, just add a line like this at the end of your commit message:
 
 ```
-Signed-off-by: Samii Sakisaka <samii@frostfs.info>
+Signed-off-by: Samii Sakisaka <samii@nspcc.ru>
 ```
 
 This can be easily done with the `--signoff` option to `git commit`.

@@ -16,4 +16,4 @@ In chronological order:
 - Elizaveta Chichindaeva
 - Stanislav Bogatyrev
-- Anastasia Prasolova
-- Leonard Liubich
+- Leonard Liubich

Makefile (39 changes; mode changed from executable to normal file)
@@ -7,40 +7,42 @@ GO_VERSION ?= 1.19
 LINT_VERSION ?= 1.49.0
 BINDIR = bin
 
-METRICS_DUMP_OUT ?= ./metrics-dump.json
-
 # Binaries to build
-CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
+CMDS = $(addprefix neofs-, $(notdir $(wildcard cmd/*)))
 BINS = $(addprefix $(BINDIR)/, $(CMDS))
 
 # Variables for docker
 REPO_BASENAME = $(shell basename `go list -m`)
-HUB_IMAGE ?= "truecloudlab/$(REPO_BASENAME)"
+HUB_IMAGE ?= "nspccdev/$(REPO_BASENAME)"
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
 
-.PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean protoc
+.PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint version clean protoc
 
 # .deb package versioning
 OS_RELEASE = $(shell lsb_release -cs)
 PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
 			sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
 			sed "s/-/~/")-${OS_RELEASE}
 .PHONY: debpackage debclean
 
 # Make all binaries
 all: $(BINS)
 
-$(BINS): $(BINDIR) dep
+$(BINS): sync-tree $(BINDIR) dep
 	@echo "⇒ Build $@"
 	CGO_ENABLED=0 \
 	go build -v -trimpath \
 	-ldflags "-X $(REPO)/internal/version.Version=$(VERSION)" \
-	-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))
+	-o $@ ./cmd/$(subst neofs-,,$(notdir $@))
 
 $(BINDIR):
 	@echo "⇒ Ensure dir: $@"
 	@mkdir -p $@
 
+# Synchronize tree service
+sync-tree:
+	@./syncTree.sh
+
 # Pull go dependencies
 dep:
 	@printf "⇒ Download requirements: "
@@ -78,7 +80,7 @@ format:
 
 # Build clean Docker image
 image:
-	@echo "⇒ Build FrostFS S3 Gateway docker image "
+	@echo "⇒ Build NeoFS S3 Gateway docker image "
 	@docker build \
 		--build-arg REPO=$(REPO) \
 		--build-arg VERSION=$(VERSION) \
@@ -93,7 +95,7 @@ image-push:
 
 # Build dirty Docker image
 dirty-image:
-	@echo "⇒ Build FrostFS S3 Gateway dirty docker image "
+	@echo "⇒ Build NeoFS S3 Gateway dirty docker image "
	@docker build \
 		--build-arg REPO=$(REPO) \
 		--build-arg VERSION=$(VERSION) \
@@ -113,14 +115,6 @@ docker/lint:
 		--env HOME=/src \
 		golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'
 
-# Activate pre-commit hooks
-pre-commit:
-	pre-commit install -t pre-commit -t commit-msg
-
-# Deactivate pre-commit hooks
-unpre-commit:
-	pre-commit uninstall -t pre-commit -t commit-msg
-
 # Show current version
 version:
 	@echo $(VERSION)
@@ -141,7 +135,7 @@ protoc:
 
 # Package for Debian
 debpackage:
-	dch --package frostfs-s3-gw \
+	dch --package neofs-s3-gw \
 		--controlmaint \
 		--newversion $(PKG_VERSION) \
 		--distribution $(OS_RELEASE) \
@@ -149,11 +143,6 @@ debpackage:
 	dpkg-buildpackage --no-sign -b
 
 debclean:
-	dh clean
-
-# Dump metrics (use METRICS_DUMP_OUT variable to override default out file './metrics-dump.json')
-.PHONY: dump-metrics
-dump-metrics:
-	@go test ./metrics -run TestDescribeAll --tags=dump_metrics --out=$(abspath $(METRICS_DUMP_OUT))
+	dh clean
 
 include help.mk

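The `PKG_VERSION` pipeline above is the least obvious part of this Makefile: it rewrites a `git describe`-style version into a Debian-compatible one. As a sketch only (the real logic is the three `sed` expressions, not this program), assuming an input such as `v0.25.1-8-gabc1234` and an example distribution codename `jammy`:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// debVersion mirrors the PKG_VERSION sed pipeline:
//  1. strip the leading "v";
//  2. move a "-g<hash>" suffix produced by `git describe` to the end,
//     prefixed with "~";
//  3. turn the first remaining "-" into "~" (so dpkg sorts the snapshot
//     before the next clean release);
//  4. append the distribution codename.
func debVersion(version, osRelease string) string {
	v := strings.TrimPrefix(version, "v")
	re := regexp.MustCompile(`(.*)-(g[a-fA-F0-9]{6,8})(.*)`)
	v = re.ReplaceAllString(v, "$1$3~$2")
	v = strings.Replace(v, "-", "~", 1) // sed "s/-/~/" replaces only the first match
	return v + "-" + osRelease
}

func main() {
	fmt.Println(debVersion("v0.25.1-8-gabc1234", "jammy")) // 0.25.1~8~gabc1234-jammy
	fmt.Println(debVersion("v0.25.1", "jammy"))            // 0.25.1-jammy
}
```
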
README.md (46 changes)
@@ -1,25 +1,13 @@
-<p align="center">
-<img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
-</p>
-<p align="center">
-  <a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
-</p>
+# NeoFS S3 Gateway
 
----
-[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)
-![Release](https://img.shields.io/badge/dynamic/json.svg?label=release&url=https://git.frostfs.info/api/v1/repos/TrueCloudLab/frostfs-s3-gw/releases&query=$[0].tag_name&color=orange)
-![License](https://img.shields.io/badge/license-GPL--3.0-orange.svg)
-
-# FrostFS S3 Gateway
-
-FrostFS S3 gateway provides API compatible with Amazon S3 cloud storage service.
+NeoFS S3 gateway provides API compatible with Amazon S3 cloud storage service.
 
 ## Installation
 
-```go get -u git.frostfs.info/TrueCloudLab/frostfs-s3-gw```
+```go get -u github.com/nspcc-dev/neofs-s3-gw```
 
 Or you can call `make` to build it from the cloned repository (the binary will
-end up in `bin/frostfs-s3-gw` with authmate helper in `bin/frostfs-s3-authmate`).
+end up in `bin/neofs-s3-gw` with authmate helper in `bin/neofs-s3-authmate`).
 To build binaries in clean docker environment, call `make docker/all`.
 
 Other notable make targets:
@@ -34,45 +22,45 @@ version  Show current version
 ```
 
 Or you can also use a [Docker
-image](https://hub.docker.com/r/truecloudlab/frostfs-s3-gw) provided for released
+image](https://hub.docker.com/r/nspccdev/neofs-s3-gw) provided for released
 (and occasionally unreleased) versions of gateway (`:latest` points to the
 latest stable release).
 
 ## Execution
 
 Minimalistic S3 gateway setup needs:
-* FrostFS node(s) address (S3 gateway itself is not a FrostFS node)
+* NeoFS node(s) address (S3 gateway itself is not a NeoFS node)
   Passed via `-p` parameter or via `S3_GW_PEERS_<N>_ADDRESS` and
   `S3_GW_PEERS_<N>_WEIGHT` environment variables (gateway supports multiple
-  FrostFS nodes with weighted load balancing).
-* a wallet used to fetch key and communicate with FrostFS nodes
+  NeoFS nodes with weighted load balancing).
+* a wallet used to fetch key and communicate with NeoFS nodes
   Passed via `--wallet` parameter or `S3_GW_WALLET_PATH` environment variable.
 
 These two commands are functionally equivalent, they run the gate with one
 backend node, some keys and otherwise default settings:
 ```
-$ frostfs-s3-gw -p 192.168.130.72:8080 --wallet wallet.json
+$ neofs-s3-gw -p 192.168.130.72:8080 --wallet wallet.json
 
 $ S3_GW_PEERS_0_ADDRESS=192.168.130.72:8080 \
   S3_GW_WALLET=wallet.json \
-  frostfs-s3-gw
+  neofs-s3-gw
 ```
 It's also possible to specify uri scheme (grpc or grpcs) when using `-p` or environment variables:
 ```
-$ frostfs-s3-gw -p grpc://192.168.130.72:8080 --wallet wallet.json
+$ neofs-s3-gw -p grpc://192.168.130.72:8080 --wallet wallet.json
 
 $ S3_GW_PEERS_0_ADDRESS=grpcs://192.168.130.72:8080 \
   S3_GW_WALLET=wallet.json \
-  frostfs-s3-gw
+  neofs-s3-gw
 ```
 
 ## Domains
 
 By default, s3-gw enable only `path-style access`.
 To be able to use both: `virtual-hosted-style` and `path-style` access you must configure `listen_domains`:
 
 ```shell
-$ frostfs-s3-gw -p 192.168.130.72:8080 --wallet wallet.json --listen_domains your.first.domain --listen_domains your.second.domain
+$ neofs-s3-gw -p 192.168.130.72:8080 --wallet wallet.json --listen_domains your.first.domain --listen_domains your.second.domain
 ```
 
 So now you can use (e.g. `HeadBucket`. Make sure DNS is properly configured):
@@ -96,12 +84,12 @@ Also, you can configure domains using `.env` variables or `yaml` file.
 ## Documentation
 
 - [Configuration](./docs/configuration.md)
-- [FrostFS S3 AuthMate](./docs/authmate.md)
-- [FrostFS Tree service](./docs/tree_service.md)
+- [NeoFS S3 AuthMate](./docs/authmate.md)
+- [NeoFS Tree service](./docs/tree_service.md)
 - [AWS CLI basic usage](./docs/aws_cli.md)
 - [AWS S3 API compatibility](./docs/aws_s3_compat.md)
 - [AWS S3 Compatibility test results](./docs/s3_test_results.md)
 
 ## Credits
 
 Please see [CREDITS](CREDITS.md) for details.

VERSION (2 changes)
@@ -1 +1 @@
-v0.27.0
+v0.25.1

@@ -14,14 +14,14 @@ import (
 	"strings"
 	"time"
 
-	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
-	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+	v4 "github.com/nspcc-dev/neofs-s3-gw/api/auth/signer/v4"
+	"github.com/nspcc-dev/neofs-s3-gw/api/cache"
+	apiErrors "github.com/nspcc-dev/neofs-s3-gw/api/errors"
+	"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
+	"github.com/nspcc-dev/neofs-s3-gw/creds/tokens"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
 )
 
 // authorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
@@ -33,14 +33,7 @@ var postPolicyCredentialRegexp = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?
 type (
 	// Center is a user authentication interface.
 	Center interface {
-		Authenticate(request *http.Request) (*Box, error)
-	}
-
-	// Box contains access box and additional info.
-	Box struct {
-		AccessBox   *accessbox.Box
-		ClientTime  time.Time
-		AuthHeaders *AuthHeader
+		Authenticate(request *http.Request) (*accessbox.Box, error)
 	}
 
 	center struct {
@@ -52,8 +45,7 @@ type (
 
 	prs int
 
-	//nolint:revive
-	AuthHeader struct {
+	authHeader struct {
 		AccessKeyID string
 		Service     string
 		Region      string
@@ -94,16 +86,16 @@ func (p prs) Seek(_ int64, _ int) (int64, error) {
 var _ io.ReadSeeker = prs(0)
 
 // New creates an instance of AuthCenter.
-func New(frostFS tokens.FrostFS, key *keys.PrivateKey, prefixes []string, config *cache.Config) Center {
+func New(neoFS tokens.NeoFS, key *keys.PrivateKey, prefixes []string, config *cache.Config) Center {
 	return &center{
-		cli:                        tokens.New(frostFS, key, config),
+		cli:                        tokens.New(neoFS, key, config),
 		reg:                        NewRegexpMatcher(authorizationFieldRegexp),
 		postReg:                    NewRegexpMatcher(postPolicyCredentialRegexp),
 		allowedAccessKeyIDPrefixes: prefixes,
 	}
 }
 
-func (c *center) parseAuthHeader(header string) (*AuthHeader, error) {
+func (c *center) parseAuthHeader(header string) (*authHeader, error) {
 	submatches := c.reg.GetSubmatches(header)
 	if len(submatches) != authHeaderPartsNum {
 		return nil, apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed)
@@ -116,7 +108,7 @@ func (c *center) parseAuthHeader(header string) (*AuthHeader, error) {
 
 	signedFields := strings.Split(submatches["signed_header_fields"], ";")
 
-	return &AuthHeader{
+	return &authHeader{
 		AccessKeyID: submatches["access_key_id"],
 		Service:     submatches["service"],
 		Region:      submatches["region"],
@@ -126,7 +118,7 @@ func (c *center) parseAuthHeader(header string) (*AuthHeader, error) {
 	}, nil
 }
 
-func (a *AuthHeader) getAddress() (oid.Address, error) {
+func (a *authHeader) getAddress() (oid.Address, error) {
 	var addr oid.Address
 	if err := addr.DecodeString(strings.ReplaceAll(a.AccessKeyID, "0", "/")); err != nil {
 		return addr, apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)
@@ -134,12 +126,11 @@ func (a *AuthHeader) getAddress() (oid.Address, error) {
 	return addr, nil
 }
 
-func (c *center) Authenticate(r *http.Request) (*Box, error) {
+func (c *center) Authenticate(r *http.Request) (*accessbox.Box, error) {
 	var (
 		err                  error
-		authHdr              *AuthHeader
+		authHdr              *authHeader
 		signatureDateTimeStr string
-		needClientTime       bool
 	)
 
 	queryValues := r.URL.Query()
@@ -148,7 +139,7 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
 		if len(creds) != 5 || creds[4] != "aws4_request" {
 			return nil, fmt.Errorf("bad X-Amz-Credential")
 		}
-		authHdr = &AuthHeader{
+		authHdr = &authHeader{
 			AccessKeyID: creds[0],
 			Service:     creds[3],
 			Region:      creds[2],
@@ -175,7 +166,6 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
 			return nil, err
 		}
 		signatureDateTimeStr = r.Header.Get(AmzDate)
-		needClientTime = true
 	}
 
 	signatureDateTime, err := time.Parse("20060102T150405Z", signatureDateTimeStr)
@@ -202,15 +192,7 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
 		return nil, err
 	}
 
-	result := &Box{
-		AccessBox:   box,
-		AuthHeaders: authHdr,
-	}
-	if needClientTime {
-		result.ClientTime = signatureDateTime
-	}
-
-	return result, nil
+	return box, nil
 }
 
 func (c center) checkAccessKeyID(accessKeyID string) error {
@@ -227,7 +209,7 @@ func (c center) checkAccessKeyID(accessKeyID string) error {
 	return apiErrors.GetAPIError(apiErrors.ErrAccessDenied)
 }
 
-func (c *center) checkFormData(r *http.Request) (*Box, error) {
+func (c *center) checkFormData(r *http.Request) (*accessbox.Box, error) {
 	if err := r.ParseMultipartForm(maxFormSizeMemory); err != nil {
 		return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidArgument)
 	}
@@ -269,10 +251,10 @@ func (c *center) checkFormData(r *http.Request) (*Box, error) {
 		return nil, apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch)
 	}
 
-	return &Box{AccessBox: box}, nil
+	return box, nil
 }
 
-func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
+func cloneRequest(r *http.Request, authHeader *authHeader) *http.Request {
 	otherRequest := r.Clone(context.TODO())
 	otherRequest.Header = make(http.Header)
 
@@ -293,10 +275,9 @@ func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
 	return otherRequest
 }
 
-func (c *center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
+func (c *center) checkSign(authHeader *authHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
 	awsCreds := credentials.NewStaticCredentials(authHeader.AccessKeyID, box.Gate.AccessKey, "")
 	signer := v4.NewSigner(awsCreds)
-	signer.DisableURIPathEscaping = true
 
 	var signature string
 	if authHeader.IsPresigned {
@@ -312,6 +293,7 @@ func (c *center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *
 		}
 		signature = request.URL.Query().Get(AmzSignature)
 	} else {
+		signer.DisableURIPathEscaping = true
 		if _, err := signer.Sign(request, nil, authHeader.Service, authHeader.Region, signatureDateTime); err != nil {
 			return fmt.Errorf("failed to sign temporary HTTP request: %w", err)
 		}
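A note on the credential format handled above: the gateway packs a NeoFS object address (`<cid>/<oid>`, both parts Base58) into an S3 access key ID by replacing the slash with `0`, which is unambiguous because the Base58 alphabet never contains the character `0`. A minimal sketch of the round trip using the same SDK types (the helper names are ours, not the gateway's):

```go
// addressToAccessKeyID joins "<cid>/<oid>" into an S3-compatible access key ID.
func addressToAccessKeyID(addr oid.Address) string {
	return strings.ReplaceAll(addr.String(), "/", "0")
}

// accessKeyIDToAddress restores the object address, as authHeader.getAddress does.
func accessKeyIDToAddress(accessKeyID string) (oid.Address, error) {
	var addr oid.Address
	if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err != nil {
		return addr, fmt.Errorf("invalid access key ID: %w", err)
	}
	return addr, nil
}
```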
@@ -5,7 +5,7 @@ import (
 	"testing"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"github.com/nspcc-dev/neofs-s3-gw/api/errors"
 	"github.com/stretchr/testify/require"
 )
 
@@ -19,12 +19,12 @@ func TestAuthHeaderParse(t *testing.T) {
 	for _, tc := range []struct {
 		header   string
 		err      error
-		expected *AuthHeader
+		expected *authHeader
 	}{
 		{
 			header: defaultHeader,
 			err:    nil,
-			expected: &AuthHeader{
+			expected: &authHeader{
 				AccessKeyID: "oid0cid",
 				Service:     "s3",
 				Region:      "us-east-1",
@@ -54,29 +54,29 @@ func TestAuthHeaderGetAddress(t *testing.T) {
 	defaulErr := errors.GetAPIError(errors.ErrInvalidAccessKeyID)
 
 	for _, tc := range []struct {
-		authHeader *AuthHeader
+		authHeader *authHeader
 		err        error
 	}{
 		{
-			authHeader: &AuthHeader{
+			authHeader: &authHeader{
 				AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJM0HrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
 			},
 			err: nil,
 		},
 		{
-			authHeader: &AuthHeader{
+			authHeader: &authHeader{
 				AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJMHrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
 			},
 			err: defaulErr,
 		},
 		{
-			authHeader: &AuthHeader{
+			authHeader: &authHeader{
 				AccessKeyID: "oid0cid",
 			},
 			err: defaulErr,
 		},
 		{
-			authHeader: &AuthHeader{
+			authHeader: &authHeader{
 				AccessKeyID: "oidcid",
 			},
 			err: defaulErr,
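For reference while reading these test fixtures: parseAuthHeader splits a standard AWS SigV4 `Authorization` header. A sketch of its shape with illustrative values (not taken from the test data):

```go
// Illustrative SigV4 header accepted by parseAuthHeader; every value here is a placeholder.
const exampleAuthHeader = "AWS4-HMAC-SHA256 " +
	"Credential=oid0cid/20210809/us-east-1/s3/aws4_request, " +
	"SignedHeaders=host;x-amz-content-sha256;x-amz-date, " +
	"Signature=0123456789abcdef..."
```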
@@ -1,46 +0,0 @@
-package auth
-
-import (
-	"fmt"
-	"net/http"
-	"strings"
-	"time"
-
-	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/private/protocol/rest"
-)
-
-type RequestData struct {
-	Method   string
-	Endpoint string
-	Bucket   string
-	Object   string
-}
-
-type PresignData struct {
-	Service  string
-	Region   string
-	Lifetime time.Duration
-	SignTime time.Time
-}
-
-// PresignRequest forms pre-signed request to access objects without aws credentials.
-func PresignRequest(creds *credentials.Credentials, reqData RequestData, presignData PresignData) (*http.Request, error) {
-	urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, rest.EscapePath(reqData.Bucket, false), rest.EscapePath(reqData.Object, false))
-	req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create new request: %w", err)
-	}
-
-	req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))
-
-	signer := v4.NewSigner(creds)
-	signer.DisableURIPathEscaping = true
-
-	if _, err = signer.Presign(req, nil, presignData.Service, presignData.Region, presignData.Lifetime, presignData.SignTime); err != nil {
-		return nil, fmt.Errorf("presign: %w", err)
-	}
-
-	return req, nil
-}
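The deleted helper was driven roughly as follows (a sketch based on the TestCheckSign fixture in the next file; endpoint and key values are placeholders):

```go
creds := credentials.NewStaticCredentials(accessKeyID, secretKey, "")

req, err := PresignRequest(creds, RequestData{
	Method:   "GET",
	Endpoint: "http://localhost:8084",
	Bucket:   "my-bucket",
	Object:   "obj-name",
}, PresignData{
	Service:  "s3",
	Region:   "us-east-1",
	Lifetime: 10 * time.Minute,
	SignTime: time.Now().UTC(),
})
if err != nil {
	// handle error
}
_ = req // carries X-Amz-Signature and related parameters in its query string
```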
@@ -1,95 +0,0 @@
-package auth
-
-import (
-	"context"
-	"strings"
-	"testing"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/stretchr/testify/require"
-)
-
-var _ tokens.Credentials = (*credentialsMock)(nil)
-
-type credentialsMock struct {
-	boxes map[string]*accessbox.Box
-}
-
-func newTokensFrostfsMock() *credentialsMock {
-	return &credentialsMock{
-		boxes: make(map[string]*accessbox.Box),
-	}
-}
-
-func (m credentialsMock) addBox(addr oid.Address, box *accessbox.Box) {
-	m.boxes[addr.String()] = box
-}
-
-func (m credentialsMock) GetBox(_ context.Context, addr oid.Address) (*accessbox.Box, error) {
-	box, ok := m.boxes[addr.String()]
-	if !ok {
-		return nil, apistatus.ObjectNotFound{}
-	}
-
-	return box, nil
-}
-
-func (m credentialsMock) Put(context.Context, cid.ID, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error) {
-	return oid.Address{}, nil
-}
-
-func (m credentialsMock) Update(context.Context, oid.Address, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error) {
-	return oid.Address{}, nil
-}
-
-func TestCheckSign(t *testing.T) {
-	var accessKeyAddr oid.Address
-	err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
-	require.NoError(t, err)
-
-	accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
-	secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
-	awsCreds := credentials.NewStaticCredentials(accessKeyID, secretKey, "")
-
-	reqData := RequestData{
-		Method:   "GET",
-		Endpoint: "http://localhost:8084",
-		Bucket:   "my-bucket",
-		Object:   "@obj/name",
-	}
-	presignData := PresignData{
-		Service:  "s3",
-		Region:   "spb",
-		Lifetime: 10 * time.Minute,
-		SignTime: time.Now().UTC(),
-	}
-
-	req, err := PresignRequest(awsCreds, reqData, presignData)
-	require.NoError(t, err)
-
-	expBox := &accessbox.Box{
-		Gate: &accessbox.GateData{
-			AccessKey: secretKey,
-		},
-	}
-
-	mock := newTokensFrostfsMock()
-	mock.addBox(accessKeyAddr, expBox)
-
-	c := &center{
-		cli:     mock,
-		reg:     NewRegexpMatcher(authorizationFieldRegexp),
-		postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
-	}
-	box, err := c.Authenticate(req)
-	require.NoError(t, err)
-	require.EqualValues(t, expBox, box.AccessBox)
-}
@@ -790,8 +790,6 @@ const doubleSpace = "  "
 
 // stripExcessSpaces will rewrite the passed in slice's string values to not
 // contain multiple side-by-side spaces.
-//
-//nolint:revive
 func stripExcessSpaces(vals []string) {
 	var j, k, l, m, spaces int
 	for i, str := range vals {
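As the doc comment says, this vendored signer helper mirrors SigV4 canonicalization, which trims and collapses runs of spaces in header values before signing. A behavior sketch:

```go
vals := []string{"x  y", "a      b", "  padded  "}
stripExcessSpaces(vals)
// vals == []string{"x y", "a b", "padded"}
```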
api/cache/access_control.go vendored (2 changes)

@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"github.com/bluele/gcache"
+	"github.com/nspcc-dev/neofs-sdk-go/user"
 	"go.uber.org/zap"
 )
api/cache/accessbox.go vendored (4 changes)

@@ -4,9 +4,9 @@ import (
 	"fmt"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/bluele/gcache"
+	"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
 	"go.uber.org/zap"
 )
api/cache/buckets.go vendored (2 changes)

@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	"github.com/bluele/gcache"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
 	"go.uber.org/zap"
 )
api/cache/cache_test.go vendored (8 changes)

@@ -3,10 +3,10 @@ package cache
 import (
 	"testing"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
-	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
+	"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
+	cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test"
+	oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"
 	"go.uber.org/zap/zaptest/observer"
api/cache/names.go vendored (2 changes)

@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"time"
 
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/bluele/gcache"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
 	"go.uber.org/zap"
 )
api/cache/objects.go vendored (4 changes)

@@ -4,9 +4,9 @@ import (
 	"fmt"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/bluele/gcache"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
 	"go.uber.org/zap"
 )
api/cache/objects_test.go vendored (6 changes)

@@ -4,9 +4,9 @@ import (
 	"testing"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
+	objecttest "github.com/nspcc-dev/neofs-sdk-go/object/test"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"
 )
api/cache/objectslist.go vendored (6 changes)

@@ -6,9 +6,9 @@ import (
 	"strings"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"github.com/bluele/gcache"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
+	cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
 	"go.uber.org/zap"
 )
 
@@ -20,7 +20,7 @@ import (
 	After putting a record, it lives for a while (default value is 60 seconds).
 
 	When we receive a request from a user, we try to find the suitable and non-expired cache entry, go through the list
-	and get ObjectInfos from common object cache or with a request to FrostFS.
+	and get ObjectInfos from common object cache or with a request to NeoFS.
 
 	When we put an object into a container, we invalidate entries with prefixes that are prefixes of the object's name.
 */
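A sketch of the invalidation rule from the comment above (our helper, not the gateway's code): putting a new object must evict every cached listing whose prefix is a prefix of that object's name.

```go
// affectsListing reports whether putting objectName must evict a cached
// listing made for cachedPrefix.
func affectsListing(objectName, cachedPrefix string) bool {
	return strings.HasPrefix(objectName, cachedPrefix)
}

// Example: putting "photos/2023/cat.jpg" evicts listings cached for "",
// "photos/" and "photos/2023/", but not for "videos/".
```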
api/cache/objectslist_test.go vendored (6 changes)

@@ -4,9 +4,9 @@ import (
 	"testing"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
-	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
+	cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test"
+	oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"
 )
api/cache/system.go vendored (2 changes)

@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	"github.com/bluele/gcache"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
 	"go.uber.org/zap"
 )
@@ -4,9 +4,9 @@ import (
 	"encoding/xml"
 	"time"
 
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+	cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
+	"github.com/nspcc-dev/neofs-sdk-go/user"
 )
 
 const (
@@ -22,8 +22,7 @@ const (
 type (
 	// BucketInfo stores basic bucket data.
 	BucketInfo struct {
-		Name    string // container name from system attribute
-		Zone    string // container zone from system attribute
+		Name    string
 		CID     cid.ID
 		Owner   user.ID
 		Created time.Time
@@ -38,22 +37,21 @@ type (
 		IsDir          bool
 		IsDeleteMarker bool
 
-		Bucket        string
-		Name          string
-		Size          uint64
-		ContentType   string
-		Created       time.Time
-		CreationEpoch uint64
-		HashSum       string
-		Owner         user.ID
-		Headers       map[string]string
+		Bucket      string
+		Name        string
+		Size        int64
+		ContentType string
+		Created     time.Time
+		HashSum     string
+		Owner       user.ID
+		Headers     map[string]string
 	}
 
 	// NotificationInfo store info to send s3 notification.
 	NotificationInfo struct {
 		Name    string
 		Version string
-		Size    uint64
+		Size    int64
 		HashSum string
 	}
@@ -4,9 +4,9 @@ import (
 	"strconv"
 	"time"
 
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+	cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
+	"github.com/nspcc-dev/neofs-sdk-go/user"
 )
 
 const (
@@ -18,7 +17,6 @@ type NodeVersion struct {
 	BaseNodeVersion
 	DeleteMarker  *DeleteMarkerInfo
 	IsUnversioned bool
-	IsCombined    bool
 }
 
 func (v NodeVersion) IsDeleteMarker() bool {
@@ -26,7 +25,7 @@ func (v NodeVersion) IsDeleteMarker() bool {
 }
 
 // DeleteMarkerInfo is used to save object info if node in the tree service is delete marker.
-// We need this information because the "delete marker" object is no longer stored in FrostFS.
+// We need this information because the "delete marker" object is no longer stored in NeoFS.
 type DeleteMarkerInfo struct {
 	Created time.Time
 	Owner   user.ID
@@ -54,7 +53,7 @@ type BaseNodeVersion struct {
 	ParenID   uint64
 	OID       oid.ID
 	Timestamp uint64
-	Size      uint64
+	Size      int64
 	ETag      string
 	FilePath  string
 }
@@ -69,29 +68,29 @@ type ObjectTaggingInfo struct {
 type MultipartInfo struct {
 	// ID is node id in tree service.
 	// It's ignored when creating a new multipart upload.
-	ID            uint64
-	Key           string
-	UploadID      string
-	Owner         user.ID
-	Created       time.Time
-	Meta          map[string]string
-	CopiesNumbers []uint32
+	ID           uint64
+	Key          string
+	UploadID     string
+	Owner        user.ID
+	Created      time.Time
+	Meta         map[string]string
+	CopiesNumber uint32
 }
 
 // PartInfo is upload information about part.
 type PartInfo struct {
-	Key      string    `json:"key"`
-	UploadID string    `json:"uploadId"`
-	Number   int       `json:"number"`
-	OID      oid.ID    `json:"oid"`
-	Size     uint64    `json:"size"`
-	ETag     string    `json:"etag"`
-	Created  time.Time `json:"created"`
+	Key      string
+	UploadID string
+	Number   int
+	OID      oid.ID
+	Size     int64
+	ETag     string
+	Created  time.Time
}
 
 // ToHeaderString form short part representation to use in S3-Completed-Parts header.
 func (p *PartInfo) ToHeaderString() string {
-	return strconv.Itoa(p.Number) + "-" + strconv.FormatUint(p.Size, 10) + "-" + p.ETag
+	return strconv.Itoa(p.Number) + "-" + strconv.FormatInt(p.Size, 10) + "-" + p.ETag
 }
 
 // LockInfo is lock information to create appropriate tree node.
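For context on the Size retype above, ToHeaderString emits `<number>-<size>-<etag>`, e.g. (illustrative values):

```go
p := PartInfo{Number: 3, Size: 5242880, ETag: "6d0bb00954ceb7fbee436bb55a8397a9"}
fmt.Println(p.ToHeaderString()) // 3-5242880-6d0bb00954ceb7fbee436bb55a8397a9
```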
@@ -3,8 +3,6 @@ package errors
 import (
 	"fmt"
 	"net/http"
-
-	frosterrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
 )
 
 type (
@@ -92,7 +90,6 @@ const (
 	ErrMissingFields
 	ErrMissingCredTag
 	ErrCredMalformed
-	ErrInvalidLocationConstraint
 	ErrInvalidRegion
 	ErrInvalidServiceS3
 	ErrInvalidServiceSTS
@@ -177,10 +174,8 @@ const (
 	// Add new extended error codes here.
 	ErrInvalidObjectName
 	ErrOperationTimedOut
-	ErrGatewayTimeout
 	ErrOperationMaxedOut
 	ErrInvalidRequest
-	ErrInvalidRequestLargeCopy
 	ErrInvalidStorageClass
 
 	ErrMalformedJSON
@@ -685,12 +680,6 @@ var errorCodes = errorCodeMap{
 		Description:    "Error parsing the X-Amz-Credential parameter; the region is wrong;",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
-	ErrInvalidLocationConstraint: {
-		ErrCode:        ErrInvalidLocationConstraint,
-		Code:           "InvalidLocationConstraint",
-		Description:    "The specified location (Region) constraint is not valid.",
-		HTTPStatusCode: http.StatusBadRequest,
-	},
 	ErrInvalidRegion: {
 		ErrCode: ErrInvalidRegion,
 		Code:    "InvalidRegion",
@@ -997,7 +986,7 @@ var errorCodes = errorCodeMap{
 	ErrNotSupported: {
 		ErrCode:        ErrNotSupported,
 		Code:           "BadRequest",
-		Description:    "Not supported by FrostFS S3 Gateway",
+		Description:    "Not supported by NeoFS S3 Gateway",
 		HTTPStatusCode: http.StatusNotImplemented,
 	},
 	ErrInvalidEncryptionMethod: {
@@ -1128,12 +1117,6 @@ var errorCodes = errorCodeMap{
 		Description:    "A timeout occurred while trying to lock a resource, please reduce your request rate",
 		HTTPStatusCode: http.StatusServiceUnavailable,
 	},
-	ErrGatewayTimeout: {
-		ErrCode:        ErrGatewayTimeout,
-		Code:           "GatewayTimeout",
-		Description:    "The server is acting as a gateway and cannot get a response in time",
-		HTTPStatusCode: http.StatusGatewayTimeout,
-	},
 	ErrOperationMaxedOut: {
 		ErrCode: ErrOperationMaxedOut,
 		Code:    "SlowDown",
@@ -1162,12 +1145,6 @@ var errorCodes = errorCodeMap{
 		Description:    "Invalid Request",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
-	ErrInvalidRequestLargeCopy: {
-		ErrCode:        ErrInvalidRequestLargeCopy,
-		Code:           "InvalidRequest",
-		Description:    "CopyObject request made on objects larger than 5GB in size.",
-		HTTPStatusCode: http.StatusBadRequest,
-	},
 	ErrIncorrectContinuationToken: {
 		ErrCode: ErrIncorrectContinuationToken,
 		Code:    "InvalidArgument",
@@ -1709,7 +1686,6 @@ var errorCodes = errorCodeMap{
 
 // IsS3Error checks if the provided error is a specific s3 error.
 func IsS3Error(err error, code ErrorCode) bool {
-	err = frosterrors.UnwrapErr(err)
 	e, ok := err.(Error)
 	return ok && e.ErrCode == code
 }
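Callers match API errors by code; with the UnwrapErr call dropped in the last hunk, wrapped errors will no longer match. A usage sketch (the surrounding call and import alias are hypothetical):

```go
import s3errors "github.com/nspcc-dev/neofs-s3-gw/api/errors"

if err := putObject(); err != nil { // putObject is a placeholder caller
	if s3errors.IsS3Error(err, s3errors.ErrNoSuchKey) {
		// map to HTTP 404
	}
}
```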
@@ -14,17 +14,16 @@ import (
 	"strconv"
 	"strings"
 
-	v2acl "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+	v2acl "github.com/nspcc-dev/neofs-api-go/v2/acl"
+	"github.com/nspcc-dev/neofs-s3-gw/api"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
+	"github.com/nspcc-dev/neofs-s3-gw/api/errors"
+	"github.com/nspcc-dev/neofs-s3-gw/api/layer"
+	"github.com/nspcc-dev/neofs-sdk-go/eacl"
+	"github.com/nspcc-dev/neofs-sdk-go/object"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
+	"github.com/nspcc-dev/neofs-sdk-go/session"
 	"go.uber.org/zap"
 )
 
@@ -159,93 +158,8 @@ func (s ServiceRecord) ToEACLRecord() *eacl.Record {
 	return serviceRecord
 }
 
-var (
-	errInvalidStatement = stderrors.New("invalid statement")
-	errInvalidPrincipal = stderrors.New("invalid principal")
-)
-
-func (s *statement) UnmarshalJSON(data []byte) error {
-	var statementMap map[string]interface{}
-	if err := json.Unmarshal(data, &statementMap); err != nil {
-		return err
-	}
-
-	sidField, ok := statementMap["Sid"]
-	if ok {
-		if s.Sid, ok = sidField.(string); !ok {
-			return errInvalidStatement
-		}
-	}
-
-	effectField, ok := statementMap["Effect"]
-	if ok {
-		if s.Effect, ok = effectField.(string); !ok {
-			return errInvalidStatement
-		}
-	}
-
-	principalField, ok := statementMap["Principal"]
-	if ok {
-		principalMap, ok := principalField.(map[string]interface{})
-		if !ok {
-			return errInvalidPrincipal
-		}
-
-		awsField, ok := principalMap["AWS"]
-		if ok {
-			if s.Principal.AWS, ok = awsField.(string); !ok {
-				return fmt.Errorf("%w: 'AWS' field must be string", errInvalidPrincipal)
-			}
-		}
-
-		canonicalUserField, ok := principalMap["CanonicalUser"]
-		if ok {
-			if s.Principal.CanonicalUser, ok = canonicalUserField.(string); !ok {
-				return errInvalidPrincipal
-			}
-		}
-	}
-
-	actionField, ok := statementMap["Action"]
-	if ok {
-		switch actionField := actionField.(type) {
-		case []interface{}:
-			s.Action = make([]string, len(actionField))
-			for i, action := range actionField {
-				if s.Action[i], ok = action.(string); !ok {
-					return errInvalidStatement
-				}
-			}
-		case string:
-			s.Action = []string{actionField}
-		default:
-			return errInvalidStatement
-		}
-	}
-
-	resourceField, ok := statementMap["Resource"]
-	if ok {
-		switch resourceField := resourceField.(type) {
-		case []interface{}:
-			s.Resource = make([]string, len(resourceField))
-			for i, action := range resourceField {
-				if s.Resource[i], ok = action.(string); !ok {
-					return errInvalidStatement
-				}
-			}
-		case string:
-			s.Resource = []string{resourceField}
-		default:
-			return errInvalidStatement
-		}
-	}
-
-	return nil
-}
-
 func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := api.GetReqInfo(r.Context())
 
 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
@@ -253,13 +167,13 @@ func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	bucketACL, err := h.obj.GetBucketACL(ctx, bktInfo)
+	bucketACL, err := h.obj.GetBucketACL(r.Context(), bktInfo)
 	if err != nil {
 		h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
 		return
 	}
 
-	if err = middleware.EncodeToResponse(w, h.encodeBucketACL(ctx, bktInfo.Name, bucketACL)); err != nil {
+	if err = api.EncodeToResponse(w, h.encodeBucketACL(bktInfo.Name, bucketACL)); err != nil {
 		h.logAndSendError(w, "something went wrong", reqInfo, err)
 		return
 	}
@@ -283,7 +197,7 @@ func (h *handler) bearerTokenIssuerKey(ctx context.Context) (*keys.PublicKey, er
 }
 
 func (h *handler) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := middleware.GetReqInfo(r.Context())
+	reqInfo := api.GetReqInfo(r.Context())
 	key, err := h.bearerTokenIssuerKey(r.Context())
 	if err != nil {
 		h.logAndSendError(w, "couldn't get bearer token issuer key", reqInfo, err)
@@ -367,8 +281,7 @@ func (h *handler) updateBucketACL(r *http.Request, astChild *ast, bktInfo *data.
 }
 
 func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := api.GetReqInfo(r.Context())
 
 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
@@ -376,7 +289,7 @@ func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	bucketACL, err := h.obj.GetBucketACL(ctx, bktInfo)
+	bucketACL, err := h.obj.GetBucketACL(r.Context(), bktInfo)
 	if err != nil {
 		h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
 		return
@@ -388,28 +301,27 @@ func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
 		VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
 	}
 
-	objInfo, err := h.obj.GetObjectInfo(ctx, prm)
+	objInfo, err := h.obj.GetObjectInfo(r.Context(), prm)
 	if err != nil {
 		h.logAndSendError(w, "could not object info", reqInfo, err)
 		return
 	}
 
-	if err = middleware.EncodeToResponse(w, h.encodeObjectACL(ctx, bucketACL, reqInfo.BucketName, objInfo.VersionID())); err != nil {
+	if err = api.EncodeToResponse(w, h.encodeObjectACL(bucketACL, reqInfo.BucketName, objInfo.VersionID())); err != nil {
 		h.logAndSendError(w, "failed to encode response", reqInfo, err)
 	}
 }
 
 func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := api.GetReqInfo(r.Context())
 	versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
-	key, err := h.bearerTokenIssuerKey(ctx)
+	key, err := h.bearerTokenIssuerKey(r.Context())
 	if err != nil {
 		h.logAndSendError(w, "couldn't get gate key", reqInfo, err)
 		return
 	}
 
-	token, err := getSessionTokenSetEACL(ctx)
+	token, err := getSessionTokenSetEACL(r.Context())
 	if err != nil {
 		h.logAndSendError(w, "couldn't get eacl token", reqInfo, err)
 		return
@@ -427,7 +339,7 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
 		VersionID: versionID,
 	}
 
-	objInfo, err := h.obj.GetObjectInfo(ctx, p)
+	objInfo, err := h.obj.GetObjectInfo(r.Context(), p)
 	if err != nil {
 		h.logAndSendError(w, "could not get object info", reqInfo, err)
 		return
@@ -469,15 +381,15 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
 		BktInfo: bktInfo,
 		ReqInfo: reqInfo,
 	}
-	if err = h.sendNotifications(ctx, s); err != nil {
-		h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
+	if err = h.sendNotifications(r.Context(), s); err != nil {
+		h.log.Error("couldn't send notification: %w", zap.Error(err))
 	}
 	w.WriteHeader(http.StatusOK)
 }
 
 func (h *handler) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := middleware.GetReqInfo(r.Context())
+	reqInfo := api.GetReqInfo(r.Context())
 
 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
@@ -508,14 +420,14 @@ func checkOwner(info *data.BucketInfo, owner string) error {
 
 	// may need to convert owner to appropriate format
 	if info.Owner.String() != owner {
-		return fmt.Errorf("%w: mismatch owner", errors.GetAPIError(errors.ErrAccessDenied))
+		return errors.GetAPIError(errors.ErrAccessDenied)
 	}
 
 	return nil
 }
 
 func (h *handler) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := middleware.GetReqInfo(r.Context())
+	reqInfo := api.GetReqInfo(r.Context())
 
 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
@@ -1414,7 +1326,7 @@ func isWriteOperation(op eacl.Operation) bool {
 	return op == eacl.OperationDelete || op == eacl.OperationPut
 }
 
-func (h *handler) encodeObjectACL(ctx context.Context, bucketACL *layer.BucketACL, bucketName, objectVersion string) *AccessControlPolicy {
+func (h *handler) encodeObjectACL(bucketACL *layer.BucketACL, bucketName, objectVersion string) *AccessControlPolicy {
 	res := &AccessControlPolicy{
 		Owner: Owner{
 			ID: bucketACL.Info.Owner.String(),
@@ -1460,7 +1372,7 @@ func (h *handler) encodeObjectACL(ctx context.Context, bucketACL *layer.BucketAC
 		if read {
 			permission = aclFullControl
 		} else {
-			h.reqLogger(ctx).Warn("some acl not fully mapped")
+			h.log.Warn("some acl not fully mapped")
 		}
 
 		var grantee *Grantee
@@ -1482,8 +1394,8 @@ func (h *handler) encodeObjectACL(ctx context.Context, bucketACL *layer.BucketAC
 	return res
 }
 
-func (h *handler) encodeBucketACL(ctx context.Context, bucketName string, bucketACL *layer.BucketACL) *AccessControlPolicy {
-	return h.encodeObjectACL(ctx, bucketACL, bucketName, "")
+func (h *handler) encodeBucketACL(bucketName string, bucketACL *layer.BucketACL) *AccessControlPolicy {
+	return h.encodeObjectACL(bucketACL, bucketName, "")
 }
 
 func contains(list []eacl.Operation, op eacl.Operation) bool {
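The removed UnmarshalJSON exists because AWS policy JSON allows `Action` and `Resource` to be either a single string or an array of strings. The underlying pattern, reduced to its core (a generic sketch, not the gateway's type):

```go
// stringOrSlice accepts both "s3:GetObject" and ["s3:GetObject", "s3:GetObjectVersion"].
type stringOrSlice []string

func (s *stringOrSlice) UnmarshalJSON(data []byte) error {
	var one string
	if err := json.Unmarshal(data, &one); err == nil {
		*s = []string{one}
		return nil
	}
	var many []string
	if err := json.Unmarshal(data, &many); err != nil {
		return err
	}
	*s = many
	return nil
}
```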
@@ -11,20 +11,17 @@ import (
 	"fmt"
 	"io"
 	"net/http"
-	"net/http/httptest"
 	"testing"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+	"github.com/nspcc-dev/neofs-s3-gw/api"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
+	"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
+	"github.com/nspcc-dev/neofs-sdk-go/bearer"
+	"github.com/nspcc-dev/neofs-sdk-go/eacl"
+	"github.com/nspcc-dev/neofs-sdk-go/object"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
+	"github.com/nspcc-dev/neofs-sdk-go/session"
 	"github.com/stretchr/testify/require"
 )
 
@@ -1355,85 +1352,6 @@ func TestBucketPolicy(t *testing.T) {
 	}
 }
 
-func TestBucketPolicyUnmarshal(t *testing.T) {
-	for _, tc := range []struct {
-		name   string
-		policy string
-	}{
-		{
-			name: "action/resource array",
-			policy: `
-{
-	"Version": "2012-10-17",
-	"Statement": [{
-		"Principal": {
-			"AWS": "arn:aws:iam::111122223333:role/JohnDoe"
-		},
-		"Effect": "Allow",
-		"Action": [
-			"s3:GetObject",
-			"s3:GetObjectVersion"
-		],
-		"Resource": [
-			"arn:aws:s3:::DOC-EXAMPLE-BUCKET/*",
-			"arn:aws:s3:::DOC-EXAMPLE-BUCKET2/*"
-		]
-	}]
-}
-`,
-		},
-		{
-			name: "action/resource string",
-			policy: `
-{
-	"Version": "2012-10-17",
-	"Statement": [{
-		"Principal": {
-			"AWS": "arn:aws:iam::111122223333:role/JohnDoe"
-		},
-		"Effect": "Deny",
-		"Action": "s3:GetObject",
-		"Resource": "arn:aws:s3:::DOC-EXAMPLE-BUCKET/*"
-	}]
-}
-`,
-		},
-	} {
-		t.Run(tc.name, func(t *testing.T) {
-			bktPolicy := &bucketPolicy{}
-			err := json.Unmarshal([]byte(tc.policy), bktPolicy)
-			require.NoError(t, err)
-		})
-	}
-}
-
-func TestPutBucketPolicy(t *testing.T) {
-	bktPolicy := `
-{
-	"Version": "2012-10-17",
-	"Statement": [{
-		"Principal": {
-			"AWS": "*"
-		},
-		"Effect": "Deny",
-		"Action": "s3:GetObject",
-		"Resource": "arn:aws:s3:::bucket-for-policy/*"
-	}]
-}
-`
-	hc := prepareHandlerContext(t)
-	bktName := "bucket-for-policy"
-
-	box, _ := createAccessBox(t)
-	createBucket(t, hc, bktName, box)
-
-	w, r := prepareTestPayloadRequest(hc, bktName, "", bytes.NewReader([]byte(bktPolicy)))
-	ctx := context.WithValue(r.Context(), middleware.BoxData, box)
-	r = r.WithContext(ctx)
-	hc.Handler().PutBucketPolicyHandler(w, r)
-	assertStatus(hc.t, w, http.StatusOK)
-}
-
 func getBucketPolicy(hc *handlerContext, bktName string) *bucketPolicy {
 	w, r := prepareTestRequest(hc, bktName, "", nil)
 	hc.Handler().GetBucketPolicyHandler(w, r)
@@ -1450,7 +1368,7 @@ func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy *bucketPolicy
 	require.NoError(hc.t, err)
 
 	w, r := prepareTestPayloadRequest(hc, bktName, "", bytes.NewReader(body))
-	ctx := context.WithValue(r.Context(), middleware.BoxData, box)
+	ctx := context.WithValue(r.Context(), api.BoxData, box)
 	r = r.WithContext(ctx)
 	hc.Handler().PutBucketPolicyHandler(w, r)
 	assertStatus(hc.t, w, status)
@@ -1483,14 +1401,9 @@ func createAccessBox(t *testing.T) (*accessbox.Box, *keys.PrivateKey) {
 
 	tok := new(session.Container)
 	tok.ForVerb(session.VerbContainerSetEACL)
-	err = tok.Sign(key.PrivateKey)
-	require.NoError(t, err)
 
 	tok2 := new(session.Container)
 	tok2.ForVerb(session.VerbContainerPut)
-	err = tok2.Sign(key.PrivateKey)
-	require.NoError(t, err)
-
 	box := &accessbox.Box{
 		Gate: &accessbox.GateData{
 			SessionTokens: []*session.Container{tok, tok2},
@@ -1501,34 +1414,24 @@ func createAccessBox(t *testing.T) (*accessbox.Box, *keys.PrivateKey) {
 	return box, key
 }
 
-func createBucket(t *testing.T, hc *handlerContext, bktName string, box *accessbox.Box) *data.BucketInfo {
-	w := createBucketBase(hc, bktName, box)
+func createBucket(t *testing.T, tc *handlerContext, bktName string, box *accessbox.Box) *data.BucketInfo {
+	w, r := prepareTestRequest(tc, bktName, "", nil)
+	ctx := context.WithValue(r.Context(), api.BoxData, box)
+	r = r.WithContext(ctx)
+	tc.Handler().CreateBucketHandler(w, r)
 	assertStatus(t, w, http.StatusOK)
 
-	bktInfo, err := hc.Layer().GetBucketInfo(hc.Context(), bktName)
+	bktInfo, err := tc.Layer().GetBucketInfo(tc.Context(), bktName)
 	require.NoError(t, err)
 	return bktInfo
 }
 
-func createBucketAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, code s3errors.ErrorCode) {
-	w := createBucketBase(hc, bktName, box)
-	assertS3Error(hc.t, w, s3errors.GetAPIError(code))
-}
-
-func createBucketBase(hc *handlerContext, bktName string, box *accessbox.Box) *httptest.ResponseRecorder {
-	w, r := prepareTestRequest(hc, bktName, "", nil)
-	ctx := context.WithValue(r.Context(), middleware.BoxData, box)
-	r = r.WithContext(ctx)
-	hc.Handler().CreateBucketHandler(w, r)
-	return w
-}
-
 func putBucketACL(t *testing.T, tc *handlerContext, bktName string, box *accessbox.Box, header map[string]string) {
 	w, r := prepareTestRequest(tc, bktName, "", nil)
 	for key, val := range header {
 		r.Header.Set(key, val)
 	}
-	ctx := context.WithValue(r.Context(), middleware.BoxData, box)
+	ctx := context.WithValue(r.Context(), api.BoxData, box)
 	r = r.WithContext(ctx)
 	tc.Handler().PutBucketACLHandler(w, r)
 	assertStatus(t, w, http.StatusOK)
@@ -1,17 +1,11 @@
 package handler
 
 import (
-	"encoding/xml"
 	"errors"
-	"fmt"
-	"io"
-	"strconv"
-	"strings"
-	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+	"github.com/nspcc-dev/neofs-s3-gw/api"
+	"github.com/nspcc-dev/neofs-s3-gw/api/layer"
+	"github.com/nspcc-dev/neofs-sdk-go/netmap"
 	"go.uber.org/zap"
 )
 
@@ -25,39 +19,21 @@ type (
 
 	Notificator interface {
 		SendNotifications(topics map[string]string, p *SendNotificationParams) error
-		SendTestNotification(topic, bucketName, requestID, HostID string, now time.Time) error
+		SendTestNotification(topic, bucketName, requestID, HostID string) error
 	}
 
 	// Config contains data which handler needs to keep.
 	Config struct {
-		Policy                     PlacementPolicy
-		XMLDecoder                 XMLDecoderProvider
-		DefaultMaxAge              int
-		NotificatorEnabled         bool
-		ResolveZoneList            []string
-		IsResolveListAllow         bool // True if ResolveZoneList contains allowed zones
-		CompleteMultipartKeepalive time.Duration
-		Kludge                     KludgeSettings
-	}
-
-	PlacementPolicy interface {
-		DefaultPlacementPolicy() netmap.PlacementPolicy
-		PlacementPolicy(string) (netmap.PlacementPolicy, bool)
-		CopiesNumbers(string) ([]uint32, bool)
-		DefaultCopiesNumbers() []uint32
-	}
-
-	XMLDecoderProvider interface {
-		NewCompleteMultipartDecoder(io.Reader) *xml.Decoder
-	}
-
-	KludgeSettings interface {
-		BypassContentEncodingInChunks() bool
+		DefaultPolicy      netmap.PlacementPolicy
+		DefaultMaxAge      int
+		NotificatorEnabled bool
+		TLSEnabled         bool
+		CopiesNumber       uint32
 	}
 )
 
 const (
-	// DefaultPolicy is a default policy of placing containers in FrostFS if it's not set at the request.
+	// DefaultPolicy is a default policy of placing containers in NeoFS if it's not set at the request.
 	DefaultPolicy = "REP 3"
 	// DefaultCopiesNumber is a default number of object copies that is enough to consider put successful if it's not set in config.
 	DefaultCopiesNumber uint32 = 0
@@ -69,7 +45,7 @@ var _ api.Handler = (*handler)(nil)
 func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg *Config) (api.Handler, error) {
 	switch {
 	case obj == nil:
-		return nil, errors.New("empty FrostFS Object Layer")
+		return nil, errors.New("empty NeoFS Object Layer")
 	case log == nil:
 		return nil, errors.New("empty logger")
 	}
@@ -87,44 +63,3 @@ func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg *Config
 		notificator: notificator,
 	}, nil
 }
-
-// pickCopiesNumbers chooses the return values following this logic:
-// 1) array of copies numbers sent in request's header has the highest priority.
-// 2) array of copies numbers with corresponding location constraint provided in the config file.
-// 3) default copies number from the config file wrapped into array.
-func (h *handler) pickCopiesNumbers(metadata map[string]string, locationConstraint string) ([]uint32, error) {
-	copiesNumbersStr, ok := metadata[layer.AttributeFrostfsCopiesNumber]
-	if ok {
-		result, err := parseCopiesNumbers(copiesNumbersStr)
-		if err != nil {
-			return nil, err
-		}
-		return result, nil
-	}
-
-	copiesNumbers, ok := h.cfg.Policy.CopiesNumbers(locationConstraint)
-	if ok {
-		return copiesNumbers, nil
-	}
-
-	return h.cfg.Policy.DefaultCopiesNumbers(), nil
-}
-
-func parseCopiesNumbers(copiesNumbersStr string) ([]uint32, error) {
-	var result []uint32
-	copiesNumbersSplit := strings.Split(copiesNumbersStr, ",")
-
-	for i := range copiesNumbersSplit {
-		item := strings.ReplaceAll(copiesNumbersSplit[i], " ", "")
-		if len(item) == 0 {
-			continue
-		}
-		copiesNumber, err := strconv.ParseUint(item, 10, 32)
-		if err != nil {
-			return nil, fmt.Errorf("pasrse copies number: %w", err)
-		}
-		result = append(result, uint32(copiesNumber))
-	}
-
-	return result, nil
-}
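Behavior of the removed parser, as pinned down by the test file deleted below: spaces are ignored and empty items (for example, a trailing comma) are skipped.

```go
nums, err := parseCopiesNumbers("11, 12, 13, ")
// err == nil, nums == []uint32{11, 12, 13}
```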
@@ -1,71 +0,0 @@
-package handler
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-func TestCopiesNumberPicker(t *testing.T) {
-	var locationConstraints = map[string][]uint32{}
-	locationConstraint1 := "one"
-	locationConstraint2 := "two"
-	locationConstraints[locationConstraint1] = []uint32{2, 3, 4}
-
-	config := &Config{
-		Policy: &placementPolicyMock{
-			copiesNumbers:        locationConstraints,
-			defaultCopiesNumbers: []uint32{1},
-		},
-	}
-	h := handler{
-		cfg: config,
-	}
-
-	metadata := map[string]string{}
-
-	t.Run("pick default copies number", func(t *testing.T) {
-		metadata["somekey1"] = "5, 6, 7"
-		expectedCopiesNumbers := []uint32{1}
-
-		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
-		require.NoError(t, err)
-		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
-	})
-
-	t.Run("pick copies number vector according to location constraint", func(t *testing.T) {
-		metadata["somekey2"] = "6, 7, 8"
-		expectedCopiesNumbers := []uint32{2, 3, 4}
-
-		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint1)
-		require.NoError(t, err)
-		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
-	})
-
-	t.Run("pick copies number from metadata", func(t *testing.T) {
-		metadata["frostfs-copies-number"] = "7, 8, 9"
-		expectedCopiesNumbers := []uint32{7, 8, 9}
-
-		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
-		require.NoError(t, err)
-		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
-	})
-
-	t.Run("pick copies number from metadata with no space", func(t *testing.T) {
-		metadata["frostfs-copies-number"] = "7,8,9"
-		expectedCopiesNumbers := []uint32{7, 8, 9}
-
-		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
-		require.NoError(t, err)
-		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
-	})
-
-	t.Run("pick copies number from metadata with trailing comma", func(t *testing.T) {
-		metadata["frostfs-copies-number"] = "11, 12, 13, "
-		expectedCopiesNumbers := []uint32{11, 12, 13}
-
-		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
-		require.NoError(t, err)
-		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
-	})
-}
@@ -6,11 +6,10 @@ import (
 	"strconv"
 	"strings"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"github.com/nspcc-dev/neofs-s3-gw/api"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
+	"github.com/nspcc-dev/neofs-s3-gw/api/errors"
+	"github.com/nspcc-dev/neofs-s3-gw/api/layer"
 	"go.uber.org/zap"
 )
 
@@ -18,7 +17,7 @@ type (
 	GetObjectAttributesResponse struct {
 		ETag         string       `xml:"ETag,omitempty"`
 		Checksum     *Checksum    `xml:"Checksum,omitempty"`
-		ObjectSize   uint64       `xml:"ObjectSize,omitempty"`
+		ObjectSize   int64        `xml:"ObjectSize,omitempty"`
 		StorageClass string       `xml:"StorageClass,omitempty"`
 		ObjectParts  *ObjectParts `xml:"ObjectParts,omitempty"`
 	}
@@ -68,7 +67,7 @@ var validAttributes = map[string]struct{}{
 }
 
 func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Request) {
-	reqInfo := middleware.GetReqInfo(r.Context())
+	reqInfo := api.GetReqInfo(r.Context())
 
 	params, err := parseGetObjectAttributeArgs(r)
 	if err != nil {
@@ -95,7 +94,7 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
 	}
 	info := extendedInfo.ObjectInfo
 
-	encryptionParams, err := formEncryptionParams(r)
+	encryptionParams, err := h.formEncryptionParams(r.Header)
 	if err != nil {
 		h.logAndSendError(w, "invalid sse headers", reqInfo, err)
 		return
@@ -124,7 +123,7 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
 	}
 
 	writeAttributesHeaders(w.Header(), extendedInfo, bktSettings.Unversioned())
-	if err = middleware.EncodeToResponse(w, response); err != nil {
+	if err = api.EncodeToResponse(w, response); err != nil {
 		h.logAndSendError(w, "something went wrong", reqInfo, err)
 	}
 }
@@ -4,7 +4,7 @@ import (
 	"strings"
 	"testing"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"github.com/nspcc-dev/neofs-s3-gw/api"
 	"github.com/stretchr/testify/require"
 )
@ -6,13 +6,12 @@ import (
|
|||
"regexp"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||
"github.com/nspcc-dev/neofs-s3-gw/api/auth"
|
||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
||||
"github.com/nspcc-dev/neofs-sdk-go/session"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
@ -47,8 +46,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
tagSet map[string]string
|
||||
sessionTokenEACL *session.Container
|
||||
|
||||
ctx = r.Context()
|
||||
reqInfo = middleware.GetReqInfo(ctx)
|
||||
reqInfo = api.GetReqInfo(r.Context())
|
||||
|
||||
containsACL = containsACLHeaders(r)
|
||||
)
|
||||
|
@ -86,45 +84,26 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
settings, err := h.obj.GetBucketSettings(ctx, dstBktInfo)
|
||||
settings, err := h.obj.GetBucketSettings(r.Context(), dstBktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
if containsACL {
|
||||
if sessionTokenEACL, err = getSessionTokenSetEACL(ctx); err != nil {
|
||||
if sessionTokenEACL, err = getSessionTokenSetEACL(r.Context()); err != nil {
|
||||
h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
|
||||
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), srcObjPrm)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not find object", reqInfo, err)
|
||||
return
|
||||
}
|
||||
srcObjInfo := extendedSrcObjInfo.ObjectInfo
|
||||
|
||||
encryptionParams, err := formEncryptionParams(r)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
|
||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
if srcSize, err := getObjectSize(extendedSrcObjInfo, encryptionParams); err != nil {
|
||||
h.logAndSendError(w, "failed to get source object size", reqInfo, err)
|
||||
return
|
||||
} else if srcSize > layer.UploadMaxSize { //https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
|
||||
h.logAndSendError(w, "too bid object to copy with single copy operation, use multipart upload copy instead", reqInfo, errors.GetAPIError(errors.ErrInvalidRequestLargeCopy))
|
||||
return
|
||||
}
|
||||
|
||||
args, err := parseCopyObjectArgs(r.Header)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not parse request params", reqInfo, err)
|
||||
|
@@ -156,13 +135,24 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
			NodeVersion: extendedSrcObjInfo.NodeVersion,
		}

		_, tagSet, err = h.obj.GetObjectTagging(ctx, tagPrm)
		_, tagSet, err = h.obj.GetObjectTagging(r.Context(), tagPrm)
		if err != nil {
			h.logAndSendError(w, "could not get object tagging", reqInfo, err)
			return
		}
	}

	encryptionParams, err := h.formEncryptionParams(r.Header)
	if err != nil {
		h.logAndSendError(w, "invalid sse headers", reqInfo, err)
		return
	}

	if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
		h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
		return
	}

	if err = checkPreconditions(srcObjInfo, args.Conditional); err != nil {
		h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
		return

@@ -172,44 +162,43 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
		if len(srcObjInfo.ContentType) > 0 {
			srcObjInfo.Headers[api.ContentType] = srcObjInfo.ContentType
		}
		metadata = makeCopyMap(srcObjInfo.Headers)
		delete(metadata, layer.MultipartObjectSize) // object payload will be real one rather than list of compound parts
		metadata = srcObjInfo.Headers
	} else if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
		metadata[api.ContentType] = contentType
	}

	params := &layer.CopyObjectParams{
		SrcVersioned: srcObjPrm.Versioned(),
		SrcObject:    srcObjInfo,
		ScrBktInfo:   srcObjPrm.BktInfo,
		DstBktInfo:   dstBktInfo,
		DstObject:    reqInfo.ObjectName,
		SrcSize:      srcObjInfo.Size,
		Header:       metadata,
		Encryption:   encryptionParams,
	}

	params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, dstBktInfo.LocationConstraint)
	copiesNumber, err := getCopiesNumberOrDefault(metadata, h.cfg.CopiesNumber)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return
	}

	params.Lock, err = formObjectLock(ctx, dstBktInfo, settings.LockConfiguration, r.Header)
	params := &layer.CopyObjectParams{
		SrcObject:   srcObjInfo,
		ScrBktInfo:  srcObjPrm.BktInfo,
		DstBktInfo:  dstBktInfo,
		DstObject:   reqInfo.ObjectName,
		SrcSize:     srcObjInfo.Size,
		Header:      metadata,
		Encryption:  encryptionParams,
		CopiesNuber: copiesNumber,
	}

	params.Lock, err = formObjectLock(dstBktInfo, settings.LockConfiguration, r.Header)
	if err != nil {
		h.logAndSendError(w, "could not form object lock", reqInfo, err)
		return
	}

	additional := []zap.Field{zap.String("src_bucket_name", srcBucket), zap.String("src_object_name", srcObject)}
	extendedDstObjInfo, err := h.obj.CopyObject(ctx, params)
	extendedDstObjInfo, err := h.obj.CopyObject(r.Context(), params)
	if err != nil {
		h.logAndSendError(w, "couldn't copy object", reqInfo, err, additional...)
		return
	}
	dstObjInfo := extendedDstObjInfo.ObjectInfo

	if err = middleware.EncodeToResponse(w, &CopyObjectResponse{LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339), ETag: dstObjInfo.HashSum}); err != nil {
	if err = api.EncodeToResponse(w, &CopyObjectResponse{LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339), ETag: dstObjInfo.HashSum}); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
		return
	}

@@ -227,7 +216,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
		SessionToken: sessionTokenEACL,
	}

	if err = h.obj.PutBucketACL(ctx, p); err != nil {
	if err = h.obj.PutBucketACL(r.Context(), p); err != nil {
		h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
		return
	}

@@ -243,13 +232,16 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
			TagSet:      tagSet,
			NodeVersion: extendedDstObjInfo.NodeVersion,
		}
		if _, err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
		if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
			h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
			return
		}
	}

	h.reqLogger(ctx).Info("object is copied", zap.Stringer("object_id", dstObjInfo.ID))
	h.log.Info("object is copied",
		zap.String("bucket", dstObjInfo.Bucket),
		zap.String("object", dstObjInfo.Name),
		zap.Stringer("object_id", dstObjInfo.ID))

	s := &SendNotificationParams{
		Event: EventObjectCreatedCopy,

@@ -257,8 +249,8 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
		BktInfo: dstBktInfo,
		ReqInfo: reqInfo,
	}
	if err = h.sendNotifications(ctx, s); err != nil {
		h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
	if err = h.sendNotifications(r.Context(), s); err != nil {
		h.log.Error("couldn't send notification: %w", zap.Error(err))
	}

	if encryptionParams.Enabled() {

@@ -266,15 +258,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
	}
}

func makeCopyMap(headers map[string]string) map[string]string {
	res := make(map[string]string, len(headers))
	for key, val := range headers {
		res[key] = val
	}
	return res
}

func isCopyingToItselfForbidden(reqInfo *middleware.ReqInfo, srcBucket string, srcObject string, settings *data.BucketSettings, args *copyObjectArgs) bool {
func isCopyingToItselfForbidden(reqInfo *api.ReqInfo, srcBucket string, srcObject string, settings *data.BucketSettings, args *copyObjectArgs) bool {
	if reqInfo.BucketName != srcBucket || reqInfo.ObjectName != srcObject {
		return false
	}

@@ -6,8 +6,7 @@ import (
	"net/url"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"github.com/nspcc-dev/neofs-s3-gw/api"
	"github.com/stretchr/testify/require"
)

@@ -30,14 +29,14 @@ func TestCopyWithTaggingDirective(t *testing.T) {
	copyMeta := CopyMeta{
		Tags: map[string]string{"key2": "val"},
	}
	copyObject(tc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
	copyObject(t, tc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
	tagging := getObjectTagging(t, tc, bktName, objToCopy, emptyVersion)
	require.Len(t, tagging.TagSet, 1)
	require.Equal(t, "key", tagging.TagSet[0].Key)
	require.Equal(t, "val", tagging.TagSet[0].Value)

	copyMeta.TaggingDirective = replaceDirective
	copyObject(tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
	copyObject(t, tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
	tagging = getObjectTagging(t, tc, bktName, objToCopy2, emptyVersion)
	require.Len(t, tagging.TagSet, 1)
	require.Equal(t, "key2", tagging.TagSet[0].Key)

@@ -52,54 +51,20 @@ func TestCopyToItself(t *testing.T) {

	copyMeta := CopyMeta{MetadataDirective: replaceDirective}

	copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest)
	copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
	copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest)
	copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)

	putBucketVersioning(t, tc, bktName, true)
	copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
	copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
	copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
	copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)

	putBucketVersioning(t, tc, bktName, false)
	copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
	copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
	copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
	copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)
}

func TestCopyMultipart(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-copy", "object-for-copy"
	createTestBucket(hc, bktName)

	partSize := layer.UploadMinSize
	objLen := 6 * partSize
	headers := map[string]string{}

	data := multipartUpload(hc, bktName, objName, headers, objLen, partSize)
	require.Equal(t, objLen, len(data))

	objToCopy := "copy-target"
	var copyMeta CopyMeta
	copyObject(hc, bktName, objName, objToCopy, copyMeta, http.StatusOK)

	copiedData, _ := getObject(hc, bktName, objToCopy)
	equalDataSlices(t, data, copiedData)

	result := getObjectAttributes(hc, bktName, objToCopy, objectParts)
	require.NotNil(t, result.ObjectParts)

	objToCopy2 := "copy-target2"
	copyMeta.MetadataDirective = replaceDirective
	copyObject(hc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)

	result = getObjectAttributes(hc, bktName, objToCopy2, objectParts)
	require.Nil(t, result.ObjectParts)

	copiedData, _ = getObject(hc, bktName, objToCopy2)
	equalDataSlices(t, data, copiedData)
}

func copyObject(hc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) {
	w, r := prepareTestRequest(hc, bktName, toObject, nil)
func copyObject(t *testing.T, tc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) {
	w, r := prepareTestRequest(tc, bktName, toObject, nil)
	r.Header.Set(api.AmzCopySource, bktName+"/"+fromObject)

	r.Header.Set(api.AmzMetadataDirective, copyMeta.MetadataDirective)

@@ -114,8 +79,8 @@ func copyObject(hc *handlerContext, bktName, fromObject, toObject string, copyMe
	}
	r.Header.Set(api.AmzTagging, tagsQuery.Encode())

	hc.Handler().CopyObjectHandler(w, r)
	assertStatus(hc.t, w, statusCode)
	tc.Handler().CopyObjectHandler(w, r)
	assertStatus(t, w, statusCode)
}

func putObjectTagging(t *testing.T, tc *handlerContext, bktName, objName string, tags map[string]string) {

@@ -5,10 +5,9 @@ import (
	"strconv"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"github.com/nspcc-dev/neofs-s3-gw/api"
	"github.com/nspcc-dev/neofs-s3-gw/api/errors"
	"github.com/nspcc-dev/neofs-s3-gw/api/layer"
	"go.uber.org/zap"
)

@@ -19,7 +18,7 @@ const (
)

func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := middleware.GetReqInfo(r.Context())
	reqInfo := api.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {

@@ -33,14 +32,14 @@ func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	if err = middleware.EncodeToResponse(w, cors); err != nil {
	if err = api.EncodeToResponse(w, cors); err != nil {
		h.logAndSendError(w, "could not encode cors to response", reqInfo, err)
		return
	}
}

func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := middleware.GetReqInfo(r.Context())
	reqInfo := api.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {

@@ -49,14 +48,9 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
	}

	p := &layer.PutCORSParams{
		BktInfo: bktInfo,
		Reader:  r.Body,
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return
		BktInfo:      bktInfo,
		Reader:       r.Body,
		CopiesNumber: h.cfg.CopiesNumber,
	}

	if err = h.obj.PutBucketCORS(r.Context(), p); err != nil {

@@ -64,11 +58,11 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	middleware.WriteSuccessResponseHeadersOnly(w)
	api.WriteSuccessResponseHeadersOnly(w)
}

func (h *handler) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := middleware.GetReqInfo(r.Context())
	reqInfo := api.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {

@@ -91,21 +85,19 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
	if origin == "" {
		return
	}

	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)
	reqInfo := api.GetReqInfo(r.Context())
	if reqInfo.BucketName == "" {
		return
	}
	bktInfo, err := h.obj.GetBucketInfo(ctx, reqInfo.BucketName)
	bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
	if err != nil {
		h.reqLogger(ctx).Warn("get bucket info", zap.Error(err))
		h.log.Warn("get bucket info", zap.Error(err))
		return
	}

	cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
	cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
	if err != nil {
		h.reqLogger(ctx).Warn("get bucket cors", zap.Error(err))
		h.log.Warn("get bucket cors", zap.Error(err))
		return
	}

@@ -144,7 +136,7 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
}

func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
	reqInfo := middleware.GetReqInfo(r.Context())
	reqInfo := api.GetReqInfo(r.Context())
	bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)

@@ -198,7 +190,7 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
			if o != wildcard {
				w.Header().Set(api.AccessControlAllowCredentials, "true")
			}
			middleware.WriteSuccessResponseHeadersOnly(w)
			api.WriteSuccessResponseHeadersOnly(w)
			return
		}
	}

@@ -1,42 +0,0 @@
package handler

import (
	"context"
	"net/http"
	"strings"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)

func TestCORSOriginWildcard(t *testing.T) {
	body := `
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
	<CORSRule>
		<AllowedMethod>GET</AllowedMethod>
		<AllowedOrigin>*</AllowedOrigin>
	</CORSRule>
</CORSConfiguration>
`
	hc := prepareHandlerContext(t)

	bktName := "bucket-for-cors"
	box, _ := createAccessBox(t)
	w, r := prepareTestRequest(hc, bktName, "", nil)
	ctx := context.WithValue(r.Context(), middleware.BoxData, box)
	r = r.WithContext(ctx)
	r.Header.Add(api.AmzACL, "public-read")
	hc.Handler().CreateBucketHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
	ctx = context.WithValue(r.Context(), middleware.BoxData, box)
	r = r.WithContext(ctx)
	hc.Handler().PutBucketCorsHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	w, r = prepareTestPayloadRequest(hc, bktName, "", nil)
	hc.Handler().GetBucketCorsHandler(w, r)
	assertStatus(t, w, http.StatusOK)
}

@@ -6,21 +6,17 @@ import (
	"strconv"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
	"github.com/nspcc-dev/neofs-s3-gw/api"
	"github.com/nspcc-dev/neofs-s3-gw/api/data"
	"github.com/nspcc-dev/neofs-s3-gw/api/errors"
	"github.com/nspcc-dev/neofs-s3-gw/api/layer"
	apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
	"github.com/nspcc-dev/neofs-sdk-go/session"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// limitation of AWS https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
const maxObjectsToDelete = 1000

// DeleteObjectsRequest -- xml carrying the object key names which should be deleted.
type DeleteObjectsRequest struct {
	// Element to enable quiet mode for the request
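The 1000-object cap is imposed by the AWS DeleteObjects API itself, so a client holding a longer key list must split it into batches. A sketch of that client-side chunking, separate from the gateway's own code:

package main

import "fmt"

const maxObjectsToDelete = 1000 // per-request limit of DeleteObjects

// batchKeys splits keys into slices of at most maxObjectsToDelete entries,
// one slice per DeleteObjects request.
func batchKeys(keys []string) [][]string {
	var batches [][]string
	for len(keys) > 0 {
		n := len(keys)
		if n > maxObjectsToDelete {
			n = maxObjectsToDelete
		}
		batches = append(batches, keys[:n])
		keys = keys[n:]
	}
	return batches
}

func main() {
	fmt.Println(len(batchKeys(make([]string, 2500)))) // 3 requests: 1000 + 1000 + 500
}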
@@ -62,8 +58,7 @@ type DeleteObjectsResponse struct {
}

func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)
	reqInfo := api.GetReqInfo(r.Context())
	versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
	versionedObject := []*layer.VersionedObject{{
		Name: reqInfo.ObjectName,

@@ -76,7 +71,7 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
	bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
	if err != nil {
		h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
		return

@@ -87,7 +82,7 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
		Objects:  versionedObject,
		Settings: bktSettings,
	}
	deletedObjects := h.obj.DeleteObjects(ctx, p)
	deletedObjects := h.obj.DeleteObjects(r.Context(), p)
	deletedObject := deletedObjects[0]
	if deletedObject.Error != nil {
		if isErrObjectLocked(deletedObject.Error) {

@@ -114,7 +109,7 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
	var objID oid.ID
	if len(versionID) != 0 {
		if err = objID.DecodeString(versionID); err != nil {
			h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
			h.log.Error("couldn't send notification: %w", zap.Error(err))
		}
	}

@@ -129,8 +124,8 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
		}
	}

	if err = h.sendNotifications(ctx, m); err != nil {
		h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
	if err = h.sendNotifications(r.Context(), m); err != nil {
		h.log.Error("couldn't send notification: %w", zap.Error(err))
	}

	if deletedObject.VersionID != "" {

@@ -158,8 +153,7 @@ func isErrObjectLocked(err error) bool {

// DeleteMultipleObjectsHandler handles multiple delete requests.
func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)
	reqInfo := api.GetReqInfo(r.Context())

	// Content-Md5 is required and should be set
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
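As the comment notes, DeleteObjects is the one S3 request where Content-Md5 is mandatory: it lets the server detect a corrupted XML body before deleting anything. A sketch of how a client derives the header value with the standard library:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	body := []byte(`<Delete><Object><Key>obj</Key></Object></Delete>`)
	sum := md5.Sum(body)
	// Content-MD5 is the base64 encoding of the raw 16-byte digest.
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}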
@@ -182,11 +176,6 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
		return
	}

	if len(requested.Objects) == 0 || len(requested.Objects) > maxObjectsToDelete {
		h.logAndSendError(w, "number of objects to delete must be greater than 0 and less or equal to 1000", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
		return
	}

	removed := make(map[string]*layer.VersionedObject)
	toRemove := make([]*layer.VersionedObject, 0, len(requested.Objects))
	for _, obj := range requested.Objects {

@@ -209,7 +198,7 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
		return
	}

	bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
	bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
	if err != nil {
		h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
		return

@@ -227,7 +216,7 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
		Objects:  toRemove,
		Settings: bktSettings,
	}
	deletedObjects := h.obj.DeleteObjects(ctx, p)
	deletedObjects := h.obj.DeleteObjects(r.Context(), p)

	var errs []error
	for _, obj := range deletedObjects {

@@ -262,17 +251,17 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
			zap.Array("objects", marshaler),
			zap.Errors("errors", errs),
		}
		h.reqLogger(ctx).Error("couldn't delete objects", fields...)
		h.log.Error("couldn't delete objects", fields...)
	}

	if err = middleware.EncodeToResponse(w, response); err != nil {
	if err = api.EncodeToResponse(w, response); err != nil {
		h.logAndSendError(w, "could not write response", reqInfo, err, zap.Array("objects", marshaler))
		return
	}
}

func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := middleware.GetReqInfo(r.Context())
	reqInfo := api.GetReqInfo(r.Context())
	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)

@@ -3,15 +3,11 @@ package handler
import (
	"bytes"
	"net/http"
	"net/http/httptest"
	"net/url"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/nspcc-dev/neofs-s3-gw/api"
	"github.com/nspcc-dev/neofs-s3-gw/api/data"
	"github.com/stretchr/testify/require"
)

@@ -19,31 +15,6 @@ const (
	emptyVersion = ""
)

func TestDeleteBucketOnAlreadyRemovedError(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo := createTestBucket(hc, bktName)

	putObject(t, hc, bktName, objName)

	addr := getAddressOfLastVersion(hc, bktInfo, objName)
	hc.tp.SetObjectError(addr, apistatus.ObjectAlreadyRemoved{})

	deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})

	deleteBucket(t, hc, bktName, http.StatusNoContent)
}

func getAddressOfLastVersion(hc *handlerContext, bktInfo *data.BucketInfo, objName string) oid.Address {
	nodeVersion, err := hc.tree.GetLatestVersion(hc.context, bktInfo, objName)
	require.NoError(hc.t, err)
	var addr oid.Address
	addr.SetContainer(bktInfo.CID)
	addr.SetObject(nodeVersion.OID)
	return addr
}

func TestDeleteBucket(t *testing.T) {
	tc := prepareHandlerContext(t)

@@ -60,26 +31,6 @@ func TestDeleteBucket(t *testing.T) {
	deleteBucket(t, tc, bktName, http.StatusNoContent)
}

func TestDeleteBucketOnNotFoundError(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo := createTestBucket(hc, bktName)

	putObject(t, hc, bktName, objName)

	nodeVersion, err := hc.tree.GetUnversioned(hc.context, bktInfo, objName)
	require.NoError(t, err)
	var addr oid.Address
	addr.SetContainer(bktInfo.CID)
	addr.SetObject(nodeVersion.OID)
	hc.tp.SetObjectError(addr, apistatus.ObjectNotFound{})

	deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})

	deleteBucket(t, hc, bktName, http.StatusNoContent)
}

func TestDeleteObject(t *testing.T) {
	tc := prepareHandlerContext(t)

@@ -90,7 +41,7 @@ func TestDeleteObject(t *testing.T) {
	deleteObject(t, tc, bktName, objName, emptyVersion)
	checkNotFound(t, tc, bktName, objName, emptyVersion)

	require.False(t, existInMockedFrostFS(tc, bktInfo, objInfo))
	require.False(t, existInMockedNeoFS(tc, bktInfo, objInfo))
}

func TestDeleteObjectFromSuspended(t *testing.T) {

@@ -158,7 +109,7 @@ func TestDeleteObjectVersioned(t *testing.T) {
	deleteObject(t, tc, bktName, objName, objInfo.VersionID())
	checkNotFound(t, tc, bktName, objName, objInfo.VersionID())

	require.False(t, existInMockedFrostFS(tc, bktInfo, objInfo), "object exists but shouldn't")
	require.False(t, existInMockedNeoFS(tc, bktInfo, objInfo), "object exists but shouldn't")
}

func TestDeleteObjectUnversioned(t *testing.T) {

@@ -175,7 +126,7 @@ func TestDeleteObjectUnversioned(t *testing.T) {
	require.Len(t, versions.DeleteMarker, 0, "delete markers must be empty")
	require.Len(t, versions.Version, 0, "versions must be empty")

	require.False(t, existInMockedFrostFS(tc, bktInfo, objInfo), "object exists but shouldn't")
	require.False(t, existInMockedNeoFS(tc, bktInfo, objInfo), "object exists but shouldn't")
}

func TestRemoveDeleteMarker(t *testing.T) {

@@ -193,82 +144,7 @@ func TestRemoveDeleteMarker(t *testing.T) {
	deleteObject(t, tc, bktName, objName, deleteMarkerVersion)
	checkFound(t, tc, bktName, objName, emptyVersion)

	require.True(t, existInMockedFrostFS(tc, bktInfo, objInfo), "object doesn't exist but should")
}

func TestDeleteMarkerVersioned(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	createVersionedBucketAndObject(t, tc, bktName, objName)

	t.Run("not create new delete marker if last version is delete marker", func(t *testing.T) {
		deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
		require.True(t, isDeleteMarker)
		versions := listVersions(t, tc, bktName)
		require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)

		_, isDeleteMarker = deleteObject(t, tc, bktName, objName, emptyVersion)
		require.True(t, isDeleteMarker)
		versions = listVersions(t, tc, bktName)
		require.Len(t, versions.DeleteMarker, 1)
		require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)
	})

	t.Run("do not create delete marker if object does not exist", func(t *testing.T) {
		versionsBefore := listVersions(t, tc, bktName)
		_, isDeleteMarker := deleteObject(t, tc, bktName, "dummy", emptyVersion)
		require.False(t, isDeleteMarker)
		versionsAfter := listVersions(t, tc, bktName)
		require.Equal(t, versionsBefore, versionsAfter)
	})
}

func TestDeleteMarkerSuspended(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo, _ := createVersionedBucketAndObject(t, tc, bktName, objName)
	putBucketVersioning(t, tc, bktName, false)

	t.Run("not create new delete marker if last version is delete marker", func(t *testing.T) {
		deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
		require.True(t, isDeleteMarker)
		require.Equal(t, data.UnversionedObjectVersionID, deleteMarkerVersion)

		deleteMarkerVersion, isDeleteMarker = deleteObject(t, tc, bktName, objName, emptyVersion)
		require.True(t, isDeleteMarker)
		require.Equal(t, data.UnversionedObjectVersionID, deleteMarkerVersion)

		versions := listVersions(t, tc, bktName)
		require.Len(t, versions.DeleteMarker, 1)
		require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)
	})

	t.Run("do not create delete marker if object does not exist", func(t *testing.T) {
		versionsBefore := listVersions(t, tc, bktName)
		_, isDeleteMarker := deleteObject(t, tc, bktName, "dummy", emptyVersion)
		require.False(t, isDeleteMarker)
		versionsAfter := listVersions(t, tc, bktName)
		require.Equal(t, versionsBefore, versionsAfter)
	})

	t.Run("remove last unversioned non delete marker", func(t *testing.T) {
		objName := "obj3"
		putObject(t, tc, bktName, objName)

		nodeVersion, err := tc.tree.GetUnversioned(tc.Context(), bktInfo, objName)
		require.NoError(t, err)

		deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
		require.True(t, isDeleteMarker)
		require.Equal(t, data.UnversionedObjectVersionID, deleteMarkerVersion)

		objVersions := getVersion(listVersions(t, tc, bktName), objName)
		require.Len(t, objVersions, 0)

		require.False(t, tc.MockedPool().ObjectExists(nodeVersion.OID))
	})
	require.True(t, existInMockedNeoFS(tc, bktInfo, objInfo), "object doesn't exist but should")
}

func TestDeleteObjectCombined(t *testing.T) {

@@ -285,7 +161,7 @@ func TestDeleteObjectCombined(t *testing.T) {

	checkFound(t, tc, bktName, objName, objInfo.VersionID())

	require.True(t, existInMockedFrostFS(tc, bktInfo, objInfo), "object doesn't exist but should")
	require.True(t, existInMockedNeoFS(tc, bktInfo, objInfo), "object doesn't exist but should")
}

func TestDeleteObjectSuspended(t *testing.T) {

@@ -305,7 +181,7 @@ func TestDeleteObjectSuspended(t *testing.T) {
	deleteObject(t, tc, bktName, objName, emptyVersion)
	checkNotFound(t, tc, bktName, objName, objInfo.VersionID())

	require.False(t, existInMockedFrostFS(tc, bktInfo, objInfo), "object exists but shouldn't")
	require.False(t, existInMockedNeoFS(tc, bktInfo, objInfo), "object exists but shouldn't")
}

func TestDeleteMarkers(t *testing.T) {

@@ -321,10 +197,10 @@ func TestDeleteMarkers(t *testing.T) {
	deleteObject(t, tc, bktName, objName, emptyVersion)

	versions := listVersions(t, tc, bktName)
	require.Len(t, versions.DeleteMarker, 0, "invalid delete markers length")
	require.Len(t, versions.DeleteMarker, 3, "invalid delete markers length")
	require.Len(t, versions.Version, 0, "versions must be empty")

	require.Len(t, listOIDsFromMockedFrostFS(t, tc, bktName), 0, "shouldn't be any object in frostfs")
	require.Len(t, listOIDsFromMockedNeoFS(t, tc, bktName), 0, "shouldn't be any object in neofs")
}

func TestDeleteObjectFromListCache(t *testing.T) {

@@ -344,7 +220,7 @@ func TestDeleteObjectFromListCache(t *testing.T) {
	versions = listObjectsV1(t, tc, bktName, "", "", "", -1)
	require.Len(t, versions.Contents, 0)

	require.False(t, existInMockedFrostFS(tc, bktInfo, objInfo))
	require.False(t, existInMockedNeoFS(tc, bktInfo, objInfo))
}

func TestDeleteObjectCheckMarkerReturn(t *testing.T) {

@@ -407,53 +283,28 @@ func deleteObject(t *testing.T, tc *handlerContext, bktName, objName, version st
	return w.Header().Get(api.AmzVersionID), w.Header().Get(api.AmzDeleteMarker) != ""
}

func deleteObjects(t *testing.T, tc *handlerContext, bktName string, objVersions [][2]string) *DeleteObjectsResponse {
	req := &DeleteObjectsRequest{}
	for _, version := range objVersions {
		req.Objects = append(req.Objects, ObjectIdentifier{
			ObjectName: version[0],
			VersionID:  version[1],
		})
	}

	w, r := prepareTestRequest(tc, bktName, "", req)
	r.Header.Set(api.ContentMD5, "")
	tc.Handler().DeleteMultipleObjectsHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	res := &DeleteObjectsResponse{}
	parseTestResponse(t, w, res)
	return res
}

func deleteBucket(t *testing.T, tc *handlerContext, bktName string, code int) {
	w, r := prepareTestRequest(tc, bktName, "", nil)
	tc.Handler().DeleteBucketHandler(w, r)
	assertStatus(t, w, code)
}

func checkNotFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
	w := headObjectBase(hc, bktName, objName, version)
	assertStatus(t, w, http.StatusNotFound)
}

func headObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apiErrors.ErrorCode) {
	w := headObjectBase(hc, bktName, objName, version)
	assertS3Error(hc.t, w, apiErrors.GetAPIError(code))
}

func checkFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
	w := headObjectBase(hc, bktName, objName, version)
	assertStatus(t, w, http.StatusOK)
}

func headObjectBase(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
func checkNotFound(t *testing.T, tc *handlerContext, bktName, objName, version string) {
	query := make(url.Values)
	query.Add(api.QueryVersionID, version)

	w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
	hc.Handler().HeadObjectHandler(w, r)
	return w
	w, r := prepareTestFullRequest(tc, bktName, objName, query, nil)
	tc.Handler().HeadObjectHandler(w, r)
	assertStatus(t, w, http.StatusNotFound)
}

func checkFound(t *testing.T, tc *handlerContext, bktName, objName, version string) {
	query := make(url.Values)
	query.Add(api.QueryVersionID, version)

	w, r := prepareTestFullRequest(tc, bktName, objName, query, nil)
	tc.Handler().HeadObjectHandler(w, r)
	assertStatus(t, w, http.StatusOK)
}

func listVersions(t *testing.T, tc *handlerContext, bktName string) *ListObjectsVersionsResponse {

@@ -465,16 +316,6 @@ func listVersions(t *testing.T, tc *handlerContext, bktName string) *ListObjects
	return res
}

func getVersion(resp *ListObjectsVersionsResponse, objName string) []*ObjectVersionResponse {
	var res []*ObjectVersionResponse
	for i, version := range resp.Version {
		if version.Key == objName {
			res = append(res, &resp.Version[i])
		}
	}
	return res
}

func putObject(t *testing.T, tc *handlerContext, bktName, objName string) {
	body := bytes.NewReader([]byte("content"))
	w, r := prepareTestPayloadRequest(tc, bktName, objName, body)

@@ -3,18 +3,16 @@ package handler
import (
	"bytes"
	"crypto/rand"
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strconv"
	"strings"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"github.com/nspcc-dev/neofs-s3-gw/api"
	"github.com/nspcc-dev/neofs-s3-gw/api/layer"
	"github.com/stretchr/testify/require"
)

@@ -42,7 +40,7 @@ func TestSimpleGetEncrypted(t *testing.T) {
	require.NoError(t, err)
	require.NotEqual(t, content, string(encryptedContent))

	response, _ := getEncryptedObject(tc, bktName, objName)
	response, _ := getEncryptedObject(t, tc, bktName, objName)
	require.Equal(t, content, string(response))
}

@@ -104,40 +102,14 @@ func TestS3EncryptionSSECMultipartUpload(t *testing.T) {
	data := multipartUploadEncrypted(tc, bktName, objName, headers, objLen, partSize)
	require.Equal(t, objLen, len(data))

	resData, resHeader := getEncryptedObject(tc, bktName, objName)
	resData, resHeader := getEncryptedObject(t, tc, bktName, objName)
	equalDataSlices(t, data, resData)
	require.Equal(t, headers[api.ContentType], resHeader.Get(api.ContentType))
	require.Equal(t, headers[headerMetaKey], resHeader[headerMetaKey][0])
	require.Equal(t, strconv.Itoa(objLen), resHeader.Get(api.ContentLength))

	checkContentUsingRangeEnc(tc, bktName, objName, data, 1000000)
	checkContentUsingRangeEnc(tc, bktName, objName, data, 10000000)
}

func TestMultipartUploadGetRange(t *testing.T) {
	hc := prepareHandlerContext(t)
	bktName, objName := "bucket-for-multipart-s3-tests", "multipart_obj"
	createTestBucket(hc, bktName)

	objLen := 30 * 1024 * 1024
	partSize := objLen / 6
	headerMetaKey := api.MetadataPrefix + "foo"
	headers := map[string]string{
		headerMetaKey:   "bar",
		api.ContentType: "text/plain",
	}

	data := multipartUpload(hc, bktName, objName, headers, objLen, partSize)
	require.Equal(t, objLen, len(data))

	resData, resHeader := getObject(hc, bktName, objName)
	equalDataSlices(t, data, resData)
	require.Equal(t, headers[api.ContentType], resHeader.Get(api.ContentType))
	require.Equal(t, headers[headerMetaKey], resHeader[headerMetaKey][0])
	require.Equal(t, strconv.Itoa(objLen), resHeader.Get(api.ContentLength))

	checkContentUsingRange(hc, bktName, objName, data, 1000000)
	checkContentUsingRange(hc, bktName, objName, data, 10000000)
	checkContentUsingRangeEnc(t, tc, bktName, objName, data, 1000000)
	checkContentUsingRangeEnc(t, tc, bktName, objName, data, 10000000)
}

func equalDataSlices(t *testing.T, expected, actual []byte) {

@@ -154,15 +126,7 @@ func equalDataSlices(t *testing.T, expected, actual []byte) {
	}
}

func checkContentUsingRangeEnc(hc *handlerContext, bktName, objName string, data []byte, step int) {
	checkContentUsingRangeBase(hc, bktName, objName, data, step, true)
}

func checkContentUsingRange(hc *handlerContext, bktName, objName string, data []byte, step int) {
	checkContentUsingRangeBase(hc, bktName, objName, data, step, false)
}

func checkContentUsingRangeBase(hc *handlerContext, bktName, objName string, data []byte, step int, encrypted bool) {
func checkContentUsingRangeEnc(t *testing.T, tc *handlerContext, bktName, objName string, data []byte, step int) {
	var off, toRead, end int

	for off < len(data) {

@@ -172,14 +136,8 @@ func checkContentUsingRangeBase(hc *handlerContext, bktName, objName string, dat
		}
		end = off + toRead - 1

		var rangeData []byte
		if encrypted {
			rangeData = getEncryptedObjectRange(hc.t, hc, bktName, objName, off, end)
		} else {
			rangeData = getObjectRange(hc.t, hc, bktName, objName, off, end)
		}

		equalDataSlices(hc.t, data[off:end+1], rangeData)
		rangeData := getEncryptedObjectRange(t, tc, bktName, objName, off, end)
		equalDataSlices(t, data[off:end+1], rangeData)

		off += step
	}

@@ -209,30 +167,6 @@ func multipartUploadEncrypted(hc *handlerContext, bktName, objName string, heade
	return
}

func multipartUpload(hc *handlerContext, bktName, objName string, headers map[string]string, objLen, partsSize int) (objData []byte) {
	multipartInfo := createMultipartUpload(hc, bktName, objName, headers)

	var sum, currentPart int
	var etags []string
	adjustedSize := partsSize

	for sum < objLen {
		currentPart++

		sum += partsSize
		if sum > objLen {
			adjustedSize = objLen - sum
		}

		etag, data := uploadPart(hc, bktName, objName, multipartInfo.UploadID, currentPart, adjustedSize)
		etags = append(etags, etag)
		objData = append(objData, data...)
	}

	completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, etags)
	return
}

func createMultipartUploadEncrypted(hc *handlerContext, bktName, objName string, headers map[string]string) *InitiateMultipartUploadResponse {
	return createMultipartUploadBase(hc, bktName, objName, true, headers)
}

@@ -255,11 +189,6 @@ func createMultipartUploadBase(hc *handlerContext, bktName, objName string, encr
}

func completeMultipartUpload(hc *handlerContext, bktName, objName, uploadID string, partsETags []string) {
	w := completeMultipartUploadBase(hc, bktName, objName, uploadID, partsETags)
	assertStatus(hc.t, w, http.StatusOK)
}

func completeMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID string, partsETags []string) *httptest.ResponseRecorder {
	query := make(url.Values)
	query.Set(uploadIDQuery, uploadID)
	complete := &CompleteMultipartUpload{

@@ -274,8 +203,7 @@ func completeMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID

	w, r := prepareTestFullRequest(hc, bktName, objName, query, complete)
	hc.Handler().CompleteMultipartUploadHandler(w, r)

	return w
	assertStatus(hc.t, w, http.StatusOK)
}

func uploadPartEncrypted(hc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) {

@@ -318,7 +246,7 @@ func TestMultipartEncrypted(t *testing.T) {
	part2ETag, part2 := uploadPartEncrypted(hc, bktName, objName, multipartInitInfo.UploadID, 2, 5)
	completeMultipartUpload(hc, bktName, objName, multipartInitInfo.UploadID, []string{part1ETag, part2ETag})

	res, _ := getEncryptedObject(hc, bktName, objName)
	res, _ := getEncryptedObject(t, hc, bktName, objName)
	require.Equal(t, len(part1)+len(part2), len(res))
	require.Equal(t, append(part1, part2...), res)

@@ -334,22 +262,13 @@ func putEncryptedObject(t *testing.T, tc *handlerContext, bktName, objName, cont
	assertStatus(t, w, http.StatusOK)
}

func getEncryptedObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header) {
	w, r := prepareTestRequest(hc, bktName, objName, nil)
func getEncryptedObject(t *testing.T, tc *handlerContext, bktName, objName string) ([]byte, http.Header) {
	w, r := prepareTestRequest(tc, bktName, objName, nil)
	setEncryptHeaders(r)
	return getObjectBase(hc, w, r)
}

func getObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header) {
	w, r := prepareTestRequest(hc, bktName, objName, nil)
	return getObjectBase(hc, w, r)
}

func getObjectBase(hc *handlerContext, w *httptest.ResponseRecorder, r *http.Request) ([]byte, http.Header) {
	hc.Handler().GetObjectHandler(w, r)
	assertStatus(hc.t, w, http.StatusOK)
	tc.Handler().GetObjectHandler(w, r)
	assertStatus(t, w, http.StatusOK)
	content, err := io.ReadAll(w.Result().Body)
	require.NoError(hc.t, err)
	require.NoError(t, err)
	return content, w.Header()
}

@@ -365,7 +284,6 @@ func getEncryptedObjectRange(t *testing.T, tc *handlerContext, bktName, objName
}

func setEncryptHeaders(r *http.Request) {
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzServerSideEncryptionCustomerKey, aes256Key)
	r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, aes256KeyMD5)
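setEncryptHeaders above fills in the three standard SSE-C request headers: the algorithm, the key itself, and the key's MD5 checksum. A sketch of producing them for an arbitrary 32-byte key; the header names are the standard x-amz-server-side-encryption-customer-* set, while the function itself is hypothetical:

package sketch

import (
	"crypto/md5"
	"encoding/base64"
	"net/http"
)

// setSSECHeaders shows the shape of SSE-C: the client ships the AES-256 key
// with every request, and the server uses it without ever persisting it.
func setSSECHeaders(r *http.Request, key [32]byte) {
	sum := md5.Sum(key[:])
	r.Header.Set("x-amz-server-side-encryption-customer-algorithm", "AES256")
	r.Header.Set("x-amz-server-side-encryption-customer-key", base64.StdEncoding.EncodeToString(key[:]))
	r.Header.Set("x-amz-server-side-encryption-customer-key-MD5", base64.StdEncoding.EncodeToString(sum[:]))
}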
@@ -8,12 +8,10 @@ import (
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"github.com/nspcc-dev/neofs-s3-gw/api"
	"github.com/nspcc-dev/neofs-s3-gw/api/data"
	"github.com/nspcc-dev/neofs-s3-gw/api/errors"
	"github.com/nspcc-dev/neofs-s3-gw/api/layer"
	"go.uber.org/zap"
)

@@ -89,10 +87,8 @@ func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.E
	if len(info.Headers[layer.AttributeEncryptionAlgorithm]) > 0 {
		h.Set(api.ContentLength, info.Headers[layer.AttributeDecryptedSize])
		addSSECHeaders(h, requestHeader)
	} else if len(info.Headers[layer.MultipartObjectSize]) > 0 {
		h.Set(api.ContentLength, info.Headers[layer.MultipartObjectSize])
	} else {
		h.Set(api.ContentLength, strconv.FormatUint(info.Size, 10))
		h.Set(api.ContentLength, strconv.FormatInt(info.Size, 10))
	}

	h.Set(api.ETag, info.HashSum)

@@ -108,9 +104,6 @@ func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.E
	if expires := info.Headers[api.Expires]; expires != "" {
		h.Set(api.Expires, expires)
	}
	if encodings := info.Headers[api.ContentEncoding]; encodings != "" {
		h.Set(api.ContentEncoding, encodings)
	}

	for key, val := range info.Headers {
		if layer.IsSystemHeader(key) {

@@ -124,7 +117,7 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
	var (
		params *layer.RangeParams

		reqInfo = middleware.GetReqInfo(r.Context())
		reqInfo = api.GetReqInfo(r.Context())
	)

	conditional, err := parseConditionalHeaders(r.Header)

@@ -157,7 +150,7 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	encryptionParams, err := formEncryptionParams(r)
	encryptionParams, err := h.formEncryptionParams(r.Header)
	if err != nil {
		h.logAndSendError(w, "invalid sse headers", reqInfo, err)
		return

@@ -168,13 +161,15 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	fullSize, err := getObjectSize(extendedInfo, encryptionParams)
	if err != nil {
		h.logAndSendError(w, "invalid size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
		return
	fullSize := info.Size
	if encryptionParams.Enabled() {
		if fullSize, err = strconv.ParseInt(info.Headers[layer.AttributeDecryptedSize], 10, 64); err != nil {
			h.logAndSendError(w, "invalid decrypted size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
			return
		}
	}

	if params, err = fetchRangeHeader(r.Header, fullSize); err != nil {
	if params, err = fetchRangeHeader(r.Header, uint64(fullSize)); err != nil {
		h.logAndSendError(w, "could not parse range header", reqInfo, err)
		return
	}

@@ -206,70 +201,38 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	getPayloadParams := &layer.GetObjectParams{
		ObjectInfo: info,
		Versioned:  p.Versioned(),
		Range:      params,
		BucketInfo: bktInfo,
		Encryption: encryptionParams,
	}

	objPayload, err := h.obj.GetObject(r.Context(), getPayloadParams)
	if err != nil {
		h.logAndSendError(w, "could not get object payload", reqInfo, err)
		return
	}

	writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned())
	if params != nil {
		writeRangeHeaders(w, params, fullSize)
		writeRangeHeaders(w, params, info.Size)
	} else {
		w.WriteHeader(http.StatusOK)
	}

	if err = objPayload.StreamTo(w); err != nil {
		h.logAndSendError(w, "could not stream object payload", reqInfo, err)
		return
	getParams := &layer.GetObjectParams{
		ObjectInfo: info,
		Writer:     w,
		Range:      params,
		BucketInfo: bktInfo,
		Encryption: encryptionParams,
	}
}

func getObjectSize(extendedInfo *data.ExtendedObjectInfo, encryptionParams encryption.Params) (uint64, error) {
	var err error
	fullSize := extendedInfo.ObjectInfo.Size

	if encryptionParams.Enabled() {
		if fullSize, err = strconv.ParseUint(extendedInfo.ObjectInfo.Headers[layer.AttributeDecryptedSize], 10, 64); err != nil {
			return 0, fmt.Errorf("invalid decrypted size header: %w", err)
		}
	} else if extendedInfo.NodeVersion.IsCombined {
		if fullSize, err = strconv.ParseUint(extendedInfo.ObjectInfo.Headers[layer.MultipartObjectSize], 10, 64); err != nil {
			return 0, fmt.Errorf("invalid multipart size header: %w", err)
		}
	if err = h.obj.GetObject(r.Context(), getParams); err != nil {
		h.logAndSendError(w, "could not get object", reqInfo, err)
	}

	return fullSize, nil
}

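The removed getObjectSize helper captures the size resolution order on master: an SSE-C object reports its decrypted size, a combined (multipart) object reports the size recorded at completion, and everything else falls back to the stored payload size. A standalone sketch of that decision; the header keys here are placeholders, not the gateway's real attribute names:

package sketch

import "strconv"

// fullObjectSize mirrors the branching of getObjectSize. encrypted and
// combined stand in for encryptionParams.Enabled() and NodeVersion.IsCombined.
func fullObjectSize(storedSize uint64, headers map[string]string, encrypted, combined bool) (uint64, error) {
	switch {
	case encrypted:
		return strconv.ParseUint(headers["decrypted-size"], 10, 64)
	case combined:
		return strconv.ParseUint(headers["multipart-size"], 10, 64)
	}
	return storedSize, nil
}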
func checkPreconditions(info *data.ObjectInfo, args *conditionalArgs) error {
	if len(args.IfMatch) > 0 && args.IfMatch != info.HashSum {
		return fmt.Errorf("%w: etag mismatched: '%s', '%s'", errors.GetAPIError(errors.ErrPreconditionFailed), args.IfMatch, info.HashSum)
		return errors.GetAPIError(errors.ErrPreconditionFailed)
	}
	if len(args.IfNoneMatch) > 0 && args.IfNoneMatch == info.HashSum {
		return fmt.Errorf("%w: etag matched: '%s', '%s'", errors.GetAPIError(errors.ErrNotModified), args.IfNoneMatch, info.HashSum)
		return errors.GetAPIError(errors.ErrNotModified)
	}
	if args.IfModifiedSince != nil && info.Created.Before(*args.IfModifiedSince) {
		return fmt.Errorf("%w: not modified since '%s', last modified '%s'", errors.GetAPIError(errors.ErrNotModified),
			args.IfModifiedSince.Format(time.RFC3339), info.Created.Format(time.RFC3339))
		return errors.GetAPIError(errors.ErrNotModified)
	}
	if args.IfUnmodifiedSince != nil && info.Created.After(*args.IfUnmodifiedSince) {
		// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_RequestSyntax
		// If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:
		// If-Match condition evaluates to true, and;
		// If-Unmodified-Since condition evaluates to false;
		// then, S3 returns 200 OK and the data requested.
		if len(args.IfMatch) == 0 {
			return fmt.Errorf("%w: modified since '%s', last modified '%s'", errors.GetAPIError(errors.ErrPreconditionFailed),
				args.IfUnmodifiedSince.Format(time.RFC3339), info.Created.Format(time.RFC3339))
			return errors.GetAPIError(errors.ErrPreconditionFailed)
		}
	}

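The quoted AWS rule is the subtle part of checkPreconditions: a failing If-Unmodified-Since is deliberately ignored when If-Match is present (a mismatching If-Match has already returned earlier in the function). A sketch of just that interaction, with simplified types:

package sketch

import (
	"errors"
	"time"
)

var errPreconditionFailed = errors.New("precondition failed")

// unmodifiedSincePasses returns an error only when If-Unmodified-Since fails
// and no If-Match header was sent, matching the S3 GetObject semantics.
func unmodifiedSincePasses(ifMatch string, ifUnmodifiedSince *time.Time, lastModified time.Time) error {
	if ifUnmodifiedSince != nil && lastModified.After(*ifUnmodifiedSince) && len(ifMatch) == 0 {
		return errPreconditionFailed
	}
	return nil
}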
@@ -279,8 +242,8 @@ func checkPreconditions(info *data.ObjectInfo, args *conditionalArgs) error {
func parseConditionalHeaders(headers http.Header) (*conditionalArgs, error) {
	var err error
	args := &conditionalArgs{
		IfMatch:     strings.Trim(headers.Get(api.IfMatch), "\""),
		IfNoneMatch: strings.Trim(headers.Get(api.IfNoneMatch), "\""),
		IfMatch:     headers.Get(api.IfMatch),
		IfNoneMatch: headers.Get(api.IfNoneMatch),
	}

	if args.IfModifiedSince, err = parseHTTPTime(headers.Get(api.IfModifiedSince)); err != nil {

@@ -305,7 +268,7 @@ func parseHTTPTime(data string) (*time.Time, error) {
	return &result, nil
}

func writeRangeHeaders(w http.ResponseWriter, params *layer.RangeParams, size uint64) {
func writeRangeHeaders(w http.ResponseWriter, params *layer.RangeParams, size int64) {
	w.Header().Set(api.AcceptRanges, "bytes")
	w.Header().Set(api.ContentRange, fmt.Sprintf("bytes %d-%d/%d", params.Start, params.End, size))
	w.Header().Set(api.ContentLength, strconv.FormatUint(params.End-params.Start+1, 10))

@@ -2,22 +2,15 @@ package handler

import (
	"bytes"
	stderrors "errors"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	"github.com/nspcc-dev/neofs-s3-gw/api/data"
	"github.com/nspcc-dev/neofs-s3-gw/api/errors"
	"github.com/nspcc-dev/neofs-s3-gw/api/layer"
	"github.com/stretchr/testify/require"
)

@@ -150,11 +143,7 @@ func TestPreconditions(t *testing.T) {
	} {
		t.Run(tc.name, func(t *testing.T) {
			actual := checkPreconditions(tc.info, tc.args)
			if tc.expected == nil {
				require.NoError(t, actual)
			} else {
				require.True(t, stderrors.Is(actual, tc.expected), tc.expected, actual)
			}
			require.Equal(t, tc.expected, actual)
		})
	}
}

@@ -181,24 +170,6 @@ func TestGetRange(t *testing.T) {
	require.Equal(t, "bcdef", string(end))
}

func TestGetObject(t *testing.T) {
	hc := prepareHandlerContext(t)
	bktName, objName := "bucket", "obj"
	bktInfo, objInfo := createVersionedBucketAndObject(hc.t, hc, bktName, objName)

	putObject(hc.t, hc, bktName, objName)

	checkFound(hc.t, hc, bktName, objName, objInfo.VersionID())
	checkFound(hc.t, hc, bktName, objName, emptyVersion)

	addr := getAddressOfLastVersion(hc, bktInfo, objName)
	hc.tp.SetObjectError(addr, apistatus.ObjectNotFound{})
	hc.tp.SetObjectError(objInfo.Address(), apistatus.ObjectNotFound{})

	getObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), s3errors.ErrNoSuchVersion)
	getObjectAssertS3Error(hc, bktName, objName, emptyVersion, s3errors.ErrNoSuchKey)
}

func putObjectContent(hc *handlerContext, bktName, objName, content string) {
	body := bytes.NewReader([]byte(content))
	w, r := prepareTestPayloadRequest(hc, bktName, objName, body)

@@ -215,17 +186,3 @@ func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, s
	require.NoError(t, err)
	return content
}

func getObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apiErrors.ErrorCode) {
	w := getObjectBaseResponse(hc, bktName, objName, version)
	assertS3Error(hc.t, w, apiErrors.GetAPIError(code))
}

func getObjectBaseResponse(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
	query := make(url.Values)
	query.Add(api.QueryVersionID, version)

	w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
	hc.Handler().GetObjectHandler(w, r)
	return w
}


@ -13,18 +13,15 @@ import (
"testing"
"time"

"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neofs-s3-gw/api"
"github.com/nspcc-dev/neofs-s3-gw/api/data"
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
"github.com/nspcc-dev/neofs-s3-gw/api/resolver"
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
"github.com/nspcc-dev/neofs-sdk-go/object"
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
"github.com/nspcc-dev/neofs-sdk-go/user"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)

@ -33,17 +30,15 @@ type handlerContext struct {
owner user.ID
t *testing.T
h *handler
tp *layer.TestFrostFS
tree *tree.Tree
tp *layer.TestNeoFS
context context.Context
kludge *kludgeSettingsMock
}

func (hc *handlerContext) Handler() *handler {
return hc.h
}

func (hc *handlerContext) MockedPool() *layer.TestFrostFS {
func (hc *handlerContext) MockedPool() *layer.TestNeoFS {
return hc.tp
}

@ -55,57 +50,12 @@ func (hc *handlerContext) Context() context.Context {
return hc.context
}

type placementPolicyMock struct {
defaultPolicy netmap.PlacementPolicy
copiesNumbers map[string][]uint32
defaultCopiesNumbers []uint32
}

func (p *placementPolicyMock) DefaultPlacementPolicy() netmap.PlacementPolicy {
return p.defaultPolicy
}

func (p *placementPolicyMock) PlacementPolicy(string) (netmap.PlacementPolicy, bool) {
return netmap.PlacementPolicy{}, false
}

func (p *placementPolicyMock) CopiesNumbers(locationConstraint string) ([]uint32, bool) {
result, ok := p.copiesNumbers[locationConstraint]
return result, ok
}

func (p *placementPolicyMock) DefaultCopiesNumbers() []uint32 {
return p.defaultCopiesNumbers
}

type xmlDecoderProviderMock struct{}

func (p *xmlDecoderProviderMock) NewCompleteMultipartDecoder(r io.Reader) *xml.Decoder {
return xml.NewDecoder(r)
}

type kludgeSettingsMock struct {
bypassContentEncodingInChunks bool
}

func (k *kludgeSettingsMock) BypassContentEncodingInChunks() bool {
return k.bypassContentEncodingInChunks
}

func prepareHandlerContext(t *testing.T) *handlerContext {
return prepareHandlerContextBase(t, false)
}

func prepareHandlerContextWithMinCache(t *testing.T) *handlerContext {
return prepareHandlerContextBase(t, true)
}

func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
key, err := keys.NewPrivateKey()
require.NoError(t, err)

l := zap.NewExample()
tp := layer.NewTestFrostFS()
tp := layer.NewTestNeoFS()

testResolver := &resolver.Resolver{Name: "test_resolver"}
testResolver.SetResolveFunc(func(_ context.Context, name string) (cid.ID, error) {

@ -115,33 +65,18 @@ func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
var owner user.ID
user.IDFromKey(&owner, key.PrivateKey.PublicKey)

treeMock := NewTreeServiceMock(t)

cacheCfg := layer.DefaultCachesConfigs(l)
if minCache {
cacheCfg = getMinCacheConfig(l)
}

layerCfg := &layer.Config{
Caches: cacheCfg,
Caches: layer.DefaultCachesConfigs(zap.NewExample()),
AnonKey: layer.AnonymousKey{Key: key},
Resolver: testResolver,
TreeService: treeMock,
TreeService: layer.NewTreeService(),
}

var pp netmap.PlacementPolicy
err = pp.DecodeString("REP 1")
require.NoError(t, err)

kludge := &kludgeSettingsMock{}

h := &handler{
log: l,
obj: layer.NewLayer(l, tp, layerCfg),
cfg: &Config{
Policy: &placementPolicyMock{defaultPolicy: pp},
XMLDecoder: &xmlDecoderProviderMock{},
Kludge: kludge,
TLSEnabled: true,
},
}

@ -150,35 +85,10 @@ func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
t: t,
h: h,
tp: tp,
tree: treeMock,
context: context.WithValue(context.Background(), middleware.BoxData, newTestAccessBox(t, key)),
kludge: kludge,
context: context.WithValue(context.Background(), api.BoxData, newTestAccessBox(t, key)),
}
}

func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
minCacheCfg := &cache.Config{
Size: 1,
Lifetime: 1,
Logger: logger,
}
return &layer.CachesConfig{
Logger: logger,
Objects: minCacheCfg,
ObjectsList: minCacheCfg,
Names: minCacheCfg,
Buckets: minCacheCfg,
System: minCacheCfg,
AccessControl: minCacheCfg,
}
}

func NewTreeServiceMock(t *testing.T) *tree.Tree {
memCli, err := tree.NewTreeServiceClientMemory()
require.NoError(t, err)
return tree.NewTree(memCli, zap.NewExample())
}

func createTestBucket(hc *handlerContext, bktName string) *data.BucketInfo {
_, err := hc.MockedPool().CreateContainer(hc.Context(), layer.PrmContainerCreate{
Creator: hc.owner,

@ -234,7 +144,7 @@ func createTestObject(hc *handlerContext, bktInfo *data.BucketInfo, objName stri
extObjInfo, err := hc.Layer().PutObject(hc.Context(), &layer.PutObjectParams{
BktInfo: bktInfo,
Object: objName,
Size: uint64(len(content)),
Size: int64(len(content)),
Reader: bytes.NewReader(content),
Header: header,
})

@ -259,8 +169,8 @@ func prepareTestRequestWithQuery(hc *handlerContext, bktName, objName string, qu
r := httptest.NewRequest(http.MethodPut, defaultURL, bytes.NewReader(body))
r.URL.RawQuery = query.Encode()

reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName, Object: objName})
r = r.WithContext(middleware.SetReqInfo(hc.Context(), reqInfo))
reqInfo := api.NewReqInfo(w, r, api.ObjectRequest{Bucket: bktName, Object: objName})
r = r.WithContext(api.SetReqInfo(hc.Context(), reqInfo))

return w, r
}

@ -269,8 +179,8 @@ func prepareTestPayloadRequest(hc *handlerContext, bktName, objName string, payl
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL, payload)

reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName, Object: objName})
r = r.WithContext(middleware.SetReqInfo(hc.Context(), reqInfo))
reqInfo := api.NewReqInfo(w, r, api.ObjectRequest{Bucket: bktName, Object: objName})
r = r.WithContext(api.SetReqInfo(hc.Context(), reqInfo))

return w, r
}

@ -281,23 +191,17 @@ func parseTestResponse(t *testing.T, response *httptest.ResponseRecorder, body i
require.NoError(t, err)
}

func existInMockedFrostFS(tc *handlerContext, bktInfo *data.BucketInfo, objInfo *data.ObjectInfo) bool {
func existInMockedNeoFS(tc *handlerContext, bktInfo *data.BucketInfo, objInfo *data.ObjectInfo) bool {
p := &layer.GetObjectParams{
BucketInfo: bktInfo,
ObjectInfo: objInfo,
Writer: io.Discard,
}

objPayload, err := tc.Layer().GetObject(tc.Context(), p)
if err != nil {
return false
}

_, err = io.ReadAll(objPayload)
require.NoError(tc.t, err)
return true
return tc.Layer().GetObject(tc.Context(), p) == nil
}

func listOIDsFromMockedFrostFS(t *testing.T, tc *handlerContext, bktName string) []oid.ID {
func listOIDsFromMockedNeoFS(t *testing.T, tc *handlerContext, bktName string) []oid.ID {
bktInfo, err := tc.Layer().GetBucketInfo(tc.Context(), bktName)
require.NoError(t, err)

@ -1,21 +1,20 @@
package handler

import (
"io"
"bytes"
"net/http"

"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"github.com/nspcc-dev/neofs-s3-gw/api"
"github.com/nspcc-dev/neofs-s3-gw/api/data"
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
"go.uber.org/zap"
)

const sizeToDetectType = 512

func getRangeToDetectContentType(maxSize uint64) *layer.RangeParams {
end := maxSize
func getRangeToDetectContentType(maxSize int64) *layer.RangeParams {
end := uint64(maxSize)
if sizeToDetectType < end {
end = sizeToDetectType
}

@ -27,7 +26,7 @@ func getRangeToDetectContentType(maxSize uint64) *layer.RangeParams {
}
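
The 512-byte constant above is not arbitrary: Go's http.DetectContentType implements the WHATWG MIME-sniffing algorithm and inspects at most the first 512 bytes of input, so fetching a longer range would not improve detection. A minimal runnable sketch of that property (the payload is hypothetical):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	payload := []byte("GIF89a...") // hypothetical object prefix
	if len(payload) > 512 {
		payload = payload[:512] // bytes past 512 cannot change the verdict
	}
	fmt.Println(http.DetectContentType(payload)) // prints "image/gif"
}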

func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -54,7 +53,7 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
}
info := extendedInfo.ObjectInfo

encryptionParams, err := formEncryptionParams(r)
encryptionParams, err := h.formEncryptionParams(r.Header)
if err != nil {
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return

@ -84,26 +83,18 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {

if len(info.ContentType) == 0 {
if info.ContentType = layer.MimeByFilePath(info.Name); len(info.ContentType) == 0 {
buffer := bytes.NewBuffer(make([]byte, 0, sizeToDetectType))
getParams := &layer.GetObjectParams{
ObjectInfo: info,
Versioned: p.Versioned(),
Writer: buffer,
Range: getRangeToDetectContentType(info.Size),
BucketInfo: bktInfo,
}

objPayload, err := h.obj.GetObject(r.Context(), getParams)
if err != nil {
if err = h.obj.GetObject(r.Context(), getParams); err != nil {
h.logAndSendError(w, "could not get object", reqInfo, err, zap.Stringer("oid", info.ID))
return
}

buffer, err := io.ReadAll(objPayload)
if err != nil {
h.logAndSendError(w, "could not partly read payload to detect content type", reqInfo, err, zap.Stringer("oid", info.ID))
return
}

info.ContentType = http.DetectContentType(buffer)
info.ContentType = http.DetectContentType(buffer.Bytes())
}
}

@ -123,7 +114,7 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
}

func (h *handler) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -131,16 +122,8 @@ func (h *handler) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
return
}

w.Header().Set(api.OwnerID, bktInfo.Owner.EncodeToString())
w.Header().Set(api.ContainerID, bktInfo.CID.EncodeToString())
w.Header().Set(api.AmzBucketRegion, bktInfo.LocationConstraint)

if isAvailableToResolve(bktInfo.Zone, h.cfg.ResolveZoneList, h.cfg.IsResolveListAllow) {
w.Header().Set(api.ContainerName, bktInfo.Name)
w.Header().Set(api.ContainerZone, bktInfo.Zone)
}

middleware.WriteResponse(w, http.StatusOK, nil, middleware.MimeNone)
api.WriteResponse(w, http.StatusOK, nil, api.MimeNone)
}

func (h *handler) setLockingHeaders(bktInfo *data.BucketInfo, lockInfo *data.LockInfo, header http.Header) error {

@ -173,25 +156,3 @@ func writeLockHeaders(h http.Header, legalHold *data.LegalHold, retention *data.
h.Set(api.AmzObjectLockMode, retention.Mode)
}
}

func isAvailableToResolve(zone string, list []string, isAllowList bool) bool {
// empty zone means container doesn't have proper system name,
// so we don't have to resolve it
if len(zone) == 0 {
return false
}

var zoneInList bool
for _, t := range list {
if t == zone {
zoneInList = true
break
}
}
// InList | IsAllowList | Result
// 0      | 0           | 1
// 0      | 1           | 0
// 1      | 0           | 0
// 1      | 1           | 1
return zoneInList == isAllowList
}
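
The truth table in the comment above is logical XNOR: a zone is resolvable exactly when its presence in the list agrees with the list's mode (an allow list resolves only listed zones, a deny list only unlisted ones). A self-contained restatement, with names chosen for illustration:

// xnorResolve mirrors the decision isAvailableToResolve makes for a
// non-empty zone: resolve iff membership matches the list mode.
func xnorResolve(zoneInList, isAllowList bool) bool {
	return zoneInList == isAllowList
}

For instance, xnorResolve(false, false) == true covers the first table row: a deny list that does not mention the zone permits resolution.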

@ -6,14 +6,11 @@ import (
"testing"
"time"

"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neofs-s3-gw/api"
"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
"github.com/nspcc-dev/neofs-sdk-go/bearer"
"github.com/nspcc-dev/neofs-sdk-go/eacl"
"github.com/stretchr/testify/require"
)

@ -27,14 +24,10 @@ func TestConditionalHead(t *testing.T) {
tc.Handler().HeadObjectHandler(w, r)
assertStatus(t, w, http.StatusOK)
etag := w.Result().Header.Get(api.ETag)
etagQuoted := "\"" + etag + "\""

headers := map[string]string{api.IfMatch: etag}
headObject(t, tc, bktName, objName, headers, http.StatusOK)

headers = map[string]string{api.IfMatch: etagQuoted}
headObject(t, tc, bktName, objName, headers, http.StatusOK)

headers = map[string]string{api.IfMatch: "etag"}
headObject(t, tc, bktName, objName, headers, http.StatusPreconditionFailed)
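
The two passing If-Match cases above pin down that the gateway accepts an ETag with or without surrounding double quotes, which matches how different S3 clients send the header. A sketch of the normalization this behavior implies; the helper is illustrative, not the gateway's actual code:

import "strings"

// etagsEqual compares a conditional header value with an object's ETag,
// ignoring optional surrounding quotes on either side.
func etagsEqual(headerValue, objectETag string) bool {
	return strings.Trim(headerValue, `"`) == strings.Trim(objectETag, `"`)
}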

@ -54,9 +47,6 @@ func TestConditionalHead(t *testing.T) {
headers = map[string]string{api.IfNoneMatch: etag}
headObject(t, tc, bktName, objName, headers, http.StatusNotModified)

headers = map[string]string{api.IfNoneMatch: etagQuoted}
headObject(t, tc, bktName, objName, headers, http.StatusNotModified)

headers = map[string]string{api.IfNoneMatch: "etag"}
headObject(t, tc, bktName, objName, headers, http.StatusOK)

@ -92,48 +82,10 @@ func TestInvalidAccessThroughCache(t *testing.T) {
headObject(t, tc, bktName, objName, nil, http.StatusOK)

w, r := prepareTestRequest(tc, bktName, objName, nil)
tc.Handler().HeadObjectHandler(w, r.WithContext(context.WithValue(r.Context(), middleware.BoxData, newTestAccessBox(t, nil))))
tc.Handler().HeadObjectHandler(w, r.WithContext(context.WithValue(r.Context(), api.BoxData, newTestAccessBox(t, nil))))
assertStatus(t, w, http.StatusForbidden)
}

func TestHeadObject(t *testing.T) {
hc := prepareHandlerContextWithMinCache(t)
bktName, objName := "bucket", "obj"
bktInfo, objInfo := createVersionedBucketAndObject(hc.t, hc, bktName, objName)

putObject(hc.t, hc, bktName, objName)

checkFound(hc.t, hc, bktName, objName, objInfo.VersionID())
checkFound(hc.t, hc, bktName, objName, emptyVersion)

addr := getAddressOfLastVersion(hc, bktInfo, objName)
hc.tp.SetObjectError(addr, apistatus.ObjectNotFound{})
hc.tp.SetObjectError(objInfo.Address(), apistatus.ObjectNotFound{})

headObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), s3errors.ErrNoSuchVersion)
headObjectAssertS3Error(hc, bktName, objName, emptyVersion, s3errors.ErrNoSuchKey)
}

func TestIsAvailableToResolve(t *testing.T) {
list := []string{"container", "s3"}

for i, testCase := range [...]struct {
isAllowList bool
list []string
zone string
expected bool
}{
{isAllowList: true, list: list, zone: "container", expected: true},
{isAllowList: true, list: list, zone: "sftp", expected: false},
{isAllowList: false, list: list, zone: "s3", expected: false},
{isAllowList: false, list: list, zone: "system", expected: true},
{isAllowList: true, list: list, zone: "", expected: false},
} {
result := isAvailableToResolve(testCase.zone, testCase.list, testCase.isAllowList)
require.Equal(t, testCase.expected, result, "case %d", i+1)
}
}

func newTestAccessBox(t *testing.T, key *keys.PrivateKey) *accessbox.Box {
var err error
if key == nil {

@ -3,11 +3,11 @@ package handler
import (
"net/http"

"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"github.com/nspcc-dev/neofs-s3-gw/api"
)

func (h *handler) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -15,7 +15,7 @@ func (h *handler) GetBucketLocationHandler(w http.ResponseWriter, r *http.Reques
return
}

if err = middleware.EncodeToResponse(w, LocationResponse{Location: bktInfo.LocationConstraint}); err != nil {
if err = api.EncodeToResponse(w, LocationResponse{Location: bktInfo.LocationConstraint}); err != nil {
h.logAndSendError(w, "couldn't encode bucket location response", reqInfo, err)
}
}

@ -4,8 +4,8 @@ import (
"net/http"
"time"

"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neofs-s3-gw/api"
"github.com/nspcc-dev/neofs-sdk-go/user"
)

const maxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
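
The 1000-entry cap mirrors AWS S3, where ListObjects, ListObjectsV2, and ListObjectVersions return at most 1000 items per page and clients continue via markers or continuation tokens. A sketch of how such a cap is typically applied to a client-supplied max-keys value (the helper is illustrative, not the gateway's code):

// clampMaxKeys bounds a requested page size to the S3 listing limit;
// absent or invalid values fall back to the maximum page size.
func clampMaxKeys(requested int) int {
	if requested <= 0 || requested > maxObjectList {
		return maxObjectList
	}
	return requested
}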

@ -15,7 +15,7 @@ func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
var (
own user.ID
res *ListBucketsResponse
reqInfo = middleware.GetReqInfo(r.Context())
reqInfo = api.GetReqInfo(r.Context())
)

list, err := h.obj.ListBuckets(r.Context())

@ -42,7 +42,7 @@ func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
})
}

if err = middleware.EncodeToResponse(w, res); err != nil {
if err = api.EncodeToResponse(w, res); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}

@ -1,18 +1,16 @@
package handler

import (
"context"
"encoding/xml"
"fmt"
"net/http"
"strconv"
"time"

"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"github.com/nspcc-dev/neofs-s3-gw/api"
"github.com/nspcc-dev/neofs-s3-gw/api/data"
apiErrors "github.com/nspcc-dev/neofs-s3-gw/api/errors"
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
)

const (

@ -27,7 +25,7 @@ const (
)

func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -74,7 +72,7 @@ func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
}

func (h *handler) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -101,13 +99,13 @@ func (h *handler) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
settings.LockConfiguration.ObjectLockEnabled = enabledValue
}

if err = middleware.EncodeToResponse(w, settings.LockConfiguration); err != nil {
if err = api.EncodeToResponse(w, settings.LockConfiguration); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}

func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -144,12 +142,7 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
Enabled: legalHold.Status == legalHoldOn,
},
},
}

p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
if err != nil {
h.logAndSendError(w, "invalid copies number", reqInfo, err)
return
CopiesNumber: h.cfg.CopiesNumber,
}

if err = h.obj.PutLockInfo(r.Context(), p); err != nil {

@ -159,7 +152,7 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
}

func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -190,13 +183,13 @@ func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
legalHold.Status = legalHoldOn
}

if err = middleware.EncodeToResponse(w, legalHold); err != nil {
if err = api.EncodeToResponse(w, legalHold); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}

func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -215,7 +208,7 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
return
}

lock, err := formObjectLockFromRetention(r.Context(), retention, r.Header)
lock, err := formObjectLockFromRetention(retention, r.Header)
if err != nil {
h.logAndSendError(w, "invalid retention configuration", reqInfo, err)
return

@ -227,13 +220,8 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
ObjectName: reqInfo.ObjectName,
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
},
NewLock: lock,
}

p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
if err != nil {
h.logAndSendError(w, "invalid copies number", reqInfo, err)
return
NewLock: lock,
CopiesNumber: h.cfg.CopiesNumber,
}

if err = h.obj.PutLockInfo(r.Context(), p); err != nil {

@ -243,7 +231,7 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
}

func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -282,7 +270,7 @@ func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
retention.Mode = complianceMode
}

if err = middleware.EncodeToResponse(w, retention); err != nil {
if err = api.EncodeToResponse(w, retention); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}

@ -312,7 +300,7 @@ func checkLockConfiguration(conf *data.ObjectLockConfiguration) error {
return nil
}

func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig *data.ObjectLockConfiguration, header http.Header) (*data.ObjectLock, error) {
func formObjectLock(bktInfo *data.BucketInfo, defaultConfig *data.ObjectLockConfiguration, header http.Header) (*data.ObjectLock, error) {
if !bktInfo.ObjectLockEnabled {
if existLockHeaders(header) {
return nil, apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound)

@ -330,7 +318,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
retention := &data.RetentionLock{}
defaultRetention := defaultConfig.Rule.DefaultRetention
retention.IsCompliance = defaultRetention.Mode == complianceMode
now := layer.TimeNow(ctx)
now := time.Now()
if defaultRetention.Days != 0 {
retention.Until = now.Add(time.Duration(defaultRetention.Days) * dayDuration)
} else {

@ -382,7 +370,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
objectLock.Retention.ByPassedGovernance = bypass
}

if objectLock.Retention.Until.Before(layer.TimeNow(ctx)) {
if objectLock.Retention.Until.Before(time.Now()) {
return nil, apiErrors.GetAPIError(apiErrors.ErrPastObjectLockRetainDate)
}
}

@ -396,7 +384,7 @@ func existLockHeaders(header http.Header) bool {
header.Get(api.AmzObjectLockRetainUntilDate) != ""
}

func formObjectLockFromRetention(ctx context.Context, retention *data.Retention, header http.Header) (*data.ObjectLock, error) {
func formObjectLockFromRetention(retention *data.Retention, header http.Header) (*data.ObjectLock, error) {
if retention.Mode != governanceMode && retention.Mode != complianceMode {
return nil, apiErrors.GetAPIError(apiErrors.ErrMalformedXML)
}

@ -406,7 +394,7 @@ func formObjectLockFromRetention(ctx context.Context, retention *data.Retention,
return nil, apiErrors.GetAPIError(apiErrors.ErrMalformedXML)
}

if retentionDate.Before(layer.TimeNow(ctx)) {
if retentionDate.Before(time.Now()) {
return nil, apiErrors.GetAPIError(apiErrors.ErrPastObjectLockRetainDate)
}
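
Both branches reject a retain-until date that is already in the past; they differ only in the clock they consult. layer.TimeNow(ctx) presumably lets tests plant a fixed clock in the context, while time.Now() is the plain wall clock. A compact sketch of the same check with an injectable clock (names are illustrative, not the gateway's code):

// validateRetainUntil rejects retention dates that are not in the future.
// clock is injected so tests can freeze time; production code passes time.Now.
func validateRetainUntil(until time.Time, clock func() time.Time) error {
	if until.Before(clock()) {
		return fmt.Errorf("retain-until date %s is already in the past", until)
	}
	return nil
}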

@ -10,18 +10,15 @@ import (
"testing"
"time"

"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"github.com/nspcc-dev/neofs-s3-gw/api"
"github.com/nspcc-dev/neofs-s3-gw/api/data"
apiErrors "github.com/nspcc-dev/neofs-s3-gw/api/errors"
"github.com/stretchr/testify/require"
)

const defaultURL = "http://localhost/"

func TestFormObjectLock(t *testing.T) {
ctx := context.Background()

for _, tc := range []struct {
name string
bktInfo *data.BucketInfo

@ -76,7 +73,7 @@ func TestFormObjectLock(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
actualObjLock, err := formObjectLock(ctx, tc.bktInfo, tc.config, tc.header)
actualObjLock, err := formObjectLock(tc.bktInfo, tc.config, tc.header)
if tc.expectedError {
require.Error(t, err)
return

@ -89,8 +86,6 @@ func TestFormObjectLock(t *testing.T) {
}

func TestFormObjectLockFromRetention(t *testing.T) {
ctx := context.Background()

for _, tc := range []struct {
name string
retention *data.Retention

@ -137,7 +132,7 @@ func TestFormObjectLockFromRetention(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
actualObjLock, err := formObjectLockFromRetention(ctx, tc.retention, tc.header)
actualObjLock, err := formObjectLockFromRetention(tc.retention, tc.header)
if tc.expectedError {
require.Error(t, err)
return

@ -314,7 +309,7 @@ func TestPutBucketLockConfigurationHandler(t *testing.T) {

w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL, bytes.NewReader(body))
r = r.WithContext(middleware.SetReqInfo(r.Context(), middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: tc.bucket})))
r = r.WithContext(api.SetReqInfo(r.Context(), api.NewReqInfo(w, r, api.ObjectRequest{Bucket: tc.bucket})))

hc.Handler().PutBucketObjectLockConfigHandler(w, r)

@ -387,7 +382,7 @@ func TestGetBucketLockConfigurationHandler(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL, bytes.NewReader(nil))
r = r.WithContext(middleware.SetReqInfo(r.Context(), middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: tc.bucket})))
r = r.WithContext(api.SetReqInfo(r.Context(), api.NewReqInfo(w, r, api.ObjectRequest{Bucket: tc.bucket})))

hc.Handler().GetBucketObjectLockConfigHandler(w, r)

@ -407,7 +402,7 @@ func TestGetBucketLockConfigurationHandler(t *testing.T) {
}

func assertS3Error(t *testing.T, w *httptest.ResponseRecorder, expectedError apiErrors.Error) {
actualErrorResponse := &middleware.ErrorResponse{}
actualErrorResponse := &api.ErrorResponse{}
err := xml.NewDecoder(w.Result().Body).Decode(actualErrorResponse)
require.NoError(t, err)

@ -2,19 +2,17 @@ package handler

import (
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"time"

"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"github.com/google/uuid"
"github.com/nspcc-dev/neofs-s3-gw/api"
"github.com/nspcc-dev/neofs-s3-gw/api/data"
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
"github.com/nspcc-dev/neofs-sdk-go/session"
"go.uber.org/zap"
)

@ -95,7 +93,7 @@ const (
)

func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -104,7 +102,10 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
}

uploadID := uuid.New()
additional := []zap.Field{zap.String("uploadID", uploadID.String())}
additional := []zap.Field{
zap.String("uploadID", uploadID.String()),
zap.String("Key", reqInfo.ObjectName),
}

p := &layer.CreateMultipartParams{
Info: &layer.UploadInfoParams{

@ -118,11 +119,11 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
if containsACLHeaders(r) {
key, err := h.bearerTokenIssuerKey(r.Context())
if err != nil {
h.logAndSendError(w, "couldn't get gate key", reqInfo, err, additional...)
h.logAndSendError(w, "couldn't get gate key", reqInfo, err)
return
}
if _, err = parseACLHeaders(r.Header, key); err != nil {
h.logAndSendError(w, "could not parse acl", reqInfo, err, additional...)
h.logAndSendError(w, "could not parse acl", reqInfo, err)
return
}
p.Data.ACLHeaders = formACLHeadersForMultipart(r.Header)

@ -136,9 +137,9 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
}
}

p.Info.Encryption, err = formEncryptionParams(r)
p.Info.Encryption, err = h.formEncryptionParams(r.Header)
if err != nil {
h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return
}

@ -147,9 +148,9 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
p.Header[api.ContentType] = contentType
}

p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, bktInfo.LocationConstraint)
p.CopiesNumber, err = getCopiesNumberOrDefault(p.Header, h.cfg.CopiesNumber)
if err != nil {
h.logAndSendError(w, "invalid copies number", reqInfo, err, additional...)
h.logAndSendError(w, "invalid copies number", reqInfo, err)
return
}
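
This hunk shows a real behavioral split between the trees: the FrostFS side resolves copies numbers per location constraint (pickCopiesNumbers), while the NeoFS side keeps one configured value with a header override (getCopiesNumberOrDefault). Judging by placementPolicyMock earlier in this diff, the per-location lookup is a map with a global fallback; a hedged sketch of that shape (names and signature are assumptions):

// copiesForLocation returns the copies numbers configured for a location
// constraint, falling back to the default when none are configured.
func copiesForLocation(byLocation map[string][]uint32, location string, def []uint32) []uint32 {
	if numbers, ok := byLocation[location]; ok {
		return numbers
	}
	return def
}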

@ -168,7 +169,7 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
UploadID: uploadID.String(),
}

if err = middleware.EncodeToResponse(w, resp); err != nil {
if err = api.EncodeToResponse(w, resp); err != nil {
h.logAndSendError(w, "could not encode InitiateMultipartUploadResponse to response", reqInfo, err, additional...)
return
}

@ -194,7 +195,7 @@ func formACLHeadersForMultipart(header http.Header) map[string]string {
}

func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -205,27 +206,15 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
var (
queryValues = r.URL.Query()
uploadID = queryValues.Get(uploadIDHeaderName)
partNumStr = queryValues.Get(partNumberHeaderName)
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("partNumber", partNumStr)}
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
)

partNumber, err := strconv.Atoi(partNumStr)
partNumber, err := strconv.Atoi(queryValues.Get(partNumberHeaderName))
if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber))
return
}
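
Both branches enforce the same window on part numbers. In the S3 API a multipart part number must lie in [1, 10000], so layer.UploadMinPartNumber and layer.UploadMaxPartNumber presumably encode exactly those bounds. A worked restatement of the check:

// Assumed bounds mirroring the S3 multipart specification.
const (
	uploadMinPartNumber = 1
	uploadMaxPartNumber = 10000
)

func validPartNumber(n int) bool {
	return n >= uploadMinPartNumber && n <= uploadMaxPartNumber
}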

body, err := h.getBodyReader(r)
if err != nil {
h.logAndSendError(w, "failed to get body reader", reqInfo, err, additional...)
return
}

var size uint64
if r.ContentLength > 0 {
size = uint64(r.ContentLength)
}

p := &layer.UploadPartParams{
Info: &layer.UploadInfoParams{
UploadID: uploadID,

@ -233,13 +222,13 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
Key: reqInfo.ObjectName,
},
PartNumber: partNumber,
Size: size,
Reader: body,
Size: r.ContentLength,
Reader: r.Body,
}

p.Info.Encryption, err = formEncryptionParams(r)
p.Info.Encryption, err = h.formEncryptionParams(r.Header)
if err != nil {
h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return
}

@ -254,23 +243,21 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
}

w.Header().Set(api.ETag, hash)
middleware.WriteSuccessResponseHeadersOnly(w)
api.WriteSuccessResponseHeadersOnly(w)
}

func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
var (
versionID string
ctx = r.Context()
reqInfo = middleware.GetReqInfo(ctx)
reqInfo = api.GetReqInfo(r.Context())
queryValues = reqInfo.URL.Query()
uploadID = queryValues.Get(uploadIDHeaderName)
partNumStr = queryValues.Get(partNumberHeaderName)
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("partNumber", partNumStr)}
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
)

partNumber, err := strconv.Atoi(partNumStr)
partNumber, err := strconv.Atoi(queryValues.Get(partNumberHeaderName))
if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber))
return
}

@ -281,7 +268,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
}
srcBucket, srcObject, err := path2BucketObject(src)
if err != nil {
h.logAndSendError(w, "invalid source copy", reqInfo, err, additional...)
h.logAndSendError(w, "invalid source copy", reqInfo, err)
return
}

@ -294,23 +281,21 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {

srcBktInfo, err := h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner)
if err != nil {
h.logAndSendError(w, "could not get source bucket info", reqInfo, err, additional...)
h.logAndSendError(w, "could not get source bucket info", reqInfo, err)
return
}

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get target bucket info", reqInfo, err, additional...)
h.logAndSendError(w, "could not get target bucket info", reqInfo, err)
return
}

headPrm := &layer.HeadObjectParams{
srcInfo, err := h.obj.GetObjectInfo(r.Context(), &layer.HeadObjectParams{
BktInfo: srcBktInfo,
Object: srcObject,
VersionID: versionID,
}

srcInfo, err := h.obj.GetObjectInfo(ctx, headPrm)
})
if err != nil {
if errors.IsS3Error(err, errors.ErrNoSuchKey) && versionID != "" {
h.logAndSendError(w, "could not head source object version", reqInfo,

@ -335,7 +320,6 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
}

p := &layer.UploadCopyParams{
Versioned: headPrm.Versioned(),
Info: &layer.UploadInfoParams{
UploadID: uploadID,
Bkt: bktInfo,

@ -347,18 +331,18 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
Range: srcRange,
}

p.Info.Encryption, err = formEncryptionParams(r)
p.Info.Encryption, err = h.formEncryptionParams(r.Header)
if err != nil {
h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return
}

if err = p.Info.Encryption.MatchObjectEncryption(layer.FormEncryptionInfo(srcInfo.Headers)); err != nil {
h.logAndSendError(w, "encryption doesn't match object", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrBadRequest), err), additional...)
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
return
}

info, err := h.obj.UploadPartCopy(ctx, p)
info, err := h.obj.UploadPartCopy(r.Context(), p)
if err != nil {
h.logAndSendError(w, "could not upload part copy", reqInfo, err, additional...)
return

@ -373,13 +357,13 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
addSSECHeaders(w.Header(), r.Header)
}

if err = middleware.EncodeToResponse(w, response); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
if err = api.EncodeToResponse(w, response); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}

func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -388,17 +372,19 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
}

var (
sessionTokenSetEACL *session.Container

uploadID = r.URL.Query().Get(uploadIDHeaderName)
uploadInfo = &layer.UploadInfoParams{
UploadID: uploadID,
Bkt: bktInfo,
Key: reqInfo.ObjectName,
}
additional = []zap.Field{zap.String("uploadID", uploadID)}
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
)

reqBody := new(CompleteMultipartUpload)
if err = h.cfg.XMLDecoder.NewCompleteMultipartDecoder(r.Body).Decode(reqBody); err != nil {
if err = xml.NewDecoder(r.Body).Decode(reqBody); err != nil {
h.logAndSendError(w, "could not read complete multipart upload xml", reqInfo,
errors.GetAPIError(errors.ErrMalformedXML), additional...)
return

@ -413,53 +399,11 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
Parts: reqBody.Parts,
}

// Next operations might take some time, so we want to keep client's
// connection alive. To do so, gateway sends periodic white spaces
// back to the client the same way as Amazon S3 service does.
stopPeriodicResponseWriter := periodicXMLWriter(w, h.cfg.CompleteMultipartKeepalive)

// Start complete multipart upload which may take some time to fetch object
// and re-upload it part by part.
objInfo, err := h.completeMultipartUpload(r, c, bktInfo, reqInfo)

// Stop periodic writer as complete multipart upload is finished
// successfully or not.
headerIsWritten := stopPeriodicResponseWriter()

responseWriter := middleware.EncodeToResponse
errLogger := h.logAndSendError
// Do not send XML and HTTP headers if periodic writer was invoked at this point.
if headerIsWritten {
responseWriter = middleware.EncodeToResponseNoHeader
errLogger = h.logAndSendErrorNoHeader
}

uploadData, extendedObjInfo, err := h.obj.CompleteMultipartUpload(r.Context(), c)
if err != nil {
errLogger(w, "complete multipart error", reqInfo, err, additional...)
h.logAndSendError(w, "could not complete multipart upload", reqInfo, err, additional...)
return
}

response := CompleteMultipartUploadResponse{
Bucket: objInfo.Bucket,
ETag: objInfo.HashSum,
Key: objInfo.Name,
}

// Here we previously set api.AmzVersionID header for versioned bucket.
// It is not possible after #60, because of periodic white
// space XML writer to keep connection with the client.

if err = responseWriter(w, response); err != nil {
errLogger(w, "something went wrong", reqInfo, err, additional...)
}
}

func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMultipartParams, bktInfo *data.BucketInfo, reqInfo *middleware.ReqInfo) (*data.ObjectInfo, error) {
ctx := r.Context()
uploadData, extendedObjInfo, err := h.obj.CompleteMultipartUpload(ctx, c)
if err != nil {
return nil, fmt.Errorf("could not complete multipart upload: %w", err)
}
objInfo := extendedObjInfo.ObjectInfo

if len(uploadData.TagSet) != 0 {

@ -472,23 +416,22 @@ func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMult
TagSet: uploadData.TagSet,
NodeVersion: extendedObjInfo.NodeVersion,
}
if _, err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
return nil, fmt.Errorf("could not put tagging file of completed multipart upload: %w", err)
if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
h.logAndSendError(w, "could not put tagging file of completed multipart upload", reqInfo, err, additional...)
return
}
}

if len(uploadData.ACLHeaders) != 0 {
sessionTokenSetEACL, err := getSessionTokenSetEACL(ctx)
key, err := h.bearerTokenIssuerKey(r.Context())
if err != nil {
return nil, fmt.Errorf("couldn't get eacl token: %w", err)
}
key, err := h.bearerTokenIssuerKey(ctx)
if err != nil {
return nil, fmt.Errorf("couldn't get gate key: %w", err)
h.logAndSendError(w, "couldn't get gate key", reqInfo, err)
return
}
acl, err := parseACLHeaders(r.Header, key)
if err != nil {
return nil, fmt.Errorf("could not parse acl: %w", err)
h.logAndSendError(w, "could not parse acl", reqInfo, err)
return
}

resInfo := &resourceInfo{

@ -497,10 +440,12 @@ func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMult
}
astObject, err := aclToAst(acl, resInfo)
if err != nil {
return nil, fmt.Errorf("could not translate acl of completed multipart upload to ast: %w", err)
h.logAndSendError(w, "could not translate acl of completed multipart upload to ast", reqInfo, err, additional...)
return
}
if _, err = h.updateBucketACL(r, astObject, bktInfo, sessionTokenSetEACL); err != nil {
return nil, fmt.Errorf("could not update bucket acl while completing multipart upload: %w", err)
h.logAndSendError(w, "could not update bucket acl while completing multipart upload", reqInfo, err, additional...)
return
}
}

@ -510,15 +455,32 @@ func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMult
BktInfo: bktInfo,
ReqInfo: reqInfo,
}
if err = h.sendNotifications(ctx, s); err != nil {
h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
if err = h.sendNotifications(r.Context(), s); err != nil {
h.log.Error("couldn't send notification: %w", zap.Error(err))
}

return objInfo, nil
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
if err != nil {
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
}

response := CompleteMultipartUploadResponse{
Bucket: objInfo.Bucket,
ETag: objInfo.HashSum,
Key: objInfo.Name,
}

if bktSettings.VersioningEnabled() {
w.Header().Set(api.AmzVersionID, objInfo.VersionID())
}

if err = api.EncodeToResponse(w, response); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}

func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -560,13 +522,13 @@ func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Req
return
}

if err = middleware.EncodeToResponse(w, encodeListMultipartUploadsToResponse(list, p)); err != nil {
if err = api.EncodeToResponse(w, encodeListMultipartUploadsToResponse(list, p)); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}

func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -611,7 +573,7 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
PartNumberMarker: partNumberMarker,
}

p.Info.Encryption, err = formEncryptionParams(r)
p.Info.Encryption, err = h.formEncryptionParams(r.Header)
if err != nil {
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return

@ -623,13 +585,13 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
return
}

if err = middleware.EncodeToResponse(w, encodeListPartsToResponse(list, p)); err != nil {
if err = api.EncodeToResponse(w, encodeListPartsToResponse(list, p)); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}

func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := api.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

@ -646,7 +608,7 @@ func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Req
Key: reqInfo.ObjectName,
}

p.Encryption, err = formEncryptionParams(r)
p.Encryption, err = h.formEncryptionParams(r.Header)
if err != nil {
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return

@ -719,53 +681,3 @@ func encodeListPartsToResponse(info *layer.ListPartsInfo, params *layer.ListPart
Parts: info.Parts,
}
}

// periodicXMLWriter creates go routine to write xml header and whitespaces
// over time to avoid connection drop from the client. To work properly,
// pass `http.ResponseWriter` with implemented `http.Flusher` interface.
// Returns stop function which returns boolean if writer has been used
// during goroutine execution. To disable writer, pass 0 duration value.
func periodicXMLWriter(w io.Writer, dur time.Duration) (stop func() bool) {
if dur == 0 { // 0 duration disables periodic writer
return func() bool { return false }
}

whitespaceChar := []byte(" ")
closer := make(chan struct{})
done := make(chan struct{})
headerWritten := false

go func() {
defer close(done)

tick := time.NewTicker(dur)
defer tick.Stop()

for {
select {
case <-tick.C:
if !headerWritten {
_, err := w.Write([]byte(xml.Header))
headerWritten = err == nil
}
_, err := w.Write(whitespaceChar)
if err != nil {
return // is there anything we can do better than ignore error?
}
if buffered, ok := w.(http.Flusher); ok {
buffered.Flush()
}
case <-closer:
return
}
}
}()

stop = func() bool {
close(closer)
<-done // wait for goroutine to stop
return headerWritten
}

return stop
}
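
Note the contract the stop function carries: once the goroutine has written anything to an http.ResponseWriter, the 200 status line is implicitly committed, which is exactly why CompleteMultipartUploadHandler above switches to EncodeToResponseNoHeader and logAndSendErrorNoHeader after stopping the writer. A hedged sketch of a caller honoring that contract (doSlowWork and the interval are illustrative stand-ins):

// completeWithKeepalive sketches how periodicXMLWriter is meant to be driven.
func completeWithKeepalive(w http.ResponseWriter, doSlowWork func() error) {
	stop := periodicXMLWriter(w, 10*time.Second) // interval is illustrative
	err := doSlowWork()                          // whitespace flows meanwhile
	headerWritten := stop()
	if headerWritten && err != nil {
		// Too late for an HTTP error status: the failure has to be
		// encoded into the XML body instead.
	}
}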

@ -1,126 +0,0 @@
package handler

import (
"bytes"
"encoding/xml"
"net/http"
"net/url"
"testing"
"time"

s3Errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"github.com/stretchr/testify/require"
)

func TestPeriodicWriter(t *testing.T) {
const dur = 100 * time.Millisecond
const whitespaces = 8
expected := []byte(xml.Header)
for i := 0; i < whitespaces; i++ {
expected = append(expected, []byte(" ")...)
}

t.Run("writes data", func(t *testing.T) {
buf := bytes.NewBuffer(nil)
stop := periodicXMLWriter(buf, dur)

// N number of whitespaces + half durations to guarantee at least N writes in buffer
time.Sleep(whitespaces*dur + dur/2)
require.True(t, stop())
require.Equal(t, expected, buf.Bytes())

t.Run("no additional data after stop", func(t *testing.T) {
time.Sleep(2 * dur)
require.Equal(t, expected, buf.Bytes())
})
})

t.Run("does not write data", func(t *testing.T) {
buf := bytes.NewBuffer(nil)
stop := periodicXMLWriter(buf, dur)
time.Sleep(dur / 2)
require.False(t, stop())
require.Empty(t, buf.Bytes())

t.Run("disabled", func(t *testing.T) {
stop = periodicXMLWriter(buf, 0)
require.False(t, stop())
require.Empty(t, buf.Bytes())
})
})
}

func TestMultipartUploadInvalidPart(t *testing.T) {
hc := prepareHandlerContext(t)

bktName, objName := "bucket-to-upload-part", "object-multipart"
createTestBucket(hc, bktName)
partSize := 8 // less than min part size

multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
etag2, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))
}

func TestMultipartReUploadPart(t *testing.T) {
hc := prepareHandlerContext(t)

bktName, objName := "bucket-to-upload-part", "object-multipart"
bktInfo := createTestBucket(hc, bktName)
partSizeLast := 8 // less than min part size
partSizeFirst := 5 * 1024 * 1024

uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeLast)
etag2, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeFirst)

list := listParts(hc, bktName, objName, uploadInfo.UploadID)
require.Len(t, list.Parts, 2)
require.Equal(t, etag1, list.Parts[0].ETag)
require.Equal(t, etag2, list.Parts[1].ETag)

w := completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))

etag1, data1 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeFirst)
etag2, data2 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeLast)

list = listParts(hc, bktName, objName, uploadInfo.UploadID)
require.Len(t, list.Parts, 2)
require.Equal(t, etag1, list.Parts[0].ETag)
require.Equal(t, etag2, list.Parts[1].ETag)

innerUploadInfo, err := hc.tree.GetMultipartUpload(hc.context, bktInfo, objName, uploadInfo.UploadID)
require.NoError(t, err)
treeParts, err := hc.tree.GetParts(hc.Context(), bktInfo, innerUploadInfo.ID)
require.NoError(t, err)
require.Len(t, treeParts, len(list.Parts))

w = completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
assertStatus(hc.t, w, http.StatusOK)

data, _ := getObject(hc, bktName, objName)
equalDataSlices(t, append(data1, data2...), data)
}

func listParts(hc *handlerContext, bktName, objName string, uploadID string) *ListPartsResponse {
return listPartsBase(hc, bktName, objName, false, uploadID)
}

func listPartsBase(hc *handlerContext, bktName, objName string, encrypted bool, uploadID string) *ListPartsResponse {
query := make(url.Values)
query.Set(uploadIDQuery, uploadID)

w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, nil)
if encrypted {
setEncryptHeaders(r)
}

hc.Handler().ListPartsHandler(w, r)
listPartsResponse := &ListPartsResponse{}
readResponse(hc.t, w, http.StatusOK, listPartsResponse)

return listPartsResponse
}

@ -3,18 +3,18 @@ package handler
import (
"net/http"

"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"github.com/nspcc-dev/neofs-s3-gw/api"
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
)

func (h *handler) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
h.logAndSendError(w, "not supported", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}

func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
h.logAndSendError(w, "not supported", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}

func (h *handler) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
h.logAndSendError(w, "not supported", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}
@ -6,14 +6,13 @@ import (
    "fmt"
    "net/http"
    "strings"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
    "github.com/google/uuid"
    "github.com/nspcc-dev/neofs-s3-gw/api"
    "github.com/nspcc-dev/neofs-s3-gw/api/data"
    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
    "github.com/nspcc-dev/neofs-s3-gw/api/layer"
    "github.com/nspcc-dev/neofs-sdk-go/bearer"
)

type (
@ -21,9 +20,8 @@ type (
        Event string
        NotificationInfo *data.NotificationInfo
        BktInfo *data.BucketInfo
        ReqInfo *middleware.ReqInfo
        ReqInfo *api.ReqInfo
        User string
        Time time.Time
    }

    NotificationConfiguration struct {
@ -96,7 +94,7 @@ var validEvents = map[string]struct{}{
}

func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
    reqInfo := middleware.GetReqInfo(r.Context())
    reqInfo := api.GetReqInfo(r.Context())
    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
        h.logAndSendError(w, "could not get bucket info", reqInfo, err)
@ -109,7 +107,7 @@ func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Re
        return
    }

    if _, err = h.checkBucketConfiguration(r.Context(), conf, reqInfo); err != nil {
    if _, err = h.checkBucketConfiguration(conf, reqInfo); err != nil {
        h.logAndSendError(w, "couldn't check bucket configuration", reqInfo, err)
        return
    }
@ -118,12 +116,7 @@ func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Re
        RequestInfo: reqInfo,
        BktInfo: bktInfo,
        Configuration: conf,
    }

    p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
    if err != nil {
        h.logAndSendError(w, "invalid copies number", reqInfo, err)
        return
        CopiesNumber: h.cfg.CopiesNumber,
    }

    if err = h.obj.PutBucketNotificationConfiguration(r.Context(), p); err != nil {
@ -133,7 +126,7 @@ func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Re
}

func (h *handler) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
    reqInfo := middleware.GetReqInfo(r.Context())
    reqInfo := api.GetReqInfo(r.Context())

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
@ -147,7 +140,7 @@ func (h *handler) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Re
        return
    }

    if err = middleware.EncodeToResponse(w, conf); err != nil {
    if err = api.EncodeToResponse(w, conf); err != nil {
        h.logAndSendError(w, "could not encode bucket notification configuration to response", reqInfo, err)
        return
    }
@ -171,15 +164,13 @@ func (h *handler) sendNotifications(ctx context.Context, p *SendNotificationPara
        p.User = bearer.ResolveIssuer(*box.Gate.BearerToken).EncodeToString()
    }

    p.Time = layer.TimeNow(ctx)

    topics := filterSubjects(conf, p.Event, p.NotificationInfo.Name)

    return h.notificator.SendNotifications(topics, p)
}

// checkBucketConfiguration checks notification configuration and generates an ID for configurations with empty ids.
func (h *handler) checkBucketConfiguration(ctx context.Context, conf *data.NotificationConfiguration, r *middleware.ReqInfo) (completed bool, err error) {
func (h *handler) checkBucketConfiguration(conf *data.NotificationConfiguration, r *api.ReqInfo) (completed bool, err error) {
    if conf == nil {
        return
    }
@ -198,11 +189,11 @@ func (h *handler) checkBucketConfiguration(ctx context.Context, conf *data.Notif
    }

    if h.cfg.NotificatorEnabled {
        if err = h.notificator.SendTestNotification(q.QueueArn, r.BucketName, r.RequestID, r.Host, layer.TimeNow(ctx)); err != nil {
        if err = h.notificator.SendTestNotification(q.QueueArn, r.BucketName, r.RequestID, r.Host); err != nil {
            return
        }
    } else {
        h.reqLogger(ctx).Warn("failed to send test event because notifications are disabled")
        h.log.Warn("failed to send test event because notifications are disabled")
    }

    if q.ID == "" {
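checkBucketConfiguration also generates IDs for queue configurations that arrive without one, which is why github.com/google/uuid stays in the import list. A self-contained sketch of that backfill; the struct here is a trimmed stand-in, not the gateway's type:

package main

import (
    "fmt"

    "github.com/google/uuid"
)

// queueConfig is a trimmed stand-in for the gateway's queue configuration type.
type queueConfig struct {
    ID       string
    QueueArn string
}

// backfillIDs assigns a fresh UUID to every configuration that arrived without one.
func backfillIDs(queues []queueConfig) {
    for i := range queues {
        if queues[i].ID == "" {
            queues[i].ID = uuid.NewString()
        }
    }
}

func main() {
    qs := []queueConfig{{QueueArn: "arn:aws:sqs:us-east-1:111122223333:events"}}
    backfillIDs(qs)
    fmt.Println(qs[0].ID) // a generated UUID
}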
@ -3,8 +3,8 @@ package handler

import (
    "testing"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "github.com/nspcc-dev/neofs-s3-gw/api/data"
    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
    "github.com/stretchr/testify/require"
)
@ -6,16 +6,16 @@ import (
    "strconv"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "github.com/nspcc-dev/neofs-s3-gw/api"
    "github.com/nspcc-dev/neofs-s3-gw/api/data"
    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
    "github.com/nspcc-dev/neofs-s3-gw/api/layer"
    oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
)

// ListObjectsV1Handler handles objects listing requests for API version 1.
func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
    reqInfo := middleware.GetReqInfo(r.Context())
    reqInfo := api.GetReqInfo(r.Context())
    params, err := parseListObjectsArgsV1(reqInfo)
    if err != nil {
        h.logAndSendError(w, "failed to parse arguments", reqInfo, err)
@ -33,7 +33,7 @@ func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
        return
    }

    if err = middleware.EncodeToResponse(w, encodeV1(params, list)); err != nil {
    if err = api.EncodeToResponse(w, encodeV1(params, list)); err != nil {
        h.logAndSendError(w, "something went wrong", reqInfo, err)
    }
}
@ -59,7 +59,7 @@ func encodeV1(p *layer.ListObjectsParamsV1, list *layer.ListObjectsInfoV1) *List

// ListObjectsV2Handler handles objects listing requests for API version 2.
func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
    reqInfo := middleware.GetReqInfo(r.Context())
    reqInfo := api.GetReqInfo(r.Context())
    params, err := parseListObjectsArgsV2(reqInfo)
    if err != nil {
        h.logAndSendError(w, "failed to parse arguments", reqInfo, err)
@ -77,7 +77,7 @@ func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
        return
    }

    if err = middleware.EncodeToResponse(w, encodeV2(params, list)); err != nil {
    if err = api.EncodeToResponse(w, encodeV2(params, list)); err != nil {
        h.logAndSendError(w, "something went wrong", reqInfo, err)
    }
}
@ -103,7 +103,7 @@ func encodeV2(p *layer.ListObjectsParamsV2, list *layer.ListObjectsInfoV2) *List
    return res
}

func parseListObjectsArgsV1(reqInfo *middleware.ReqInfo) (*layer.ListObjectsParamsV1, error) {
func parseListObjectsArgsV1(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV1, error) {
    var (
        res layer.ListObjectsParamsV1
        queryValues = reqInfo.URL.Query()
@ -120,7 +120,7 @@ func parseListObjectsArgsV1(reqInfo *middleware.ReqInfo) (*layer.ListObjectsPara
    return &res, nil
}

func parseListObjectsArgsV2(reqInfo *middleware.ReqInfo) (*layer.ListObjectsParamsV2, error) {
func parseListObjectsArgsV2(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV2, error) {
    var (
        res layer.ListObjectsParamsV2
        queryValues = reqInfo.URL.Query()
@ -142,7 +142,7 @@ func parseListObjectsArgsV2(reqInfo *middleware.ReqInfo) (*layer.ListObjectsPara
    return &res, nil
}

func parseListObjectArgs(reqInfo *middleware.ReqInfo) (*layer.ListObjectsParamsCommon, error) {
func parseListObjectArgs(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsCommon, error) {
    var (
        err error
        res layer.ListObjectsParamsCommon
@ -211,7 +211,7 @@ func fillContents(src []*data.ObjectInfo, encode string, fetchOwner bool) []Obje
}

func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
    reqInfo := middleware.GetReqInfo(r.Context())
    reqInfo := api.GetReqInfo(r.Context())
    p, err := parseListObjectVersionsRequest(reqInfo)
    if err != nil {
        h.logAndSendError(w, "failed to parse request", reqInfo, err)
@ -230,12 +230,12 @@ func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http
    }

    response := encodeListObjectVersionsToResponse(info, p.BktInfo.Name)
    if err = middleware.EncodeToResponse(w, response); err != nil {
    if err = api.EncodeToResponse(w, response); err != nil {
        h.logAndSendError(w, "something went wrong", reqInfo, err)
    }
}

func parseListObjectVersionsRequest(reqInfo *middleware.ReqInfo) (*layer.ListObjectVersionsParams, error) {
func parseListObjectVersionsRequest(reqInfo *api.ReqInfo) (*layer.ListObjectVersionsParams, error) {
    var (
        err error
        res layer.ListObjectVersionsParams
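All of these parse helpers read listing options straight from the request query string. A sketch of the kind of parsing they do; the 1000-key default matches S3's documented behavior, the rest is illustrative:

package main

import (
    "fmt"
    "net/url"
    "strconv"
)

// parseMaxKeys reads the "max-keys" listing parameter with S3's default of 1000.
func parseMaxKeys(q url.Values) (int, error) {
    s := q.Get("max-keys")
    if s == "" {
        return 1000, nil
    }
    n, err := strconv.Atoi(s)
    if err != nil || n < 0 {
        return 0, fmt.Errorf("invalid max-keys %q", s)
    }
    return n, nil
}

func main() {
    q, _ := url.ParseQuery("prefix=logs/&delimiter=/&max-keys=42")
    n, err := parseMaxKeys(q)
    fmt.Println(n, err) // 42 <nil>
}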
@ -6,7 +6,7 @@ import (
    "strconv"
    "testing"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "github.com/nspcc-dev/neofs-s3-gw/api/data"
    "github.com/stretchr/testify/require"
)
@ -16,16 +16,15 @@ import (
    "strings"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
    "github.com/nspcc-dev/neofs-s3-gw/api"
    "github.com/nspcc-dev/neofs-s3-gw/api/auth"
    "github.com/nspcc-dev/neofs-s3-gw/api/data"
    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
    "github.com/nspcc-dev/neofs-s3-gw/api/layer"
    "github.com/nspcc-dev/neofs-s3-gw/api/layer/encryption"
    "github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
    "github.com/nspcc-dev/neofs-sdk-go/eacl"
    "github.com/nspcc-dev/neofs-sdk-go/session"
    "go.uber.org/zap"
)

@ -44,13 +43,13 @@ func (p *postPolicy) condition(key string) *policyCondition {
    return nil
}

func (p *postPolicy) CheckContentLength(size uint64) bool {
func (p *postPolicy) CheckContentLength(size int64) bool {
    if p.empty {
        return true
    }
    for _, condition := range p.Conditions {
        if condition.Matching == "content-length-range" {
            length := strconv.FormatUint(size, 10)
            length := strconv.FormatInt(size, 10)
            return condition.Key <= length && length <= condition.Value
        }
    }
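The content-length-range condition comes from the browser POST policy: a clause like ["content-length-range", 1048576, 10485760] bounds the upload size. A minimal numeric check of the same rule (the handler above compares decimal strings instead):

package main

import "fmt"

// withinContentLengthRange checks a ["content-length-range", min, max] POST
// policy condition numerically; names here are illustrative.
func withinContentLengthRange(size, min, max int64) bool {
    return min <= size && size <= max
}

func main() {
    fmt.Println(withinContentLengthRange(5<<20, 1<<20, 10<<20)) // true
    fmt.Println(withinContentLengthRange(8, 1<<20, 10<<20))     // false
}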
@ -180,8 +179,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
        newEaclTable *eacl.Table
        sessionTokenEACL *session.Container
        containsACL = containsACLHeaders(r)
        ctx = r.Context()
        reqInfo = middleware.GetReqInfo(ctx)
        reqInfo = api.GetReqInfo(r.Context())
    )

    if containsACL {
@ -214,57 +212,44 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
        metadata[api.Expires] = expires
    }

    encryptionParams, err := formEncryptionParams(r)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    body, err := h.getBodyReader(r)
    if err != nil {
        h.logAndSendError(w, "failed to get body reader", reqInfo, err)
        return
    }
    if encodings := r.Header.Get(api.ContentEncoding); len(encodings) > 0 {
        metadata[api.ContentEncoding] = encodings
    }

    var size uint64
    if r.ContentLength > 0 {
        size = uint64(r.ContentLength)
    }

    params := &layer.PutObjectParams{
        BktInfo: bktInfo,
        Object: reqInfo.ObjectName,
        Reader: body,
        Size: size,
        Header: metadata,
        Encryption: encryptionParams,
    }

    params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, bktInfo.LocationConstraint)
    copiesNumber, err := getCopiesNumberOrDefault(metadata, h.cfg.CopiesNumber)
    if err != nil {
        h.logAndSendError(w, "invalid copies number", reqInfo, err)
        return
    }

    settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
    encryption, err := h.formEncryptionParams(r.Header)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    params := &layer.PutObjectParams{
        BktInfo: bktInfo,
        Object: reqInfo.ObjectName,
        Reader: r.Body,
        Size: r.ContentLength,
        Header: metadata,
        Encryption: encryption,
        CopiesNumber: copiesNumber,
    }

    settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
    if err != nil {
        h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
        return
    }

    params.Lock, err = formObjectLock(ctx, bktInfo, settings.LockConfiguration, r.Header)
    params.Lock, err = formObjectLock(bktInfo, settings.LockConfiguration, r.Header)
    if err != nil {
        h.logAndSendError(w, "could not form object lock", reqInfo, err)
        return
    }

    extendedObjInfo, err := h.obj.PutObject(ctx, params)
    extendedObjInfo, err := h.obj.PutObject(r.Context(), params)
    if err != nil {
        _, err2 := io.Copy(io.Discard, body)
        err3 := body.Close()
        _, err2 := io.Copy(io.Discard, r.Body)
        err3 := r.Body.Close()
        h.logAndSendError(w, "could not upload object", reqInfo, err, zap.Errors("body close errors", []error{err2, err3}))
        return
    }
@ -276,8 +261,8 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
        BktInfo: bktInfo,
        ReqInfo: reqInfo,
    }
    if err = h.sendNotifications(ctx, s); err != nil {
        h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
    if err = h.sendNotifications(r.Context(), s); err != nil {
        h.log.Error("couldn't send notification: %w", zap.Error(err))
    }

    if containsACL {
@ -319,66 +304,38 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
    if settings.VersioningEnabled() {
        w.Header().Set(api.AmzVersionID, objInfo.VersionID())
    }
    if encryptionParams.Enabled() {
    if encryption.Enabled() {
        addSSECHeaders(w.Header(), r.Header)
    }

    w.Header().Set(api.ETag, objInfo.HashSum)
    middleware.WriteSuccessResponseHeadersOnly(w)
    api.WriteSuccessResponseHeadersOnly(w)
}

func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
    if !api.IsSignedStreamingV4(r) {
        return r.Body, nil
func getCopiesNumberOrDefault(metadata map[string]string, defaultCopiesNumber uint32) (uint32, error) {
    copiesNumberStr, ok := metadata[layer.AttributeNeofsCopiesNumber]
    if !ok {
        return defaultCopiesNumber, nil
    }

    encodings := r.Header.Values(api.ContentEncoding)
    var chunkedEncoding bool
    resultContentEncoding := make([]string, 0, len(encodings))
    for _, enc := range encodings {
        for _, e := range strings.Split(enc, ",") {
            e = strings.TrimSpace(e)
            if e == api.AwsChunked { // probably we should also check position of this header value
                chunkedEncoding = true
            } else {
                resultContentEncoding = append(resultContentEncoding, e)
            }
        }
    }
    r.Header.Set(api.ContentEncoding, strings.Join(resultContentEncoding, ","))

    if !chunkedEncoding && !h.cfg.Kludge.BypassContentEncodingInChunks() {
        return nil, fmt.Errorf("%w: request is not chunk encoded, encodings '%s'",
            errors.GetAPIError(errors.ErrInvalidEncodingMethod), strings.Join(encodings, ","))
    }

    decodeContentSize := r.Header.Get(api.AmzDecodedContentLength)
    if len(decodeContentSize) == 0 {
        return nil, errors.GetAPIError(errors.ErrMissingContentLength)
    }

    if _, err := strconv.Atoi(decodeContentSize); err != nil {
        return nil, fmt.Errorf("%w: parse decoded content length: %s", errors.GetAPIError(errors.ErrMissingContentLength), err.Error())
    }

    chunkReader, err := newSignV4ChunkedReader(r)
    copiesNumber, err := strconv.ParseUint(copiesNumberStr, 10, 32)
    if err != nil {
        return nil, fmt.Errorf("initialize chunk reader: %w", err)
        return 0, fmt.Errorf("parse copies number: %w", err)
    }

    return chunkReader, nil
    return uint32(copiesNumber), nil
}
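getCopiesNumberOrDefault reads a per-object replication override from user metadata and otherwise falls back to the configured default. A self-contained sketch of the same behavior; the real key is layer.AttributeNeofsCopiesNumber, the "copies-number" string below is illustrative:

package main

import (
    "fmt"
    "strconv"
)

// copiesNumberOrDefault mirrors the helper above with an explicit key argument.
func copiesNumberOrDefault(metadata map[string]string, key string, def uint32) (uint32, error) {
    s, ok := metadata[key]
    if !ok {
        return def, nil
    }
    n, err := strconv.ParseUint(s, 10, 32)
    if err != nil {
        return 0, fmt.Errorf("parse copies number: %w", err)
    }
    return uint32(n), nil
}

func main() {
    n, _ := copiesNumberOrDefault(map[string]string{"copies-number": "3"}, "copies-number", 1)
    fmt.Println(n) // 3
    n, _ = copiesNumberOrDefault(nil, "copies-number", 1)
    fmt.Println(n) // 1, the configured default
}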
func formEncryptionParams(r *http.Request) (enc encryption.Params, err error) {
    sseCustomerAlgorithm := r.Header.Get(api.AmzServerSideEncryptionCustomerAlgorithm)
    sseCustomerKey := r.Header.Get(api.AmzServerSideEncryptionCustomerKey)
    sseCustomerKeyMD5 := r.Header.Get(api.AmzServerSideEncryptionCustomerKeyMD5)
func (h handler) formEncryptionParams(header http.Header) (enc encryption.Params, err error) {
    sseCustomerAlgorithm := header.Get(api.AmzServerSideEncryptionCustomerAlgorithm)
    sseCustomerKey := header.Get(api.AmzServerSideEncryptionCustomerKey)
    sseCustomerKeyMD5 := header.Get(api.AmzServerSideEncryptionCustomerKeyMD5)

    if len(sseCustomerAlgorithm) == 0 && len(sseCustomerKey) == 0 && len(sseCustomerKeyMD5) == 0 {
        return
    }

    if r.TLS == nil {
    if !h.cfg.TLSEnabled {
        return enc, errorsStd.New("encryption available only when TLS is enabled")
    }

@ -418,8 +375,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        newEaclTable *eacl.Table
        tagSet map[string]string
        sessionTokenEACL *session.Container
        ctx = r.Context()
        reqInfo = middleware.GetReqInfo(ctx)
        reqInfo = api.GetReqInfo(r.Context())
        metadata = make(map[string]string)
        containsACL = containsACLHeaders(r)
    )
@ -440,17 +396,17 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
    }

    if containsACL {
        if sessionTokenEACL, err = getSessionTokenSetEACL(ctx); err != nil {
        if sessionTokenEACL, err = getSessionTokenSetEACL(r.Context()); err != nil {
            h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
            return
        }
    }

    var contentReader io.Reader
    var size uint64
    var size int64
    if content, ok := r.MultipartForm.Value["file"]; ok {
        contentReader = bytes.NewBufferString(content[0])
        size = uint64(len(content[0]))
        size = int64(len(content[0]))
    } else {
        file, head, err := r.FormFile("file")
        if err != nil {
@ -458,7 +414,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
            return
        }
        contentReader = file
        size = uint64(head.Size)
        size = head.Size
        reqInfo.ObjectName = strings.ReplaceAll(reqInfo.ObjectName, "${filename}", head.Filename)
    }
    if !policy.CheckContentLength(size) {
@ -466,7 +422,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        return
    }

    bktInfo, err := h.obj.GetBucketInfo(ctx, reqInfo.BucketName)
    bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
    if err != nil {
        h.logAndSendError(w, "could not get bucket info", reqInfo, err)
        return
@ -480,7 +436,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        Header: metadata,
    }

    extendedObjInfo, err := h.obj.PutObject(ctx, params)
    extendedObjInfo, err := h.obj.PutObject(r.Context(), params)
    if err != nil {
        h.logAndSendError(w, "could not upload object", reqInfo, err)
        return
@ -493,8 +449,8 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        BktInfo: bktInfo,
        ReqInfo: reqInfo,
    }
    if err = h.sendNotifications(ctx, s); err != nil {
        h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
    if err = h.sendNotifications(r.Context(), s); err != nil {
        h.log.Error("couldn't send notification: %w", zap.Error(err))
    }

    if acl := auth.MultipartFormValue(r, "acl"); acl != "" {
@ -519,7 +475,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        NodeVersion: extendedObjInfo.NodeVersion,
    }

    if _, err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
    if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
        h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
        return
    }
@ -532,14 +488,14 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        SessionToken: sessionTokenEACL,
    }

    if err = h.obj.PutBucketACL(ctx, p); err != nil {
    if err = h.obj.PutBucketACL(r.Context(), p); err != nil {
        h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
        return
    }
    }

    if settings, err := h.obj.GetBucketSettings(ctx, bktInfo); err != nil {
        h.reqLogger(ctx).Warn("couldn't get bucket versioning", zap.String("bucket name", reqInfo.BucketName), zap.Error(err))
    if settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo); err != nil {
        h.log.Warn("couldn't get bucket versioning", zap.String("bucket name", reqInfo.BucketName), zap.Error(err))
    } else if settings.VersioningEnabled() {
        w.Header().Set(api.AmzVersionID, objInfo.VersionID())
    }
@ -561,7 +517,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
        ETag: objInfo.HashSum,
    }
    w.WriteHeader(status)
    if _, err = w.Write(middleware.EncodeResponse(resp)); err != nil {
    if _, err = w.Write(api.EncodeResponse(resp)); err != nil {
        h.logAndSendError(w, "something went wrong", reqInfo, err)
    }
    return
@ -572,7 +528,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
    w.WriteHeader(status)
}

func checkPostPolicy(r *http.Request, reqInfo *middleware.ReqInfo, metadata map[string]string) (*postPolicy, error) {
func checkPostPolicy(r *http.Request, reqInfo *api.ReqInfo, metadata map[string]string) (*postPolicy, error) {
    policy := &postPolicy{empty: true}
    if policyStr := auth.MultipartFormValue(r, "policy"); policyStr != "" {
        policyData, err := base64.StdEncoding.DecodeString(policyStr)
@ -711,18 +667,19 @@ func parseMetadata(r *http.Request) map[string]string {
}

func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
    reqInfo := middleware.GetReqInfo(ctx)
    p := &layer.CreateBucketParams{
        Name: reqInfo.BucketName,
    }
    var (
        reqInfo = api.GetReqInfo(r.Context())
        p = layer.CreateBucketParams{
            Name: reqInfo.BucketName,
        }
    )

    if err := checkBucketName(reqInfo.BucketName); err != nil {
        h.logAndSendError(w, "invalid bucket name", reqInfo, err)
        return
    }

    key, err := h.bearerTokenIssuerKey(ctx)
    key, err := h.bearerTokenIssuerKey(r.Context())
    if err != nil {
        h.logAndSendError(w, "couldn't get bearer token signature key", reqInfo, err)
        return
@ -748,7 +705,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
    }

    var policies []*accessbox.ContainerPolicy
    boxData, err := layer.GetBoxData(ctx)
    boxData, err := layer.GetBoxData(r.Context())
    if err == nil {
        policies = boxData.Policies
        p.SessionContainerCreation = boxData.Gate.SessionTokenForPut()
@ -765,57 +722,44 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
        return
    }

    if err = h.setPolicy(p, createParams.LocationConstraint, policies); err != nil {
        h.logAndSendError(w, "couldn't set placement policy", reqInfo, err)
        return
    useDefaultPolicy := true
    if createParams.LocationConstraint != "" {
        for _, placementPolicy := range policies {
            if placementPolicy.LocationConstraint == createParams.LocationConstraint {
                p.Policy = placementPolicy.Policy
                p.LocationConstraint = createParams.LocationConstraint
                useDefaultPolicy = false
                break
            }
        }
    }
    if useDefaultPolicy {
        p.Policy = h.cfg.DefaultPolicy
    }

    p.ObjectLockEnabled = isLockEnabled(r.Header)

    bktInfo, err := h.obj.CreateBucket(ctx, p)
    bktInfo, err := h.obj.CreateBucket(r.Context(), &p)
    if err != nil {
        h.logAndSendError(w, "could not create bucket", reqInfo, err)
        return
    }

    h.reqLogger(ctx).Info("bucket is created", zap.Stringer("container_id", bktInfo.CID))

    if p.ObjectLockEnabled {
        sp := &layer.PutSettingsParams{
            BktInfo: bktInfo,
            Settings: &data.BucketSettings{Versioning: data.VersioningEnabled},
        }
        if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
        if err = h.obj.PutBucketSettings(r.Context(), sp); err != nil {
            h.logAndSendError(w, "couldn't enable bucket versioning", reqInfo, err,
                zap.String("container_id", bktInfo.CID.EncodeToString()))
            return
        }
    }

    middleware.WriteSuccessResponseHeadersOnly(w)
}
    h.log.Info("bucket is created", zap.Stringer("container_id", bktInfo.CID))

func (h handler) setPolicy(prm *layer.CreateBucketParams, locationConstraint string, userPolicies []*accessbox.ContainerPolicy) error {
    prm.Policy = h.cfg.Policy.DefaultPlacementPolicy()
    prm.LocationConstraint = locationConstraint

    if locationConstraint == "" {
        return nil
    }

    for _, placementPolicy := range userPolicies {
        if placementPolicy.LocationConstraint == locationConstraint {
            prm.Policy = placementPolicy.Policy
            return nil
        }
    }

    if policy, ok := h.cfg.Policy.PlacementPolicy(locationConstraint); ok {
        prm.Policy = policy
        return nil
    }

    return errors.GetAPIError(errors.ErrInvalidLocationConstraint)
    api.WriteSuccessResponseHeadersOnly(w)
}

func isLockEnabled(header http.Header) bool {
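formEncryptionParams reads the SSE-C triple AWS defines: the customer sends a 256-bit key base64-encoded, plus a base64-encoded MD5 of the raw key so the server can verify it arrived intact (which is also why the handlers insist on TLS). A sketch of deriving those headers client-side:

package main

import (
    "crypto/md5"
    "encoding/base64"
    "fmt"
)

// sseCHeaders derives the SSE-C request header values AWS defines.
func sseCHeaders(rawKey []byte) (keyB64, keyMD5B64 string) {
    sum := md5.Sum(rawKey)
    return base64.StdEncoding.EncodeToString(rawKey), base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
    key := make([]byte, 32) // a 256-bit customer key; use crypto/rand in practice
    k, m := sseCHeaders(key)
    fmt.Println("x-amz-server-side-encryption-customer-key:", k)
    fmt.Println("x-amz-server-side-encryption-customer-key-MD5:", m)
}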
@ -1,26 +1,15 @@
package handler

import (
    "bytes"
    "context"
    "encoding/json"
    "io"
    "mime/multipart"
    "net/http"
    "net/http/httptest"
    "strconv"
    "strings"
    "testing"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
    v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
    s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/nspcc-dev/neofs-s3-gw/api"
    "github.com/nspcc-dev/neofs-s3-gw/api/layer"
    "github.com/stretchr/testify/require"
)

@ -111,7 +100,7 @@ func TestEmptyPostPolicy(t *testing.T) {
            },
        },
    }
    reqInfo := &middleware.ReqInfo{}
    reqInfo := &api.ReqInfo{}
    metadata := make(map[string]string)

    _, err := checkPostPolicy(r, reqInfo, metadata)
@ -125,7 +114,7 @@ func TestPutObjectOverrideCopiesNumber(t *testing.T) {
    bktInfo := createTestBucket(tc, bktName)

    w, r := prepareTestRequest(tc, bktName, objName, nil)
    r.Header.Set(api.MetadataPrefix+strings.ToUpper(layer.AttributeFrostfsCopiesNumber), "1")
    r.Header.Set(api.MetadataPrefix+strings.ToUpper(layer.AttributeNeofsCopiesNumber), "1")
    tc.Handler().PutObjectHandler(w, r)

    p := &layer.HeadObjectParams{
@ -135,156 +124,5 @@ func TestPutObjectOverrideCopiesNumber(t *testing.T) {

    objInfo, err := tc.Layer().GetObjectInfo(tc.Context(), p)
    require.NoError(t, err)
    require.Equal(t, "1", objInfo.Headers[layer.AttributeFrostfsCopiesNumber])
}

func TestPutObjectWithNegativeContentLength(t *testing.T) {
    tc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-put", "object-for-put"
    createTestBucket(tc, bktName)

    content := []byte("content")
    w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
    r.ContentLength = -1
    tc.Handler().PutObjectHandler(w, r)
    assertStatus(t, w, http.StatusOK)

    w, r = prepareTestRequest(tc, bktName, objName, nil)
    tc.Handler().HeadObjectHandler(w, r)
    assertStatus(t, w, http.StatusOK)
    require.Equal(t, strconv.Itoa(len(content)), w.Header().Get(api.ContentLength))
}

func TestPutObjectWithStreamBodyError(t *testing.T) {
    tc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-put", "object-for-put"
    createTestBucket(tc, bktName)

    content := []byte("content")
    w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
    r.Header.Set(api.AmzContentSha256, api.StreamingContentSHA256)
    r.Header.Set(api.ContentEncoding, api.AwsChunked)
    tc.Handler().PutObjectHandler(w, r)
    assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrMissingContentLength))

    checkNotFound(t, tc, bktName, objName, emptyVersion)
}

func TestPutObjectWithStreamBodyAWSExample(t *testing.T) {
    hc := prepareHandlerContext(t)

    bktName, objName := "examplebucket", "chunkObject.txt"
    createTestBucket(hc, bktName)

    w, req, chunk := getChunkedRequest(hc.context, t, bktName, objName)
    hc.Handler().PutObjectHandler(w, req)
    assertStatus(t, w, http.StatusOK)

    data := getObjectRange(t, hc, bktName, objName, 0, 66824)
    for i := range chunk {
        require.Equal(t, chunk[i], data[i])
    }
}

func TestPutChunkedTestContentEncoding(t *testing.T) {
    hc := prepareHandlerContext(t)

    bktName, objName := "examplebucket", "chunkObject.txt"
    createTestBucket(hc, bktName)

    w, req, _ := getChunkedRequest(hc.context, t, bktName, objName)
    req.Header.Set(api.ContentEncoding, api.AwsChunked+",gzip")

    hc.Handler().PutObjectHandler(w, req)
    assertStatus(t, w, http.StatusOK)

    resp := headObjectBase(hc, bktName, objName, emptyVersion)
    require.Equal(t, "gzip", resp.Header().Get(api.ContentEncoding))

    w, req, _ = getChunkedRequest(hc.context, t, bktName, objName)
    req.Header.Set(api.ContentEncoding, "gzip")
    hc.Handler().PutObjectHandler(w, req)
    assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrInvalidEncodingMethod))

    hc.kludge.bypassContentEncodingInChunks = true
    w, req, _ = getChunkedRequest(hc.context, t, bktName, objName)
    req.Header.Set(api.ContentEncoding, "gzip")
    hc.Handler().PutObjectHandler(w, req)
    assertStatus(t, w, http.StatusOK)

    resp = headObjectBase(hc, bktName, objName, emptyVersion)
    require.Equal(t, "gzip", resp.Header().Get(api.ContentEncoding))
}

func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
    chunk := make([]byte, 65*1024)
    for i := range chunk {
        chunk[i] = 'a'
    }
    chunk1 := chunk[:64*1024]
    chunk2 := chunk[64*1024:]

    AWSAccessKeyID := "AKIAIOSFODNN7EXAMPLE"
    AWSSecretAccessKey := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"

    awsCreds := credentials.NewStaticCredentials(AWSAccessKeyID, AWSSecretAccessKey, "")
    signer := v4.NewSigner(awsCreds)

    reqBody := bytes.NewBufferString("10000;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648\r\n")
    _, err := reqBody.Write(chunk1)
    require.NoError(t, err)
    _, err = reqBody.WriteString("\r\n400;chunk-signature=0055627c9e194cb4542bae2aa5492e3c1575bbb81b612b7d234b86a503ef5497\r\n")
    require.NoError(t, err)
    _, err = reqBody.Write(chunk2)
    require.NoError(t, err)
    _, err = reqBody.WriteString("\r\n0;chunk-signature=b6c6ea8a5354eaf15b3cb7646744f4275b71ea724fed81ceb9323e279d449df9\r\n\r\n")
    require.NoError(t, err)

    req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil)
    require.NoError(t, err)
    req.Header.Set("content-encoding", "aws-chunked")
    req.Header.Set("content-length", "66824")
    req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
    req.Header.Set("x-amz-decoded-content-length", "66560")
    req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")

    signTime, err := time.Parse("20060102T150405Z", "20130524T000000Z")
    require.NoError(t, err)

    _, err = signer.Sign(req, nil, "s3", "us-east-1", signTime)
    require.NoError(t, err)

    req.Body = io.NopCloser(reqBody)

    w := httptest.NewRecorder()
    reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName})
    req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
    req = req.WithContext(context.WithValue(req.Context(), middleware.ClientTime, signTime))
    req = req.WithContext(context.WithValue(req.Context(), middleware.AuthHeaders, &auth.AuthHeader{
        AccessKeyID: AWSAccessKeyID,
        SignatureV4: "4f232c4386841ef735655705268965c44a0e4690baa4adea153f7db9fa80a0a9",
        Service: "s3",
        Region: "us-east-1",
    }))
    req = req.WithContext(context.WithValue(req.Context(), middleware.BoxData, &accessbox.Box{
        Gate: &accessbox.GateData{
            AccessKey: AWSSecretAccessKey,
        },
    }))

    return w, req, chunk
}

func TestCreateBucket(t *testing.T) {
    hc := prepareHandlerContext(t)
    bktName := "bkt-name"

    box, _ := createAccessBox(t)
    createBucket(t, hc, bktName, box)
    createBucketAssertS3Error(hc, bktName, box, s3errors.ErrBucketAlreadyOwnedByYou)

    box2, _ := createAccessBox(t)
    createBucketAssertS3Error(hc, bktName, box2, s3errors.ErrBucketAlreadyExists)
    require.Equal(t, "1", objInfo.Headers[layer.AttributeNeofsCopiesNumber])
}
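The getChunkedRequest body above follows the AWS streaming-payload framing: each frame is the hex chunk size, a literal ";chunk-signature=", 64 hex characters of signature, CRLF, the payload, CRLF, and the stream ends with a zero-sized frame. A sketch of building one frame (signatures here are placeholders, not real HMACs):

package main

import (
    "bytes"
    "fmt"
)

// frame builds one aws-chunked frame: hex(size);chunk-signature=<hex>\r\n<payload>\r\n.
func frame(payload []byte, signature string) []byte {
    var b bytes.Buffer
    fmt.Fprintf(&b, "%x;chunk-signature=%s\r\n", len(payload), signature)
    b.Write(payload)
    b.WriteString("\r\n")
    return b.Bytes()
}

func main() {
    body := append(frame(bytes.Repeat([]byte{'a'}, 64*1024), "<sig1>"), frame(nil, "<final-sig>")...)
    fmt.Println(len(body)) // payload plus framing overhead, as in the test's content-length math
}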
@ -104,7 +104,7 @@ type Object struct {
    Key string
    LastModified string // time string of format "2006-01-02T15:04:05.000Z"
    ETag string `xml:"ETag,omitempty"`
    Size uint64
    Size int64

    // Owner of the object.
    Owner *Owner `xml:"Owner,omitempty"`
@ -120,7 +120,7 @@ type ObjectVersionResponse struct {
    Key string `xml:"Key"`
    LastModified string `xml:"LastModified"`
    Owner Owner `xml:"Owner"`
    Size uint64 `xml:"Size"`
    Size int64 `xml:"Size"`
    StorageClass string `xml:"StorageClass,omitempty"` // is empty!!
    VersionID string `xml:"VersionId"`
}
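Switching Size to int64 lines up these response structs with http.Request.ContentLength and removes the uint64 conversions dropped from the handlers above. A trimmed, illustrative mirror of how such a struct serializes:

package main

import (
    "encoding/xml"
    "fmt"
)

// object is a trimmed stand-in for the response struct, not the gateway's type.
type object struct {
    Key  string
    Size int64
}

func main() {
    out, _ := xml.Marshal(object{Key: "a.txt", Size: 7})
    fmt.Println(string(out)) // <object><Key>a.txt</Key><Size>7</Size></object>
}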
@ -1,224 +0,0 @@
package handler

import (
    "bufio"
    "bytes"
    "encoding/hex"
    "errors"
    "io"
    "net/http"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
    v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
    errs "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "github.com/aws/aws-sdk-go/aws/credentials"
)

const (
    chunkSignatureHeader = "chunk-signature="
    maxChunkSize = 16 << 20
)

type (
    s3ChunkReader struct {
        reader *bufio.Reader
        streamSigner *v4.StreamSigner

        requestTime time.Time
        buffer []byte
        offset int
        err error
    }
)

var (
    errGiantChunk = errors.New("chunk too big: choose chunk size <= 16MiB")
    errMalformedChunkedEncoding = errors.New("malformed chunked encoding")
)

func (c *s3ChunkReader) Close() (err error) {
    return nil
}

func (c *s3ChunkReader) Read(buf []byte) (num int, err error) {
    if c.offset > 0 {
        num = copy(buf, c.buffer[c.offset:])
        if num == len(buf) {
            c.offset += num
            return num, nil
        }
        c.offset = 0
        buf = buf[num:]
    }

    var size int
    for {
        b, err := c.reader.ReadByte()
        if err == io.EOF {
            err = io.ErrUnexpectedEOF
        }
        if err != nil {
            c.err = err
            return num, c.err
        }
        if b == ';' { // separating character
            break
        }

        // Manually deserialize the size since AWS specified
        // the chunk size to be of variable width. In particular,
        // a size of 16 is encoded as `10` while a size of 64 KB
        // is `10000`.
        switch {
        case b >= '0' && b <= '9':
            size = size<<4 | int(b-'0')
        case b >= 'a' && b <= 'f':
            size = size<<4 | int(b-('a'-10))
        case b >= 'A' && b <= 'F':
            size = size<<4 | int(b-('A'-10))
        default:
            c.err = errMalformedChunkedEncoding
            return num, c.err
        }
        if size > maxChunkSize {
            c.err = errGiantChunk
            return num, c.err
        }
    }

    // Now, we read the signature of the following payload and expect:
    //   chunk-signature=" + <signature-as-hex> + "\r\n"
    //
    // The signature is 64 bytes long (hex-encoded SHA256 hash) and
    // starts with a 16 byte header: len("chunk-signature=") + 64 == 80.
    var signature [80]byte
    _, err = io.ReadFull(c.reader, signature[:])
    if err == io.EOF {
        err = io.ErrUnexpectedEOF
    }
    if err != nil {
        c.err = err
        return num, c.err
    }
    if !bytes.HasPrefix(signature[:], []byte(chunkSignatureHeader)) {
        c.err = errMalformedChunkedEncoding
        return num, c.err
    }
    b, err := c.reader.ReadByte()
    if err == io.EOF {
        err = io.ErrUnexpectedEOF
    }
    if err != nil {
        c.err = err
        return num, c.err
    }
    if b != '\r' {
        c.err = errMalformedChunkedEncoding
        return num, c.err
    }
    b, err = c.reader.ReadByte()
    if err == io.EOF {
        err = io.ErrUnexpectedEOF
    }
    if err != nil {
        c.err = err
        return num, c.err
    }
    if b != '\n' {
        c.err = errMalformedChunkedEncoding
        return num, c.err
    }

    if cap(c.buffer) < size {
        c.buffer = make([]byte, size)
    } else {
        c.buffer = c.buffer[:size]
    }

    // Now, we read the payload and compute its SHA-256 hash.
    _, err = io.ReadFull(c.reader, c.buffer)
    if err == io.EOF && size != 0 {
        err = io.ErrUnexpectedEOF
    }
    if err != nil && err != io.EOF {
        c.err = err
        return num, c.err
    }
    b, err = c.reader.ReadByte()
    if b != '\r' || err != nil {
        c.err = errMalformedChunkedEncoding
        return num, c.err
    }
    b, err = c.reader.ReadByte()
    if err == io.EOF {
        err = io.ErrUnexpectedEOF
    }
    if err != nil {
        c.err = err
        return num, c.err
    }
    if b != '\n' {
        c.err = errMalformedChunkedEncoding
        return num, c.err
    }

    // Once we have read the entire chunk successfully, we verify
    // that the received signature matches our computed signature.

    calculatedSignature, err := c.streamSigner.GetSignature(nil, c.buffer, c.requestTime)
    if err != nil {
        c.err = err
        return num, c.err
    }
    if string(signature[16:]) != hex.EncodeToString(calculatedSignature) {
        c.err = errs.GetAPIError(errs.ErrSignatureDoesNotMatch)
        return num, c.err
    }

    // If the chunk size is zero we return io.EOF. As specified by AWS,
    // only the last chunk is zero-sized.
    if size == 0 {
        c.err = io.EOF
        return num, c.err
    }

    c.offset = copy(buf, c.buffer)
    num += c.offset
    return num, err
}

func newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, error) {
    // Expecting to refactor this in future:
    // https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/issues/137
    box, ok := req.Context().Value(middleware.BoxData).(*accessbox.Box)
    if !ok {
        return nil, errs.GetAPIError(errs.ErrAuthorizationHeaderMalformed)
    }

    authHeaders, ok := req.Context().Value(middleware.AuthHeaders).(*auth.AuthHeader)
    if !ok {
        return nil, errs.GetAPIError(errs.ErrAuthorizationHeaderMalformed)
    }

    currentCredentials := credentials.NewStaticCredentials(authHeaders.AccessKeyID, box.Gate.AccessKey, "")
    seed, err := hex.DecodeString(authHeaders.SignatureV4)
    if err != nil {
        return nil, errs.GetAPIError(errs.ErrSignatureDoesNotMatch)
    }

    reqTime, ok := req.Context().Value(middleware.ClientTime).(time.Time)
    if !ok {
        return nil, errs.GetAPIError(errs.ErrMalformedDate)
    }
    newStreamSigner := v4.NewStreamSigner(authHeaders.Region, "s3", seed, currentCredentials)

    return &s3ChunkReader{
        reader: bufio.NewReader(req.Body),
        streamSigner: newStreamSigner,
        requestTime: reqTime,
        buffer: make([]byte, 64*1024),
    }, nil
}
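The size loop in this deleted reader parses the variable-width hex chunk size one byte at a time, accumulating nibbles until the ';' separator ("400" is 1024 bytes, "10000" is 65536). A self-contained sketch of exactly that parsing:

package main

import "fmt"

// hexDigit mirrors the reader's per-byte nibble decoding.
func hexDigit(b byte) (int, bool) {
    switch {
    case b >= '0' && b <= '9':
        return int(b - '0'), true
    case b >= 'a' && b <= 'f':
        return int(b-'a') + 10, true
    case b >= 'A' && b <= 'F':
        return int(b-'A') + 10, true
    }
    return 0, false
}

// parseChunkSize accumulates nibbles the way the Read loop does before ';'.
func parseChunkSize(s string) (int, error) {
    size := 0
    for i := 0; i < len(s); i++ {
        d, ok := hexDigit(s[i])
        if !ok {
            return 0, fmt.Errorf("malformed chunked encoding at byte %q", s[i])
        }
        size = size<<4 | d
    }
    return size, nil
}

func main() {
    n, err := parseChunkSize("10000")
    fmt.Println(n, err) // 65536 <nil>
}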
@ -8,11 +8,10 @@ import (
    "strings"
    "unicode"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "github.com/nspcc-dev/neofs-s3-gw/api"
    "github.com/nspcc-dev/neofs-s3-gw/api/data"
    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
    "github.com/nspcc-dev/neofs-s3-gw/api/layer"
    "go.uber.org/zap"
)

@ -25,8 +24,7 @@ const (
)

func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
    reqInfo := middleware.GetReqInfo(ctx)
    reqInfo := api.GetReqInfo(r.Context())

    tagSet, err := readTagSet(r.Body)
    if err != nil {
@ -48,7 +46,7 @@ func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request
        },
        TagSet: tagSet,
    }
    nodeVersion, err := h.obj.PutObjectTagging(ctx, tagPrm)
    nodeVersion, err := h.obj.PutObjectTagging(r.Context(), tagPrm)
    if err != nil {
        h.logAndSendError(w, "could not put object tagging", reqInfo, err)
        return
@ -65,15 +63,15 @@ func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request
        BktInfo: bktInfo,
        ReqInfo: reqInfo,
    }
    if err = h.sendNotifications(ctx, s); err != nil {
        h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
    if err = h.sendNotifications(r.Context(), s); err != nil {
        h.log.Error("couldn't send notification: %w", zap.Error(err))
    }

    w.WriteHeader(http.StatusOK)
}

func (h *handler) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
    reqInfo := middleware.GetReqInfo(r.Context())
    reqInfo := api.GetReqInfo(r.Context())

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
@ -104,14 +102,13 @@ func (h *handler) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request
    if settings.VersioningEnabled() {
        w.Header().Set(api.AmzVersionID, versionID)
    }
    if err = middleware.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
    if err = api.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
        h.logAndSendError(w, "something went wrong", reqInfo, err)
    }
}

func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
    reqInfo := middleware.GetReqInfo(ctx)
    reqInfo := api.GetReqInfo(r.Context())

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
@ -125,7 +122,7 @@ func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Requ
        VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
    }

    nodeVersion, err := h.obj.DeleteObjectTagging(ctx, p)
    nodeVersion, err := h.obj.DeleteObjectTagging(r.Context(), p)
    if err != nil {
        h.logAndSendError(w, "could not delete object tagging", reqInfo, err)
        return
@ -142,15 +139,15 @@ func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Requ
        BktInfo: bktInfo,
        ReqInfo: reqInfo,
    }
    if err = h.sendNotifications(ctx, s); err != nil {
        h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
    if err = h.sendNotifications(r.Context(), s); err != nil {
        h.log.Error("couldn't send notification: %w", zap.Error(err))
    }

    w.WriteHeader(http.StatusNoContent)
}

func (h *handler) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
    reqInfo := middleware.GetReqInfo(r.Context())
    reqInfo := api.GetReqInfo(r.Context())

    tagSet, err := readTagSet(r.Body)
    if err != nil {
@ -171,7 +168,7 @@ func (h *handler) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request
}

func (h *handler) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
    reqInfo := middleware.GetReqInfo(r.Context())
    reqInfo := api.GetReqInfo(r.Context())

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
@ -185,14 +182,14 @@ func (h *handler) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request
        return
    }

    if err = middleware.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
    if err = api.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
        h.logAndSendError(w, "something went wrong", reqInfo, err)
        return
    }
}

func (h *handler) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
    reqInfo := middleware.GetReqInfo(r.Context())
    reqInfo := api.GetReqInfo(r.Context())

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
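readTagSet and encodeTagging work over the standard S3 Tagging XML document. A self-contained sketch of decoding it with encoding/xml; the field layout follows the S3 schema, not the gateway's internal types:

package main

import (
    "encoding/xml"
    "fmt"
)

// Tagging mirrors the S3 tagging document these handlers read and write.
type Tagging struct {
    XMLName xml.Name `xml:"Tagging"`
    TagSet  []Tag    `xml:"TagSet>Tag"`
}

type Tag struct {
    Key   string `xml:"Key"`
    Value string `xml:"Value"`
}

func main() {
    doc := `<Tagging><TagSet><Tag><Key>env</Key><Value>dev</Value></Tag></TagSet></Tagging>`
    var t Tagging
    if err := xml.Unmarshal([]byte(doc), &t); err != nil {
        panic(err)
    }
    fmt.Println(t.TagSet[0].Key, "=", t.TagSet[0].Value) // env = dev
}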
@ -3,58 +3,58 @@ package handler

import (
    "net/http"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "github.com/nspcc-dev/neofs-s3-gw/api"
    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
)

func (h *handler) SelectObjectContentHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
    h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}

func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
    h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}

func (h *handler) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
    h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}

func (h *handler) GetBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
    h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}

func (h *handler) GetBucketAccelerateHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
    h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}

func (h *handler) GetBucketRequestPaymentHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
    h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}

func (h *handler) GetBucketLoggingHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
    h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}

func (h *handler) GetBucketReplicationHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
    h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}

func (h *handler) DeleteBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
    h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}

func (h *handler) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
    h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}

func (h *handler) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
    h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}

func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
    h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}

func (h *handler) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
    h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
    h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
@@ -2,31 +2,21 @@ package handler
 
 import (
     "context"
-    "errors"
+    errorsStd "errors"
     "net/http"
     "strconv"
     "strings"
 
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-    s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
-    frosterrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+    "github.com/nspcc-dev/neofs-s3-gw/api"
+    "github.com/nspcc-dev/neofs-s3-gw/api/data"
+    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
+    "github.com/nspcc-dev/neofs-s3-gw/api/layer"
+    "github.com/nspcc-dev/neofs-sdk-go/session"
     "go.uber.org/zap"
 )
 
-func (h *handler) reqLogger(ctx context.Context) *zap.Logger {
-    reqLogger := middleware.GetReqLog(ctx)
-    if reqLogger != nil {
-        return reqLogger
-    }
-    return h.log
-}
-
-func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo *middleware.ReqInfo, err error, additional ...zap.Field) {
-    code := middleware.WriteErrorResponse(w, reqInfo, transformToS3Error(err))
+func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo *api.ReqInfo, err error, additional ...zap.Field) {
+    code := api.WriteErrorResponse(w, reqInfo, transformToS3Error(err))
     fields := []zap.Field{
         zap.Int("status", code),
         zap.String("request_id", reqInfo.RequestID),
@@ -36,42 +26,20 @@ func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo
         zap.String("description", logText),
         zap.Error(err)}
     fields = append(fields, additional...)
-    h.log.Error("request failed", fields...) // consider using h.reqLogger (it requires accept context.Context or http.Request)
-}
-
-func (h *handler) logAndSendErrorNoHeader(w http.ResponseWriter, logText string, reqInfo *middleware.ReqInfo, err error, additional ...zap.Field) {
-    middleware.WriteErrorResponseNoHeader(w, reqInfo, transformToS3Error(err))
-    fields := []zap.Field{
-        zap.String("request_id", reqInfo.RequestID),
-        zap.String("method", reqInfo.API),
-        zap.String("bucket", reqInfo.BucketName),
-        zap.String("object", reqInfo.ObjectName),
-        zap.String("description", logText),
-        zap.Error(err)}
-    fields = append(fields, additional...)
-    h.log.Error("request failed", fields...) // consider using h.reqLogger (it requires accept context.Context or http.Request)
+    h.log.Error("call method", fields...)
 }
 
 func transformToS3Error(err error) error {
-    err = frosterrors.UnwrapErr(err) // this wouldn't work with errors.Join
-    if _, ok := err.(s3errors.Error); ok {
+    if _, ok := err.(errors.Error); ok {
         return err
     }
 
-    if errors.Is(err, layer.ErrAccessDenied) ||
-        errors.Is(err, layer.ErrNodeAccessDenied) {
-        return s3errors.GetAPIError(s3errors.ErrAccessDenied)
+    if errorsStd.Is(err, layer.ErrAccessDenied) ||
+        errorsStd.Is(err, layer.ErrNodeAccessDenied) {
+        return errors.GetAPIError(errors.ErrAccessDenied)
     }
 
-    if errors.Is(err, layer.ErrGatewayTimeout) {
-        return s3errors.GetAPIError(s3errors.ErrGatewayTimeout)
-    }
-
-    return s3errors.GetAPIError(s3errors.ErrInternalError)
-}
-
-func (h *handler) ResolveBucket(ctx context.Context, bucket string) (*data.BucketInfo, error) {
-    return h.obj.GetBucketInfo(ctx, bucket)
+    return errors.GetAPIError(errors.ErrInternalError)
 }
 
 func (h *handler) getBucketAndCheckOwner(r *http.Request, bucket string, header ...string) (*data.BucketInfo, error) {
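Editor's note: transformToS3Error is the single choke point that converts layer-level failures into the fixed S3 error vocabulary: already-typed S3 errors pass through, known layer sentinels map to AccessDenied, everything else collapses to InternalError. A small sketch of the mapping (names as in the neofs variant above):

    // Wrapped sentinels still match because errorsStd.Is unwraps %w chains.
    wrapped := fmt.Errorf("tree lookup: %w", layer.ErrAccessDenied)
    _ = transformToS3Error(wrapped) // -> errors.GetAPIError(errors.ErrAccessDenied)

    // Anything unrecognized is hidden behind a generic internal error.
    _ = transformToS3Error(errorsStd.New("dial tcp: i/o timeout")) // -> ErrInternalError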
@@ -102,26 +70,26 @@ func parseRange(s string) (*layer.RangeParams, error) {
     prefix := "bytes="
 
     if !strings.HasPrefix(s, prefix) {
-        return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
+        return nil, errors.GetAPIError(errors.ErrInvalidRange)
     }
 
     s = strings.TrimPrefix(s, prefix)
 
     valuesStr := strings.Split(s, "-")
     if len(valuesStr) != 2 {
-        return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
+        return nil, errors.GetAPIError(errors.ErrInvalidRange)
     }
 
     values := make([]uint64, 0, len(valuesStr))
     for _, v := range valuesStr {
         num, err := strconv.ParseUint(v, 10, 64)
         if err != nil {
-            return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
+            return nil, errors.GetAPIError(errors.ErrInvalidRange)
         }
         values = append(values, num)
     }
     if values[0] > values[1] {
-        return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
+        return nil, errors.GetAPIError(errors.ErrInvalidRange)
     }
 
     return &layer.RangeParams{
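Editor's note: parseRange accepts only the fully specified bytes=first-last form; suffix and open-ended ranges are rejected. A quick illustration (hedged: assuming RangeParams carries the parsed start/end offsets, which are truncated out of this hunk):

    rng, _ := parseRange("bytes=0-499")  // ok: first=0, last=499
    _, err1 := parseRange("bytes=500-")  // ErrInvalidRange: ParseUint("") fails
    _, err2 := parseRange("bytes=9-5")   // ErrInvalidRange: first > last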
@@ -137,7 +105,7 @@ func getSessionTokenSetEACL(ctx context.Context) (*session.Container, error) {
     }
     sessionToken := boxData.Gate.SessionTokenForSetEACL()
     if sessionToken == nil {
-        return nil, s3errors.GetAPIError(s3errors.ErrAccessDenied)
+        return nil, errors.GetAPIError(errors.ErrAccessDenied)
     }
 
     return sessionToken, nil
@@ -1,64 +0,0 @@
-package handler
-
-import (
-    "errors"
-    "fmt"
-    "testing"
-
-    s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
-    "github.com/stretchr/testify/require"
-)
-
-func TestTransformS3Errors(t *testing.T) {
-    for _, tc := range []struct {
-        name     string
-        err      error
-        expected s3errors.ErrorCode
-    }{
-        {
-            name:     "simple std error to internal error",
-            err:      errors.New("some error"),
-            expected: s3errors.ErrInternalError,
-        },
-        {
-            name:     "layer access denied error to s3 access denied error",
-            err:      layer.ErrAccessDenied,
-            expected: s3errors.ErrAccessDenied,
-        },
-        {
-            name:     "wrapped layer access denied error to s3 access denied error",
-            err:      fmt.Errorf("wrap: %w", layer.ErrAccessDenied),
-            expected: s3errors.ErrAccessDenied,
-        },
-        {
-            name:     "layer node access denied error to s3 access denied error",
-            err:      layer.ErrNodeAccessDenied,
-            expected: s3errors.ErrAccessDenied,
-        },
-        {
-            name:     "layer gateway timeout error to s3 gateway timeout error",
-            err:      layer.ErrGatewayTimeout,
-            expected: s3errors.ErrGatewayTimeout,
-        },
-        {
-            name:     "s3 error to s3 error",
-            err:      s3errors.GetAPIError(s3errors.ErrInvalidPart),
-            expected: s3errors.ErrInvalidPart,
-        },
-        {
-            name:     "wrapped s3 error to s3 error",
-            err:      fmt.Errorf("wrap: %w", s3errors.GetAPIError(s3errors.ErrInvalidPart)),
-            expected: s3errors.ErrInvalidPart,
-        },
-    } {
-        t.Run(tc.name, func(t *testing.T) {
-            err := transformToS3Error(tc.err)
-            s3err, ok := err.(s3errors.Error)
-            require.True(t, ok, "error must be s3 error")
-            require.Equalf(t, tc.expected, s3err.ErrCode,
-                "expected: '%s', got: '%s'",
-                s3errors.GetAPIError(tc.expected).Code, s3errors.GetAPIError(s3err.ErrCode).Code)
-        })
-    }
-}
@@ -4,14 +4,14 @@ import (
     "encoding/xml"
     "net/http"
 
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+    "github.com/nspcc-dev/neofs-s3-gw/api"
+    "github.com/nspcc-dev/neofs-s3-gw/api/data"
+    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
+    "github.com/nspcc-dev/neofs-s3-gw/api/layer"
 )
 
 func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
-    reqInfo := middleware.GetReqInfo(r.Context())
+    reqInfo := api.GetReqInfo(r.Context())
 
     configuration := new(VersioningConfiguration)
     if err := xml.NewDecoder(r.Body).Decode(configuration); err != nil {
@@ -57,7 +57,7 @@ func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Requ
 
 // GetBucketVersioningHandler implements bucket versioning getter handler.
 func (h *handler) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
-    reqInfo := middleware.GetReqInfo(r.Context())
+    reqInfo := api.GetReqInfo(r.Context())
 
     bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
     if err != nil {
@@ -71,7 +71,7 @@ func (h *handler) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Requ
         return
     }
 
-    if err = middleware.EncodeToResponse(w, formVersioningConfiguration(settings)); err != nil {
+    if err = api.EncodeToResponse(w, formVersioningConfiguration(settings)); err != nil {
         h.logAndSendError(w, "something went wrong", reqInfo, err)
     }
 }
@@ -1,20 +1,18 @@
 package api
 
-import "net/http"
-
 // Standard S3 HTTP request/response constants.
 const (
-    MetadataPrefix              = "X-Amz-Meta-"
-    FrostFSSystemMetadataPrefix = "S3-"
-    AmzMetadataDirective        = "X-Amz-Metadata-Directive"
-    AmzTaggingDirective         = "X-Amz-Tagging-Directive"
-    AmzVersionID                = "X-Amz-Version-Id"
-    AmzTaggingCount             = "X-Amz-Tagging-Count"
-    AmzTagging                  = "X-Amz-Tagging"
-    AmzDeleteMarker             = "X-Amz-Delete-Marker"
-    AmzCopySource               = "X-Amz-Copy-Source"
-    AmzCopySourceRange          = "X-Amz-Copy-Source-Range"
-    AmzDate                     = "X-Amz-Date"
+    MetadataPrefix            = "X-Amz-Meta-"
+    NeoFSSystemMetadataPrefix = "S3-"
+    AmzMetadataDirective      = "X-Amz-Metadata-Directive"
+    AmzTaggingDirective       = "X-Amz-Tagging-Directive"
+    AmzVersionID              = "X-Amz-Version-Id"
+    AmzTaggingCount           = "X-Amz-Tagging-Count"
+    AmzTagging                = "X-Amz-Tagging"
+    AmzDeleteMarker           = "X-Amz-Delete-Marker"
+    AmzCopySource             = "X-Amz-Copy-Source"
+    AmzCopySourceRange        = "X-Amz-Copy-Source-Range"
+    AmzDate                   = "X-Amz-Date"
 
     LastModified = "Last-Modified"
     Date         = "Date"
@@ -41,13 +39,11 @@ const (
     IfMatch     = "If-Match"
     IfNoneMatch = "If-None-Match"
 
-    AmzContentSha256         = "X-Amz-Content-Sha256"
     AmzCopyIfModifiedSince   = "X-Amz-Copy-Source-If-Modified-Since"
     AmzCopyIfUnmodifiedSince = "X-Amz-Copy-Source-If-Unmodified-Since"
     AmzCopyIfMatch           = "X-Amz-Copy-Source-If-Match"
     AmzCopyIfNoneMatch       = "X-Amz-Copy-Source-If-None-Match"
     AmzACL                   = "X-Amz-Acl"
-    AmzDecodedContentLength  = "X-Amz-Decoded-Content-Length"
     AmzGrantFullControl      = "X-Amz-Grant-Full-Control"
     AmzGrantRead             = "X-Amz-Grant-Read"
     AmzGrantWrite            = "X-Amz-Grant-Write"
@@ -66,10 +62,7 @@ const (
     AmzServerSideEncryptionCustomerKey    = "x-amz-server-side-encryption-customer-key"
     AmzServerSideEncryptionCustomerKeyMD5 = "x-amz-server-side-encryption-customer-key-MD5"
 
-    OwnerID       = "X-Owner-Id"
-    ContainerID   = "X-Container-Id"
-    ContainerName = "X-Container-Name"
-    ContainerZone = "X-Container-Zone"
+    ContainerID = "X-Container-Id"
 
     AccessControlAllowOrigin  = "Access-Control-Allow-Origin"
     AccessControlAllowMethods = "Access-Control-Allow-Methods"
@@ -82,13 +75,9 @@ const (
     AccessControlRequestMethod  = "Access-Control-Request-Method"
     AccessControlRequestHeaders = "Access-Control-Request-Headers"
 
-    AwsChunked = "aws-chunked"
-
     Vary = "Vary"
 
     DefaultLocationConstraint = "default"
-
-    StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
 )
 
 // S3 request query params.
@@ -115,8 +104,3 @@ var SystemMetadata = map[string]struct{}{
     LastModified: {},
     ETag:         {},
 }
-
-func IsSignedStreamingV4(r *http.Request) bool {
-    return r.Header.Get(AmzContentSha256) == StreamingContentSHA256 &&
-        r.Method == http.MethodPut
-}
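Editor's note: the deleted IsSignedStreamingV4 helper is how the frostfs branch detected SigV4 chunked (aws-chunked) uploads. Its condition, restated as a hedged sketch for reference:

    // A PUT whose payload hash header carries the streaming sentinel is chunk-framed.
    req, _ := http.NewRequest(http.MethodPut, "http://gw/bucket/key", body) // body assumed
    req.Header.Set("X-Amz-Content-Sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
    // IsSignedStreamingV4(req) == true; the real payload length then comes from
    // the X-Amz-Decoded-Content-Length header, which is also removed in this diff.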
@@ -1,71 +0,0 @@
-package api
-
-import (
-    "net/http"
-    "strings"
-
-    "github.com/go-chi/chi/v5"
-)
-
-type HostBucketRouter struct {
-    routes        map[string]chi.Router
-    bktParam      string
-    defaultRouter chi.Router
-}
-
-func NewHostBucketRouter(bktParam string) HostBucketRouter {
-    return HostBucketRouter{
-        routes:   make(map[string]chi.Router),
-        bktParam: bktParam,
-    }
-}
-
-func (hr *HostBucketRouter) Default(router chi.Router) {
-    hr.defaultRouter = router
-}
-
-func (hr HostBucketRouter) Map(host string, h chi.Router) {
-    hr.routes[strings.ToLower(host)] = h
-}
-
-func (hr HostBucketRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-    bucket, domain := getBucketDomain(getHost(r))
-    router, ok := hr.routes[strings.ToLower(domain)]
-    if !ok {
-        router = hr.defaultRouter
-        if router == nil {
-            http.Error(w, http.StatusText(404), 404)
-            return
-        }
-    }
-
-    if rctx := chi.RouteContext(r.Context()); rctx != nil && bucket != "" {
-        rctx.URLParams.Add(hr.bktParam, bucket)
-    }
-
-    router.ServeHTTP(w, r)
-}
-
-func getBucketDomain(host string) (bucket string, domain string) {
-    parts := strings.Split(host, ".")
-    if len(parts) > 1 {
-        return parts[0], strings.Join(parts[1:], ".")
-    }
-    return "", host
-}
-
-// getHost tries its best to return the request host.
-// According to section 14.23 of RFC 2616 the Host header
-// can include the port number if the default value of 80 is not used.
-func getHost(r *http.Request) string {
-    host := r.Host
-    if r.URL.IsAbs() {
-        host = r.URL.Host
-    }
-
-    if i := strings.Index(host, ":"); i != -1 {
-        host = host[:i]
-    }
-
-    return host
-}
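Editor's note: this removed router gave the frostfs branch virtual-hosted-style addressing: the first DNS label of the Host header is peeled off and injected as the bucket URL parameter before the matching per-domain chi router runs. A usage sketch (the domain and the s3Router/webRouter values are illustrative assumptions):

    hr := api.NewHostBucketRouter("bktName")
    hr.Map("s3.example.com", s3Router) // s3Router, webRouter: assumed chi.Router values
    hr.Default(webRouter)
    // A request to bucket.s3.example.com now reaches s3Router with
    // chi.URLParam(r, "bktName") == "bucket".
    _ = http.ListenAndServe(":8080", hr)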
@@ -1,11 +1,11 @@
 package layer
 
 import (
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+    "github.com/nspcc-dev/neofs-s3-gw/api/cache"
+    "github.com/nspcc-dev/neofs-s3-gw/api/data"
+    cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
+    oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
+    "github.com/nspcc-dev/neofs-sdk-go/user"
     "go.uber.org/zap"
 )
 
@@ -2,11 +2,10 @@ package layer
 
 import (
     "context"
-    "errors"
-    "fmt"
+    errorsStd "errors"
 
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-    s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+    "github.com/nspcc-dev/neofs-s3-gw/api/data"
+    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
 )
 
 func (n *layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *ObjectVersion, nodeVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error) {
@@ -29,8 +28,8 @@ func (n *layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *ObjectV
 
     tags, lockInfo, err = n.treeService.GetObjectTaggingAndLock(ctx, objVersion.BktInfo, nodeVersion)
     if err != nil {
-        if errors.Is(err, ErrNodeNotFound) {
-            return nil, nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
+        if errorsStd.Is(err, ErrNodeNotFound) {
+            return nil, nil, errors.GetAPIError(errors.ErrNoSuchKey)
         }
         return nil, nil, err
     }
@@ -4,16 +4,16 @@ import (
     "context"
     "fmt"
     "strconv"
     "time"
 
-    v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-    s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+    "github.com/nspcc-dev/neofs-s3-gw/api"
+    "github.com/nspcc-dev/neofs-s3-gw/api/data"
+    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
+    "github.com/nspcc-dev/neofs-sdk-go/client"
+    "github.com/nspcc-dev/neofs-sdk-go/container"
+    cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
+    "github.com/nspcc-dev/neofs-sdk-go/eacl"
+    "github.com/nspcc-dev/neofs-sdk-go/session"
     "go.uber.org/zap"
 )
 
@@ -34,19 +34,22 @@ func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketIn
     var (
         err error
         res *container.Container
-        log = n.reqLogger(ctx).With(zap.Stringer("cid", idCnr))
+        rid = api.GetRequestID(ctx)
+        log = n.log.With(zap.Stringer("cid", idCnr), zap.String("request_id", rid))
 
         info = &data.BucketInfo{
             CID:  idCnr,
             Name: idCnr.EncodeToString(),
         }
     )
-    res, err = n.frostFS.Container(ctx, idCnr)
+    res, err = n.neoFS.Container(ctx, idCnr)
     if err != nil {
         log.Error("could not fetch container", zap.Error(err))
 
         if client.IsErrContainerNotFound(err) {
-            return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchBucket), err.Error())
+            return nil, errors.GetAPIError(errors.ErrNoSuchBucket)
         }
-        return nil, fmt.Errorf("get frostfs container: %w", err)
+        return nil, fmt.Errorf("get neofs container: %w", err)
     }
 
     cnr := *res
@@ -54,7 +57,6 @@ func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketIn
     info.Owner = cnr.Owner()
     if domain := container.ReadDomain(cnr); domain.Name() != "" {
         info.Name = domain.Name()
-        info.Zone = domain.Zone()
     }
     info.Created = container.CreatedAt(cnr)
     info.LocationConstraint = cnr.Attribute(attributeLocationConstraint)
@@ -80,10 +82,13 @@ func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
         err error
         own = n.Owner(ctx)
         res []cid.ID
+        rid = api.GetRequestID(ctx)
     )
-    res, err = n.frostFS.UserContainers(ctx, own)
+    res, err = n.neoFS.UserContainers(ctx, own)
     if err != nil {
-        n.reqLogger(ctx).Error("could not list user containers", zap.Error(err))
+        n.log.Error("could not list user containers",
+            zap.String("request_id", rid),
+            zap.Error(err))
         return nil, err
     }
 
@@ -91,7 +96,9 @@ func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
     for i := range res {
         info, err := n.containerInfo(ctx, res[i])
         if err != nil {
-            n.reqLogger(ctx).Error("could not fetch container info", zap.Error(err))
+            n.log.Error("could not fetch container info",
+                zap.String("request_id", rid),
+                zap.Error(err))
             continue
         }
 
@@ -108,9 +115,8 @@ func (n *layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
     }
     bktInfo := &data.BucketInfo{
         Name:               p.Name,
-        Zone:               v2container.SysAttributeZoneDefault,
         Owner:              ownerID,
-        Created:            TimeNow(ctx),
+        Created:            time.Now(), // this can be a little incorrect since the real time is set later
         LocationConstraint: p.LocationConstraint,
         ObjectLockEnabled:  p.ObjectLockEnabled,
     }
@@ -127,12 +133,11 @@ func (n *layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
         })
     }
 
-    idCnr, err := n.frostFS.CreateContainer(ctx, PrmContainerCreate{
+    idCnr, err := n.neoFS.CreateContainer(ctx, PrmContainerCreate{
         Creator:      bktInfo.Owner,
         Policy:       p.Policy,
         Name:         p.Name,
        SessionToken: p.SessionContainerCreation,
        CreationTime: bktInfo.Created,
-        AdditionalAttributes: attributes,
     })
     if err != nil {
@@ -153,9 +158,9 @@ func (n *layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
 func (n *layer) setContainerEACLTable(ctx context.Context, idCnr cid.ID, table *eacl.Table, sessionToken *session.Container) error {
     table.SetCID(idCnr)
 
-    return n.frostFS.SetContainerEACL(ctx, *table, sessionToken)
+    return n.neoFS.SetContainerEACL(ctx, *table, sessionToken)
 }
 
 func (n *layer) GetContainerEACL(ctx context.Context, idCnr cid.ID) (*eacl.Table, error) {
-    return n.frostFS.ContainerEACL(ctx, idCnr)
+    return n.neoFS.ContainerEACL(ctx, idCnr)
 }
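Editor's note: both variants keep the same naming fallback in containerInfo: a bucket is first named by its raw CID string and only renamed when the container carries a registered domain. Restated as a minimal sketch (field names taken from the hunk above):

    info := &data.BucketInfo{CID: idCnr, Name: idCnr.EncodeToString()} // default name: CID
    if domain := container.ReadDomain(cnr); domain.Name() != "" {
        info.Name = domain.Name() // human-readable name wins when registered
    }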
@@ -8,8 +8,8 @@ import (
     "fmt"
     "io"
 
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+    "github.com/nspcc-dev/neofs-s3-gw/api/data"
+    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
     "go.uber.org/zap"
 )
 
@@ -39,13 +39,12 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
     prm := PrmObjectCreate{
         Container: p.BktInfo.CID,
         Creator:   p.BktInfo.Owner,
-        Payload:   &buf,
+        Payload:   p.Reader,
         Filepath:  p.BktInfo.CORSObjectName(),
-        CreationTime: TimeNow(ctx),
-        CopiesNumber: p.CopiesNumbers,
+        CopiesNumber: p.CopiesNumber,
     }
 
-    _, objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+    objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
     if err != nil {
         return fmt.Errorf("put system object: %w", err)
     }
@@ -58,8 +57,9 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
 
     if !objIDToDeleteNotFound {
         if err = n.objectDelete(ctx, p.BktInfo, objIDToDelete); err != nil {
-            n.reqLogger(ctx).Error("couldn't delete cors object", zap.Error(err),
+            n.log.Error("couldn't delete cors object", zap.Error(err),
                 zap.String("cnrID", p.BktInfo.CID.EncodeToString()),
+                zap.String("bucket name", p.BktInfo.Name),
                 zap.String("objID", objIDToDelete.EncodeToString()))
         }
     }
@@ -73,7 +73,7 @@ func (n *layer) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (*d
     cors, err := n.getCORS(ctx, bktInfo)
     if err != nil {
         if errorsStd.Is(err, ErrNodeNotFound) {
-            return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchCORSConfiguration), err.Error())
+            return nil, errors.GetAPIError(errors.ErrNoSuchCORSConfiguration)
         }
         return nil, err
     }
@@ -11,22 +11,20 @@ import (
     "strings"
     "time"
 
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
-    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
     "github.com/nats-io/nats.go"
     "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+    "github.com/nspcc-dev/neofs-s3-gw/api"
+    "github.com/nspcc-dev/neofs-s3-gw/api/data"
+    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
+    "github.com/nspcc-dev/neofs-s3-gw/api/layer/encryption"
+    "github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
+    "github.com/nspcc-dev/neofs-sdk-go/bearer"
+    cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
+    "github.com/nspcc-dev/neofs-sdk-go/eacl"
+    "github.com/nspcc-dev/neofs-sdk-go/netmap"
+    oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
+    "github.com/nspcc-dev/neofs-sdk-go/session"
+    "github.com/nspcc-dev/neofs-sdk-go/user"
     "go.uber.org/zap"
 )
 
@@ -47,7 +45,7 @@ type (
     }
 
     layer struct {
-        frostFS  FrostFS
+        neoFS    NeoFS
         log      *zap.Logger
         anonKey  AnonymousKey
         resolver BucketResolver
@@ -74,7 +72,7 @@ type (
         Range      *RangeParams
         ObjectInfo *data.ObjectInfo
         BucketInfo *data.BucketInfo
-        Versioned  bool
+        Writer     io.Writer
         Encryption encryption.Params
     }
 
@@ -101,23 +99,14 @@ type (
 
     // PutObjectParams stores object put request parameters.
     PutObjectParams struct {
-        BktInfo       *data.BucketInfo
-        Object        string
-        Size          uint64
-        Reader        io.Reader
-        Header        map[string]string
-        Lock          *data.ObjectLock
-        Encryption    encryption.Params
-        CopiesNumbers []uint32
-    }
-
-    PutCombinedObjectParams struct {
-        BktInfo    *data.BucketInfo
-        Object     string
-        Size       uint64
-        Header     map[string]string
-        Lock       *data.ObjectLock
-        Encryption encryption.Params
+        BktInfo      *data.BucketInfo
+        Object       string
+        Size         int64
+        Reader       io.Reader
+        Header       map[string]string
+        Lock         *data.ObjectLock
+        Encryption   encryption.Params
+        CopiesNumber uint32
     }
 
     DeleteObjectParams struct {
@@ -134,24 +123,23 @@ type (
 
     // PutCORSParams stores PutCORS request parameters.
     PutCORSParams struct {
-        BktInfo       *data.BucketInfo
-        Reader        io.Reader
-        CopiesNumbers []uint32
+        BktInfo      *data.BucketInfo
+        Reader       io.Reader
+        CopiesNumber uint32
     }
 
     // CopyObjectParams stores object copy request parameters.
     CopyObjectParams struct {
-        SrcVersioned  bool
-        SrcObject     *data.ObjectInfo
-        ScrBktInfo    *data.BucketInfo
-        DstBktInfo    *data.BucketInfo
-        DstObject     string
-        SrcSize       uint64
-        Header        map[string]string
-        Range         *RangeParams
-        Lock          *data.ObjectLock
-        Encryption    encryption.Params
-        CopiesNumbers []uint32
+        SrcObject   *data.ObjectInfo
+        ScrBktInfo  *data.BucketInfo
+        DstBktInfo  *data.BucketInfo
+        DstObject   string
+        SrcSize     int64
+        Header      map[string]string
+        Range       *RangeParams
+        Lock        *data.ObjectLock
+        Encryption  encryption.Params
+        CopiesNuber uint32
     }
     // CreateBucketParams stores bucket create request parameters.
     CreateBucketParams struct {
@@ -195,13 +183,6 @@ type (
         Error error
     }
 
-    ObjectPayload struct {
-        r            io.Reader
-        params       getParams
-        encrypted    bool
-        decryptedLen uint64
-    }
-
     // Client provides S3 API client interface.
     Client interface {
         Initialize(ctx context.Context, c EventListener) error
@@ -221,7 +202,7 @@ type (
         CreateBucket(ctx context.Context, p *CreateBucketParams) (*data.BucketInfo, error)
         DeleteBucket(ctx context.Context, p *DeleteBucketParams) error
 
-        GetObject(ctx context.Context, p *GetObjectParams) (*ObjectPayload, error)
+        GetObject(ctx context.Context, p *GetObjectParams) error
         GetObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.ObjectInfo, error)
         GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.ExtendedObjectInfo, error)
 
@@ -269,12 +250,12 @@ const (
 
     AESEncryptionAlgorithm = "AES256"
     AESKeySize             = 32
-    AttributeEncryptionAlgorithm = api.FrostFSSystemMetadataPrefix + "Algorithm"
-    AttributeDecryptedSize       = api.FrostFSSystemMetadataPrefix + "Decrypted-Size"
-    AttributeHMACSalt            = api.FrostFSSystemMetadataPrefix + "HMAC-Salt"
-    AttributeHMACKey             = api.FrostFSSystemMetadataPrefix + "HMAC-Key"
+    AttributeEncryptionAlgorithm = api.NeoFSSystemMetadataPrefix + "Algorithm"
+    AttributeDecryptedSize       = api.NeoFSSystemMetadataPrefix + "Decrypted-Size"
+    AttributeHMACSalt            = api.NeoFSSystemMetadataPrefix + "HMAC-Salt"
+    AttributeHMACKey             = api.NeoFSSystemMetadataPrefix + "HMAC-Key"
 
-    AttributeFrostfsCopiesNumber = "frostfs-copies-number" // such format to match X-Amz-Meta-Frostfs-Copies-Number header
+    AttributeNeofsCopiesNumber = "neofs-copies-number" // such format to match X-Amz-Meta-Neofs-Copies-Number header
 )
 
 func (t *VersionedObject) String() string {
@@ -285,15 +266,11 @@ func (f MsgHandlerFunc) HandleMessage(ctx context.Context, msg *nats.Msg) error
     return f(ctx, msg)
 }
 
-func (p HeadObjectParams) Versioned() bool {
-    return len(p.VersionID) > 0
-}
-
 // NewLayer creates an instance of a layer. It checks credentials
 // and establishes gRPC connection with the node.
-func NewLayer(log *zap.Logger, frostFS FrostFS, config *Config) Client {
+func NewLayer(log *zap.Logger, neoFS NeoFS, config *Config) Client {
     return &layer{
-        frostFS:  frostFS,
+        neoFS:    neoFS,
         log:      log,
         anonKey:  config.AnonKey,
         resolver: config.Resolver,
@@ -325,22 +302,13 @@ func (n *layer) IsNotificationEnabled() bool {
 
 // IsAuthenticatedRequest checks if access box exists in the current request.
 func IsAuthenticatedRequest(ctx context.Context) bool {
-    _, ok := ctx.Value(middleware.BoxData).(*accessbox.Box)
+    _, ok := ctx.Value(api.BoxData).(*accessbox.Box)
     return ok
 }
 
-// TimeNow returns client time from request or time.Now().
-func TimeNow(ctx context.Context) time.Time {
-    if now, ok := ctx.Value(middleware.ClientTime).(time.Time); ok {
-        return now
-    }
-
-    return time.Now()
-}
-
 // Owner returns owner id from BearerToken (context) or from client owner.
 func (n *layer) Owner(ctx context.Context) user.ID {
-    if bd, ok := ctx.Value(middleware.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
+    if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
         return bearer.ResolveIssuer(*bd.Gate.BearerToken)
     }
 
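Editor's note: the removed TimeNow is what let creation timestamps in the frostfs branch (bucket Created, delete markers, part Created further down) follow a client-asserted clock instead of the gateway's; every call site in this diff falls back to time.Now(). A hedged sketch of the context plumbing it relied on:

    // Set by auth middleware (key name as used in the removed code above):
    ctx := context.WithValue(r.Context(), middleware.ClientTime, clientTime)
    created := layer.TimeNow(ctx) // clientTime when present, time.Now() otherwise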
@@ -350,17 +318,9 @@ func (n *layer) Owner(ctx context.Context) user.ID {
     return ownerID
 }
 
-func (n *layer) reqLogger(ctx context.Context) *zap.Logger {
-    reqLogger := middleware.GetReqLog(ctx)
-    if reqLogger != nil {
-        return reqLogger
-    }
-    return n.log
-}
-
 func (n *layer) prepareAuthParameters(ctx context.Context, prm *PrmAuth, bktOwner user.ID) {
-    if bd, ok := ctx.Value(middleware.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
-        if bd.Gate.BearerToken.Impersonate() || bktOwner.Equals(bearer.ResolveIssuer(*bd.Gate.BearerToken)) {
+    if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
+        if bktOwner.Equals(bearer.ResolveIssuer(*bd.Gate.BearerToken)) {
             prm.BearerToken = bd.Gate.BearerToken
             return
         }
@@ -382,10 +342,8 @@ func (n *layer) GetBucketInfo(ctx context.Context, name string) (*data.BucketInf
 
     containerID, err := n.ResolveBucket(ctx, name)
     if err != nil {
-        if strings.Contains(err.Error(), "not found") {
-            return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucket), err.Error())
-        }
-        return nil, err
+        n.log.Debug("bucket not found", zap.Error(err))
+        return nil, errors.GetAPIError(errors.ErrNoSuchBucket)
     }
 
     return n.containerInfo(ctx, containerID)
@@ -410,16 +368,16 @@ func (n *layer) PutBucketACL(ctx context.Context, param *PutBucketACLParams) err
 }
 
 // ListBuckets returns all user containers. The name of the bucket is a container
-// id. Timestamp is omitted since it is not saved in frostfs container.
+// id. Timestamp is omitted since it is not saved in neofs container.
 func (n *layer) ListBuckets(ctx context.Context) ([]*data.BucketInfo, error) {
     return n.containerList(ctx)
 }
 
 // GetObject from storage.
-func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) (*ObjectPayload, error) {
+func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
     var params getParams
 
-    params.objInfo = p.ObjectInfo
+    params.oid = p.ObjectInfo.ID
     params.bktInfo = p.BucketInfo
 
     var decReader *encryption.Decrypter
@@ -427,7 +385,7 @@ func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) (*ObjectPaylo
         var err error
         decReader, err = getDecrypter(p)
         if err != nil {
-            return nil, fmt.Errorf("creating decrypter: %w", err)
+            return fmt.Errorf("creating decrypter: %w", err)
         }
         params.off = decReader.EncryptedOffset()
         params.ln = decReader.EncryptedLength()
@@ -441,58 +399,32 @@
     }
 
-    r, err := n.initObjectPayloadReader(ctx, params)
+    payload, err := n.initObjectPayloadReader(ctx, params)
     if err != nil {
-        if client.IsErrObjectNotFound(err) {
-            if p.Versioned {
-                err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchVersion), err.Error())
-            } else {
-                err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchKey), err.Error())
-            }
-        }
-
-        return nil, fmt.Errorf("init object payload reader: %w", err)
+        return fmt.Errorf("init object payload reader: %w", err)
     }
 
-    var decryptedLen uint64
-    if decReader != nil {
-        if err = decReader.SetReader(r); err != nil {
-            return nil, fmt.Errorf("set reader to decrypter: %w", err)
-        }
-        r = io.LimitReader(decReader, int64(decReader.DecryptedLength()))
-        decryptedLen = decReader.DecryptedLength()
-    }
-
-    return &ObjectPayload{
-        r:            r,
-        params:       params,
-        encrypted:    decReader != nil,
-        decryptedLen: decryptedLen,
-    }, nil
-}
-
-// Read implements io.Reader. If you want to use ObjectPayload as io.Reader
-// you must not use ObjectPayload.StreamTo method and vice versa.
-func (o *ObjectPayload) Read(p []byte) (int, error) {
-    return o.r.Read(p)
-}
-
-// StreamTo reads all payload to provided writer.
-// If you want to use this method you must not use ObjectPayload.Read and vice versa.
-func (o *ObjectPayload) StreamTo(w io.Writer) error {
     bufSize := uint64(32 * 1024) // configure?
-    if o.params.ln != 0 && o.params.ln < bufSize {
-        bufSize = o.params.ln
+    if params.ln != 0 && params.ln < bufSize {
+        bufSize = params.ln
     }
 
     // alloc buffer for copying
     buf := make([]byte, bufSize) // sync-pool it?
 
+    r := payload
+    if decReader != nil {
+        if err = decReader.SetReader(payload); err != nil {
+            return fmt.Errorf("set reader to decrypter: %w", err)
+        }
+        r = io.LimitReader(decReader, int64(decReader.DecryptedLength()))
+    }
+
     // copy full payload
-    written, err := io.CopyBuffer(w, o.r, buf)
+    written, err := io.CopyBuffer(p.Writer, r, buf)
     if err != nil {
-        if o.encrypted {
-            return fmt.Errorf("copy object payload written: '%d', decLength: '%d', params.ln: '%d' : %w", written, o.decryptedLen, o.params.ln, err)
+        if decReader != nil {
+            return fmt.Errorf("copy object payload written: '%d', decLength: '%d', params.ln: '%d' : %w", written, decReader.DecryptedLength(), params.ln, err)
         }
         return fmt.Errorf("copy object payload written: '%d': %w", written, err)
     }
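Editor's note: the frostfs-side API being deleted here returns an ObjectPayload whose two consumption modes are mutually exclusive: use it as an io.Reader or stream it once into a writer, never both. A sketch of the two call shapes (oi, bi, w, dst are assumed values):

    payload, err := n.GetObject(ctx, &GetObjectParams{ObjectInfo: oi, BucketInfo: bi})
    if err != nil {
        return err
    }
    // either: stream straight into the HTTP response writer
    err = payload.StreamTo(w)
    // or (exclusively): consume it as a plain reader
    // _, err = io.Copy(dst, payload)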
@@ -541,46 +473,39 @@ func (n *layer) GetObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.O
 
 // GetExtendedObjectInfo returns meta information and corresponding info from the tree service about the object.
 func (n *layer) GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.ExtendedObjectInfo, error) {
-    var objInfo *data.ExtendedObjectInfo
-    var err error
-
-    if p.Versioned() {
-        objInfo, err = n.headVersion(ctx, p.BktInfo, p)
-    } else {
-        objInfo, err = n.headLastVersionIfNotDeleted(ctx, p.BktInfo, p.Object)
-    }
-    if err != nil {
-        return nil, err
+    if len(p.VersionID) == 0 {
+        return n.headLastVersionIfNotDeleted(ctx, p.BktInfo, p.Object)
     }
 
-    n.reqLogger(ctx).Debug("get object",
-        zap.Stringer("cid", p.BktInfo.CID),
-        zap.Stringer("oid", objInfo.ObjectInfo.ID))
-
-    return objInfo, nil
+    return n.headVersion(ctx, p.BktInfo, p)
 }
 
 // CopyObject from one bucket into another bucket.
 func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*data.ExtendedObjectInfo, error) {
-    objPayload, err := n.GetObject(ctx, &GetObjectParams{
-        ObjectInfo: p.SrcObject,
-        Versioned:  p.SrcVersioned,
-        Range:      p.Range,
-        BucketInfo: p.ScrBktInfo,
-        Encryption: p.Encryption,
-    })
-    if err != nil {
-        return nil, fmt.Errorf("get object to copy: %w", err)
-    }
+    pr, pw := io.Pipe()
+
+    go func() {
+        err := n.GetObject(ctx, &GetObjectParams{
+            ObjectInfo: p.SrcObject,
+            Writer:     pw,
+            Range:      p.Range,
+            BucketInfo: p.ScrBktInfo,
+            Encryption: p.Encryption,
+        })
+
+        if err = pw.CloseWithError(err); err != nil {
+            n.log.Error("could not get object", zap.Error(err))
+        }
+    }()
 
     return n.PutObject(ctx, &PutObjectParams{
-        BktInfo:       p.DstBktInfo,
-        Object:        p.DstObject,
-        Size:          p.SrcSize,
-        Reader:        objPayload,
-        Header:        p.Header,
-        Encryption:    p.Encryption,
-        CopiesNumbers: p.CopiesNumbers,
+        BktInfo:      p.DstBktInfo,
+        Object:       p.DstObject,
+        Size:         p.SrcSize,
+        Reader:       pr,
+        Header:       p.Header,
+        Encryption:   p.Encryption,
+        CopiesNumber: p.CopiesNuber,
     })
 }
 
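Editor's note: in the neofs variant CopyObject couples GetObject to PutObject through io.Pipe, so a copy streams without buffering the whole object, and CloseWithError forwards a producer failure to the consumer. The underlying pattern, as a standalone sketch (src and dst are assumed):

    pr, pw := io.Pipe()
    go func() {
        _, err := io.Copy(pw, src)   // producer; src is any io.Reader
        _ = pw.CloseWithError(err)   // nil err -> consumer sees io.EOF
    }()
    if _, err := io.Copy(dst, pr); err != nil { // consumer fails if producer failed
        // handle copy error
    }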
@@ -599,11 +524,11 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
     if len(obj.VersionID) != 0 || settings.Unversioned() {
         var nodeVersion *data.NodeVersion
         if nodeVersion, obj.Error = n.getNodeVersionToDelete(ctx, bkt, obj); obj.Error != nil {
-            return n.handleNotFoundError(bkt, obj)
+            return dismissNotFoundError(obj)
         }
 
         if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nodeVersion, obj); obj.Error != nil {
-            return n.handleObjectDeleteErrors(ctx, bkt, obj, nodeVersion.ID)
+            return obj
         }
 
         obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeVersion.ID)
@@ -611,38 +536,21 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
         return obj
     }
 
-    lastVersion, err := n.getLastNodeVersion(ctx, bkt, obj)
-    if err != nil {
-        obj.Error = err
-        return n.handleNotFoundError(bkt, obj)
-    }
+    var newVersion *data.NodeVersion
 
     if settings.VersioningSuspended() {
         obj.VersionID = data.UnversionedObjectVersionID
 
-        var nullVersionToDelete *data.NodeVersion
-        if lastVersion.IsUnversioned {
-            if !lastVersion.IsDeleteMarker() {
-                nullVersionToDelete = lastVersion
-            }
-        } else if nullVersionToDelete, obj.Error = n.getNodeVersionToDelete(ctx, bkt, obj); obj.Error != nil {
-            if !isNotFoundError(obj.Error) {
-                return obj
-            }
+        var nodeVersion *data.NodeVersion
+        if nodeVersion, obj.Error = n.getNodeVersionToDelete(ctx, bkt, obj); obj.Error != nil {
+            return dismissNotFoundError(obj)
         }
 
-        if nullVersionToDelete != nil {
-            if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nullVersionToDelete, obj); obj.Error != nil {
-                return n.handleObjectDeleteErrors(ctx, bkt, obj, nullVersionToDelete.ID)
-            }
+        if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nodeVersion, obj); obj.Error != nil {
+            return obj
         }
     }
 
-    if lastVersion.IsDeleteMarker() {
-        obj.DeleteMarkVersion = lastVersion.OID.EncodeToString()
-        return obj
-    }
-
     randOID, err := getRandomOID()
     if err != nil {
         obj.Error = fmt.Errorf("couldn't get random oid: %w", err)
@@ -651,13 +559,13 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
 
     obj.DeleteMarkVersion = randOID.EncodeToString()
 
-    newVersion := &data.NodeVersion{
+    newVersion = &data.NodeVersion{
         BaseNodeVersion: data.BaseNodeVersion{
             OID:      randOID,
             FilePath: obj.Name,
         },
         DeleteMarker: &data.DeleteMarkerInfo{
-            Created: TimeNow(ctx),
+            Created: time.Now(),
             Owner:   n.Owner(ctx),
         },
         IsUnversioned: settings.VersioningSuspended(),
@@ -672,46 +580,15 @@
     return obj
 }
 
-func (n *layer) handleNotFoundError(bkt *data.BucketInfo, obj *VersionedObject) *VersionedObject {
-    if isNotFoundError(obj.Error) {
+func dismissNotFoundError(obj *VersionedObject) *VersionedObject {
+    if errors.IsS3Error(obj.Error, errors.ErrNoSuchKey) ||
+        errors.IsS3Error(obj.Error, errors.ErrNoSuchVersion) {
         obj.Error = nil
-        n.cache.CleanListCacheEntriesContainingObject(obj.Name, bkt.CID)
-        n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
     }
 
     return obj
 }
 
-func (n *layer) handleObjectDeleteErrors(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject, nodeID uint64) *VersionedObject {
-    if client.IsErrObjectAlreadyRemoved(obj.Error) {
-        n.reqLogger(ctx).Debug("object already removed",
-            zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID))
-
-        obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeID)
-        if obj.Error != nil {
-            return obj
-        }
-
-        n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
-    }
-
-    if client.IsErrObjectNotFound(obj.Error) {
-        n.reqLogger(ctx).Debug("object not found",
-            zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID))
-
-        obj.Error = nil
-
-        n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
-    }
-
-    return obj
-}
-
-func isNotFoundError(err error) bool {
-    return errors.IsS3Error(err, errors.ErrNoSuchKey) ||
-        errors.IsS3Error(err, errors.ErrNoSuchVersion)
-}
-
 func (n *layer) getNodeVersionToDelete(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject) (*data.NodeVersion, error) {
     objVersion := &ObjectVersion{
         BktInfo: bkt,
@@ -723,17 +600,6 @@ func (n *layer) getNodeVersionToDelete(ctx context.Context, bkt *data.BucketInfo
     return n.getNodeVersion(ctx, objVersion)
 }
 
-func (n *layer) getLastNodeVersion(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject) (*data.NodeVersion, error) {
-    objVersion := &ObjectVersion{
-        BktInfo:               bkt,
-        ObjectName:            obj.Name,
-        VersionID:             "",
-        NoErrorOnDeleteMarker: true,
-    }
-
-    return n.getNodeVersion(ctx, objVersion)
-}
-
 func (n *layer) removeOldVersion(ctx context.Context, bkt *data.BucketInfo, nodeVersion *data.NodeVersion, obj *VersionedObject) (string, error) {
     if nodeVersion.IsDeleteMarker() {
         return obj.VersionID, nil
@@ -770,18 +636,14 @@ func (n *layer) CreateBucket(ctx context.Context, p *CreateBucketParams) (*data.
 func (n *layer) ResolveBucket(ctx context.Context, name string) (cid.ID, error) {
     var cnrID cid.ID
     if err := cnrID.DecodeString(name); err != nil {
-        if cnrID, err = n.resolver.Resolve(ctx, name); err != nil {
-            return cid.ID{}, err
-        }
-
-        n.reqLogger(ctx).Info("resolve bucket", zap.Stringer("cid", cnrID))
+        return n.resolver.Resolve(ctx, name)
     }
 
     return cnrID, nil
 }
 
 func (n *layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
-    nodeVersions, err := n.getAllObjectsVersions(ctx, p.BktInfo, "", "")
+    nodeVersions, err := n.bucketNodeVersions(ctx, p.BktInfo, "")
     if err != nil {
         return err
     }
@@ -790,5 +652,5 @@ func (n *layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
     }
 
     n.cache.DeleteBucket(p.BktInfo.Name)
-    return n.frostFS.DeleteContainer(ctx, p.BktInfo.CID, p.SessionToken)
+    return n.neoFS.DeleteContainer(ctx, p.BktInfo.CID, p.SessionToken)
 }
@@ -4,8 +4,7 @@ import (
     "testing"
     "time"
 
-    "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+    "github.com/nspcc-dev/neofs-s3-gw/api/data"
     "github.com/stretchr/testify/require"
 )
 
@@ -30,7 +29,7 @@ func TestObjectLockAttributes(t *testing.T) {
                 Until: time.Now(),
             },
         },
-        CopiesNumbers: []uint32{0},
+        CopiesNumber: 0,
     }
 
     err = tc.layer.PutLockInfo(tc.ctx, p)
@@ -44,10 +43,10 @@ func TestObjectLockAttributes(t *testing.T) {
 
     expEpoch := false
     for _, attr := range lockObj.Attributes() {
-        if attr.Key() == object.SysAttributeExpEpoch {
+        if attr.Key() == AttributeExpirationEpoch {
             expEpoch = true
         }
     }
 
-    require.Truef(t, expEpoch, "system header __SYSTEM__EXPIRATION_EPOCH presence")
+    require.Truef(t, expEpoch, "system header __NEOFS__EXPIRATION_EPOCH presence")
 }
@@ -1,149 +0,0 @@
-package layer
-
-import (
-    "context"
-    "errors"
-    "fmt"
-    "io"
-
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type partObj struct {
-    OID  oid.ID
-    Size uint64
-}
-
-type readerInitiator interface {
-    initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFSParams) (io.Reader, error)
-}
-
-// implements io.Reader of payloads of the object list stored in the FrostFS network.
-type multiObjectReader struct {
-    ctx context.Context
-
-    layer readerInitiator
-
-    startPartOffset uint64
-    endPartLength   uint64
-
-    prm getFrostFSParams
-
-    curIndex  int
-    curReader io.Reader
-
-    parts []partObj
-}
-
-type multiObjectReaderConfig struct {
-    layer readerInitiator
-
-    // the offset of complete object and total size to read
-    off, ln uint64
-
-    bktInfo *data.BucketInfo
-    parts   []partObj
-}
-
-var (
-    errOffsetIsOutOfRange = errors.New("offset is out of payload range")
-    errLengthIsOutOfRange = errors.New("length is out of payload range")
-    errEmptyPartsList     = errors.New("empty parts list")
-    errorZeroRangeLength  = errors.New("zero range length")
-)
-
-func newMultiObjectReader(ctx context.Context, cfg multiObjectReaderConfig) (*multiObjectReader, error) {
-    if len(cfg.parts) == 0 {
-        return nil, errEmptyPartsList
-    }
-
-    r := &multiObjectReader{
-        ctx:   ctx,
-        layer: cfg.layer,
-        prm: getFrostFSParams{
-            bktInfo: cfg.bktInfo,
-        },
-        parts: cfg.parts,
-    }
-
-    if cfg.off+cfg.ln == 0 {
-        return r, nil
-    }
-
-    if cfg.off > 0 && cfg.ln == 0 {
-        return nil, errorZeroRangeLength
-    }
-
-    startPartIndex, startPartOffset := findStartPart(cfg)
-    if startPartIndex == -1 {
-        return nil, errOffsetIsOutOfRange
-    }
-    r.startPartOffset = startPartOffset
-
-    endPartIndex, endPartLength := findEndPart(cfg)
-    if endPartIndex == -1 {
-        return nil, errLengthIsOutOfRange
-    }
-    r.endPartLength = endPartLength
-
-    r.parts = cfg.parts[startPartIndex : endPartIndex+1]
-
-    return r, nil
-}
-
-func findStartPart(cfg multiObjectReaderConfig) (index int, offset uint64) {
-    return findPartByPosition(cfg.off, cfg.parts)
-}
-
-func findEndPart(cfg multiObjectReaderConfig) (index int, length uint64) {
-    return findPartByPosition(cfg.off+cfg.ln, cfg.parts)
-}
-
-func findPartByPosition(position uint64, parts []partObj) (index int, positionInPart uint64) {
-    for i, part := range parts {
-        if position <= part.Size {
-            return i, position
-        }
-        position -= part.Size
-    }
-
-    return -1, 0
-}
-
-func (x *multiObjectReader) Read(p []byte) (n int, err error) {
-    if x.curReader != nil {
-        n, err = x.curReader.Read(p)
-        if !errors.Is(err, io.EOF) {
-            return n, err
-        }
-        x.curIndex++
-    }
-
-    if x.curIndex == len(x.parts) {
-        return n, io.EOF
-    }
-
-    x.prm.oid = x.parts[x.curIndex].OID
-
-    if x.curIndex == 0 {
-        x.prm.off = x.startPartOffset
-        x.prm.ln = x.parts[x.curIndex].Size - x.startPartOffset
-    }
-
-    if x.curIndex == len(x.parts)-1 {
-        x.prm.ln = x.endPartLength - x.prm.off
-    }
-
-    x.curReader, err = x.layer.initFrostFSObjectPayloadReader(x.ctx, x.prm)
-    if err != nil {
-        return n, fmt.Errorf("init payload reader for the next part: %w", err)
-    }
-
-    x.prm.off = 0
-    x.prm.ln = 0
-
-    next, err := x.Read(p[n:])
-
-    return n + next, err
-}
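Editor's note: findPartByPosition walks the part list, subtracting each part's size until the absolute offset lands inside one. A worked example using the part sizes from the deleted test below (12, 13 and 12 bytes):

    // Locating absolute offset 14 in parts of sizes {12, 13, 12}:
    // position=14, part 0 (size 12): 14 > 12 -> position = 14-12 = 2
    // position=2,  part 1 (size 13): 2 <= 13 -> index 1, offset-in-part 2
    idx, off := findPartByPosition(14, []partObj{{Size: 12}, {Size: 13}, {Size: 12}})
    // idx == 1, off == 2; any position past 12+13+12 = 37 returns (-1, 0).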
@@ -1,127 +0,0 @@
-package layer
-
-import (
-    "bytes"
-    "context"
-    "errors"
-    "fmt"
-    "io"
-    "testing"
-
-    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-    oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
-    "github.com/stretchr/testify/require"
-)
-
-type readerInitiatorMock struct {
-    parts map[oid.ID][]byte
-}
-
-func (r *readerInitiatorMock) initFrostFSObjectPayloadReader(_ context.Context, p getFrostFSParams) (io.Reader, error) {
-    partPayload, ok := r.parts[p.oid]
-    if !ok {
-        return nil, errors.New("part not found")
-    }
-
-    if p.off+p.ln == 0 {
-        return bytes.NewReader(partPayload), nil
-    }
-
-    if p.off > uint64(len(partPayload)-1) {
-        return nil, fmt.Errorf("invalid offset: %d/%d", p.off, len(partPayload))
-    }
-
-    if p.off+p.ln > uint64(len(partPayload)) {
-        return nil, fmt.Errorf("invalid range: %d-%d/%d", p.off, p.off+p.ln, len(partPayload))
-    }
-
-    return bytes.NewReader(partPayload[p.off : p.off+p.ln]), nil
-}
-
-func prepareDataReader() ([]byte, []partObj, *readerInitiatorMock) {
-    mockInitReader := &readerInitiatorMock{
-        parts: map[oid.ID][]byte{
-            oidtest.ID(): []byte("first part 1"),
-            oidtest.ID(): []byte("second part 2"),
-            oidtest.ID(): []byte("third part 3"),
-        },
-    }
-
-    var fullPayload []byte
-    parts := make([]partObj, 0, len(mockInitReader.parts))
-    for id, payload := range mockInitReader.parts {
-        parts = append(parts, partObj{OID: id, Size: uint64(len(payload))})
-        fullPayload = append(fullPayload, payload...)
-    }
-
-    return fullPayload, parts, mockInitReader
-}
-
-func TestMultiReader(t *testing.T) {
-    ctx := context.Background()
-
-    fullPayload, parts, mockInitReader := prepareDataReader()
-
-    for _, tc := range []struct {
-        name string
-        off  uint64
-        ln   uint64
-        err  error
-    }{
-        {
-            name: "simple read all",
-        },
-        {
-            name: "simple read with length",
-            ln:   uint64(len(fullPayload)),
-        },
-        {
-            name: "middle of parts",
-            off:  parts[0].Size + 2,
-            ln:   4,
-        },
-        {
-            name: "first and second",
-            off:  parts[0].Size - 4,
-            ln:   8,
-        },
-        {
-            name: "first and third",
-            off:  parts[0].Size - 4,
-            ln:   parts[1].Size + 8,
-        },
-        {
-            name: "offset out of range",
-            off:  uint64(len(fullPayload) + 1),
-            ln:   1,
-            err:  errOffsetIsOutOfRange,
-        },
-        {
-            name: "zero length",
-            off:  parts[1].Size + 1,
-            ln:   0,
-            err:  errorZeroRangeLength,
-        },
-    } {
-        t.Run(tc.name, func(t *testing.T) {
-            multiReader, err := newMultiObjectReader(ctx, multiObjectReaderConfig{
-                layer: mockInitReader,
-                parts: parts,
-                off:   tc.off,
-                ln:    tc.ln,
-            })
-            require.ErrorIs(t, err, tc.err)
-
-            if tc.err == nil {
-                off := tc.off
-                ln := tc.ln
-                if off+ln == 0 {
-                    ln = uint64(len(fullPayload))
-                }
-                data, err := io.ReadAll(multiReader)
-                require.NoError(t, err)
-                require.Equal(t, fullPayload[off:off+ln], data)
-            }
-        })
-    }
-}
@@ -1,11 +1,9 @@
 package layer
 
 import (
-    "bytes"
     "context"
     "encoding/hex"
-    "encoding/json"
-    "errors"
+    stderrors "errors"
     "fmt"
     "io"
     "sort"
@@ -13,12 +11,12 @@ import (
     "strings"
     "time"
 
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-    s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
-    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
     "github.com/minio/sio"
+    "github.com/nspcc-dev/neofs-s3-gw/api/data"
+    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
+    "github.com/nspcc-dev/neofs-s3-gw/api/layer/encryption"
+    oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
+    "github.com/nspcc-dev/neofs-sdk-go/user"
     "go.uber.org/zap"
 )
 
@@ -27,10 +25,6 @@ const (
     UploadPartNumberAttributeName = "S3-Upload-Part-Number"
     UploadCompletedParts          = "S3-Completed-Parts"
 
-    // MultipartObjectSize contains the real object size if object is combined (payload contains list of parts).
-    // This header is used to determine if object is combined.
-    MultipartObjectSize = "S3-Multipart-Object-Size"
-
     metaPrefix = "meta-"
     aclPrefix  = "acl-"
 
@@ -38,8 +32,8 @@ const (
     MaxSizePartsList    = 1000
     UploadMinPartNumber = 1
     UploadMaxPartNumber = 10000
-    UploadMinSize       = 5 * 1024 * 1024      // 5MB
-    UploadMaxSize       = 1024 * UploadMinSize // 5GB
+    uploadMinSize       = 5 * 1048576    // 5MB
+    uploadMaxSize       = 5 * 1073741824 // 5GB
 )
 
 type (
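Editor's note: both spellings denote the same S3 multipart limits; only the factoring differs. Spelled out:

    // Worked out: the two definitions agree.
    const (
        minPart = 5 * 1024 * 1024 // 5242880 bytes = 5 MiB (frostfs spelling)
        maxPart = 1024 * minPart  // 5368709120 bytes = 5 GiB
    )
    // neofs spelling: 5*1048576 == 5242880 and 5*1073741824 == 5368709120 — identical values.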
@@ -51,10 +45,10 @@ type (
     }
 
     CreateMultipartParams struct {
-        Info          *UploadInfoParams
-        Header        map[string]string
-        Data          *UploadData
-        CopiesNumbers []uint32
+        Info         *UploadInfoParams
+        Header       map[string]string
+        Data         *UploadData
+        CopiesNumber uint32
     }
 
     UploadData struct {
@@ -65,12 +59,11 @@ type (
     UploadPartParams struct {
         Info       *UploadInfoParams
         PartNumber int
-        Size       uint64
+        Size       int64
         Reader     io.Reader
     }
 
     UploadCopyParams struct {
-        Versioned  bool
         Info       *UploadInfoParams
         SrcObjInfo *data.ObjectInfo
         SrcBktInfo *data.BucketInfo
@@ -97,7 +90,7 @@ type (
         ETag         string
         LastModified string
         PartNumber   int
-        Size         uint64
+        Size         int64
     }
 
     ListMultipartUploadsParams struct {
@@ -147,12 +140,12 @@ func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
     }
 
     info := &data.MultipartInfo{
-        Key:           p.Info.Key,
-        UploadID:      p.Info.UploadID,
-        Owner:         n.Owner(ctx),
-        Created:       TimeNow(ctx),
-        Meta:          make(map[string]string, metaSize),
-        CopiesNumbers: p.CopiesNumbers,
+        Key:          p.Info.Key,
+        UploadID:     p.Info.UploadID,
+        Owner:        n.Owner(ctx),
+        Created:      time.Now(),
+        Meta:         make(map[string]string, metaSize),
+        CopiesNumber: p.CopiesNumber,
     }
 
     for key, val := range p.Header {
@@ -181,14 +174,14 @@ func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
 func (n *layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, error) {
     multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID)
     if err != nil {
-        if errors.Is(err, ErrNodeNotFound) {
-            return "", fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
+        if stderrors.Is(err, ErrNodeNotFound) {
+            return "", errors.GetAPIError(errors.ErrNoSuchUpload)
         }
         return "", err
     }
 
-    if p.Size > UploadMaxSize {
-        return "", fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), p.Size, UploadMaxSize)
+    if p.Size > uploadMaxSize {
+        return "", errors.GetAPIError(errors.ErrEntityTooLarge)
     }
 
     objInfo, err := n.uploadPart(ctx, multipartInfo, p)
@ -202,8 +195,8 @@ func (n *layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, er
|
|||
func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInfo, p *UploadPartParams) (*data.ObjectInfo, error) {
|
||||
encInfo := FormEncryptionInfo(multipartInfo.Meta)
|
||||
if err := p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
|
||||
n.reqLogger(ctx).Warn("mismatched obj encryptionInfo", zap.Error(err))
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrInvalidEncryptionParameters)
|
||||
n.log.Warn("mismatched obj encryptionInfo", zap.Error(err))
|
||||
return nil, errors.GetAPIError(errors.ErrInvalidEncryptionParameters)
|
||||
}
|
||||
|
||||
bktInfo := p.Info.Bkt
|
||||
|
@ -212,56 +205,49 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
|
|||
Creator: bktInfo.Owner,
|
||||
Attributes: make([][2]string, 2),
|
||||
Payload: p.Reader,
|
||||
CreationTime: TimeNow(ctx),
|
||||
CopiesNumber: multipartInfo.CopiesNumbers,
|
||||
CopiesNumber: multipartInfo.CopiesNumber,
|
||||
}
|
||||
|
||||
decSize := p.Size
|
||||
if p.Info.Encryption.Enabled() {
|
||||
r, encSize, err := encryptionReader(p.Reader, p.Size, p.Info.Encryption.Key())
|
||||
r, encSize, err := encryptionReader(p.Reader, uint64(p.Size), p.Info.Encryption.Key())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create ecnrypted reader: %w", err)
|
||||
}
|
||||
prm.Attributes = append(prm.Attributes, [2]string{AttributeDecryptedSize, strconv.FormatUint(p.Size, 10)})
|
||||
prm.Attributes = append(prm.Attributes, [2]string{AttributeDecryptedSize, strconv.FormatInt(p.Size, 10)})
|
||||
prm.Payload = r
|
||||
p.Size = encSize
|
||||
p.Size = int64(encSize)
|
||||
}
|
||||
|
||||
prm.Attributes[0][0], prm.Attributes[0][1] = UploadIDAttributeName, p.Info.UploadID
|
||||
prm.Attributes[1][0], prm.Attributes[1][1] = UploadPartNumberAttributeName, strconv.Itoa(p.PartNumber)
|
||||
|
||||
size, id, hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
|
||||
id, hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.Info.Encryption.Enabled() {
|
||||
size = decSize
|
||||
}
|
||||
|
||||
n.reqLogger(ctx).Debug("upload part",
|
||||
zap.String("multipart upload", p.Info.UploadID), zap.Int("part number", p.PartNumber),
|
||||
zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
|
||||
|
||||
partInfo := &data.PartInfo{
|
||||
Key: p.Info.Key,
|
||||
UploadID: p.Info.UploadID,
|
||||
Number: p.PartNumber,
|
||||
OID: id,
|
||||
Size: size,
|
||||
Size: decSize,
|
||||
ETag: hex.EncodeToString(hash),
|
||||
Created: prm.CreationTime,
|
||||
Created: time.Now(),
|
||||
}
|
||||
|
||||
oldPartID, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo)
|
||||
oldPartIDNotFound := errors.Is(err, ErrNoNodeToRemove)
|
||||
oldPartIDNotFound := stderrors.Is(err, ErrNoNodeToRemove)
|
||||
if err != nil && !oldPartIDNotFound {
|
||||
return nil, err
|
||||
}
|
||||
if !oldPartIDNotFound {
|
||||
if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil {
|
||||
n.reqLogger(ctx).Error("couldn't delete old part object", zap.Error(err),
|
||||
zap.String("cid", bktInfo.CID.EncodeToString()),
|
||||
zap.String("oid", oldPartID.EncodeToString()))
|
||||
n.log.Error("couldn't delete old part object", zap.Error(err),
|
||||
zap.String("cnrID", bktInfo.CID.EncodeToString()),
|
||||
zap.String("bucket name", bktInfo.Name),
|
||||
zap.String("objID", oldPartID.EncodeToString()))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -282,47 +268,91 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
|
|||
func (n *layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.ObjectInfo, error) {
|
||||
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNodeNotFound) {
|
||||
return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
|
||||
if stderrors.Is(err, ErrNodeNotFound) {
|
||||
return nil, errors.GetAPIError(errors.ErrNoSuchUpload)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
size := p.SrcObjInfo.Size
|
||||
if p.Range != nil {
|
||||
size = p.Range.End - p.Range.Start + 1
|
||||
if p.Range.End > p.SrcObjInfo.Size {
|
||||
return nil, fmt.Errorf("%w: %d-%d/%d", s3errors.GetAPIError(s3errors.ErrInvalidCopyPartRangeSource), p.Range.Start, p.Range.End, p.SrcObjInfo.Size)
|
||||
size = int64(p.Range.End - p.Range.Start + 1)
|
||||
if p.Range.End > uint64(p.SrcObjInfo.Size) {
|
||||
return nil, errors.GetAPIError(errors.ErrInvalidCopyPartRangeSource)
|
||||
}
|
||||
}
|
||||
if size > UploadMaxSize {
|
||||
return nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), size, UploadMaxSize)
|
||||
if size > uploadMaxSize {
|
||||
return nil, errors.GetAPIError(errors.ErrEntityTooLarge)
|
||||
}
|
||||
|
||||
objPayload, err := n.GetObject(ctx, &GetObjectParams{
|
||||
ObjectInfo: p.SrcObjInfo,
|
||||
Versioned: p.Versioned,
|
||||
Range: p.Range,
|
||||
BucketInfo: p.SrcBktInfo,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get object to upload copy: %w", err)
|
||||
}
|
||||
pr, pw := io.Pipe()
|
||||
|
||||
go func() {
|
||||
err = n.GetObject(ctx, &GetObjectParams{
|
||||
ObjectInfo: p.SrcObjInfo,
|
||||
Writer: pw,
|
||||
Range: p.Range,
|
||||
BucketInfo: p.SrcBktInfo,
|
||||
})
|
||||
|
||||
if err = pw.CloseWithError(err); err != nil {
|
||||
n.log.Error("could not get object", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
params := &UploadPartParams{
|
||||
Info: p.Info,
|
||||
PartNumber: p.PartNumber,
|
||||
Size: size,
|
||||
Reader: objPayload,
|
||||
Reader: pr,
|
||||
}
|
||||
|
||||
return n.uploadPart(ctx, multipartInfo, params)
|
||||
}
|
||||
|
||||
// implements io.Reader of payloads of the object list stored in the NeoFS network.
|
||||
type multiObjectReader struct {
|
||||
ctx context.Context
|
||||
|
||||
layer *layer
|
||||
|
||||
prm getParams
|
||||
|
||||
curReader io.Reader
|
||||
|
||||
parts []*data.PartInfo
|
||||
}
|
||||
|
||||
func (x *multiObjectReader) Read(p []byte) (n int, err error) {
|
||||
if x.curReader != nil {
|
||||
n, err = x.curReader.Read(p)
|
||||
if !stderrors.Is(err, io.EOF) {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(x.parts) == 0 {
|
||||
return n, io.EOF
|
||||
}
|
||||
|
||||
x.prm.oid = x.parts[0].OID
|
||||
|
||||
x.curReader, err = x.layer.initObjectPayloadReader(x.ctx, x.prm)
|
||||
if err != nil {
|
||||
return n, fmt.Errorf("init payload reader for the next part: %w", err)
|
||||
}
|
||||
|
||||
x.parts = x.parts[1:]
|
||||
|
||||
next, err := x.Read(p[n:])
|
||||
|
||||
return n + next, err
|
||||
}
|
||||
|
||||
func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipartParams) (*UploadData, *data.ExtendedObjectInfo, error) {
|
||||
for i := 1; i < len(p.Parts); i++ {
|
||||
if p.Parts[i].PartNumber <= p.Parts[i-1].PartNumber {
|
||||
return nil, nil, s3errors.GetAPIError(s3errors.ErrInvalidPartOrder)
|
||||
return nil, nil, errors.GetAPIError(errors.ErrInvalidPartOrder)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -333,10 +363,10 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
encInfo := FormEncryptionInfo(multipartInfo.Meta)
|
||||
|
||||
if len(partsInfo) < len(p.Parts) {
|
||||
return nil, nil, fmt.Errorf("%w: found %d parts, need %d", s3errors.GetAPIError(s3errors.ErrInvalidPart), len(partsInfo), len(p.Parts))
|
||||
return nil, nil, errors.GetAPIError(errors.ErrInvalidPart)
|
||||
}
|
||||
|
||||
var multipartObjetSize uint64
|
||||
var multipartObjetSize int64
|
||||
var encMultipartObjectSize uint64
|
||||
parts := make([]*data.PartInfo, 0, len(p.Parts))
|
||||
|
||||
|
@ -344,19 +374,17 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
for i, part := range p.Parts {
|
||||
partInfo := partsInfo[part.PartNumber]
|
||||
if partInfo == nil || part.ETag != partInfo.ETag {
|
||||
return nil, nil, fmt.Errorf("%w: unknown part %d or etag mismatched", s3errors.GetAPIError(s3errors.ErrInvalidPart), part.PartNumber)
|
||||
return nil, nil, errors.GetAPIError(errors.ErrInvalidPart)
|
||||
}
|
||||
delete(partsInfo, part.PartNumber)
|
||||
|
||||
// for the last part we have no minimum size limit
|
||||
if i != len(p.Parts)-1 && partInfo.Size < UploadMinSize {
|
||||
return nil, nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooSmall), partInfo.Size, UploadMinSize)
|
||||
if i != len(p.Parts)-1 && partInfo.Size < uploadMinSize {
|
||||
return nil, nil, errors.GetAPIError(errors.ErrEntityTooSmall)
|
||||
}
|
||||
parts = append(parts, partInfo)
|
||||
multipartObjetSize += partInfo.Size // even if encryption is enabled size is actual (decrypted)
|
||||
|
||||
if encInfo.Enabled {
|
||||
encPartSize, err := sio.EncryptedSize(partInfo.Size)
|
||||
encPartSize, err := sio.EncryptedSize(uint64(partInfo.Size))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("compute encrypted size: %w", err)
|
||||
}
|
||||
|
@ -374,7 +402,6 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
|
||||
initMetadata := make(map[string]string, len(multipartInfo.Meta)+1)
|
||||
initMetadata[UploadCompletedParts] = completedPartsHeader.String()
|
||||
initMetadata[MultipartObjectSize] = strconv.FormatUint(multipartObjetSize, 10)
|
||||
|
||||
uploadData := &UploadData{
|
||||
TagSet: make(map[string]string),
|
||||
|
@ -394,39 +421,43 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
initMetadata[AttributeEncryptionAlgorithm] = encInfo.Algorithm
|
||||
initMetadata[AttributeHMACKey] = encInfo.HMACKey
|
||||
initMetadata[AttributeHMACSalt] = encInfo.HMACSalt
|
||||
initMetadata[AttributeDecryptedSize] = strconv.FormatUint(multipartObjetSize, 10)
|
||||
multipartObjetSize = encMultipartObjectSize
|
||||
initMetadata[AttributeDecryptedSize] = strconv.FormatInt(multipartObjetSize, 10)
|
||||
multipartObjetSize = int64(encMultipartObjectSize)
|
||||
}
|
||||
|
||||
partsData, err := json.Marshal(parts)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("marshal parst for combined object: %w", err)
|
||||
r := &multiObjectReader{
|
||||
ctx: ctx,
|
||||
layer: n,
|
||||
parts: parts,
|
||||
}
|
||||
|
||||
r.prm.bktInfo = p.Info.Bkt
|
||||
|
||||
extObjInfo, err := n.PutObject(ctx, &PutObjectParams{
|
||||
BktInfo: p.Info.Bkt,
|
||||
Object: p.Info.Key,
|
||||
Reader: bytes.NewReader(partsData),
|
||||
Header: initMetadata,
|
||||
Size: multipartObjetSize,
|
||||
Encryption: p.Info.Encryption,
|
||||
CopiesNumbers: multipartInfo.CopiesNumbers,
|
||||
BktInfo: p.Info.Bkt,
|
||||
Object: p.Info.Key,
|
||||
Reader: r,
|
||||
Header: initMetadata,
|
||||
Size: multipartObjetSize,
|
||||
Encryption: p.Info.Encryption,
|
||||
CopiesNumber: multipartInfo.CopiesNumber,
|
||||
})
|
||||
if err != nil {
|
||||
n.reqLogger(ctx).Error("could not put a completed object (multipart upload)",
|
||||
n.log.Error("could not put a completed object (multipart upload)",
|
||||
zap.String("uploadID", p.Info.UploadID),
|
||||
zap.String("uploadKey", p.Info.Key),
|
||||
zap.Error(err))
|
||||
|
||||
return nil, nil, s3errors.GetAPIError(s3errors.ErrInternalError)
|
||||
return nil, nil, errors.GetAPIError(errors.ErrInternalError)
|
||||
}
|
||||
|
||||
var addr oid.Address
|
||||
addr.SetContainer(p.Info.Bkt.CID)
|
||||
for _, partInfo := range partsInfo {
|
||||
if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil {
|
||||
n.reqLogger(ctx).Warn("could not delete upload part",
|
||||
zap.Stringer("cid", p.Info.Bkt.CID), zap.Stringer("oid", &partInfo.OID),
|
||||
n.log.Warn("could not delete upload part",
|
||||
zap.Stringer("object id", &partInfo.OID),
|
||||
zap.Stringer("bucket id", p.Info.Bkt.CID),
|
||||
zap.Error(err))
|
||||
}
|
||||
addr.SetObject(partInfo.OID)
|
||||
|
@ -504,7 +535,7 @@ func (n *layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) e
|
|||
|
||||
for _, info := range parts {
|
||||
if err = n.objectDelete(ctx, p.Bkt, info.OID); err != nil {
|
||||
n.reqLogger(ctx).Warn("couldn't delete part", zap.String("cid", p.Bkt.CID.EncodeToString()),
|
||||
n.log.Warn("couldn't delete part", zap.String("cid", p.Bkt.CID.EncodeToString()),
|
||||
zap.String("oid", info.OID.EncodeToString()), zap.Int("part number", info.Number), zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
@ -521,8 +552,8 @@ func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
|
|||
|
||||
encInfo := FormEncryptionInfo(multipartInfo.Meta)
|
||||
if err = p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
|
||||
n.reqLogger(ctx).Warn("mismatched obj encryptionInfo", zap.Error(err))
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrInvalidEncryptionParameters)
|
||||
n.log.Warn("mismatched obj encryptionInfo", zap.Error(err))
|
||||
return nil, errors.GetAPIError(errors.ErrInvalidEncryptionParameters)
|
||||
}
|
||||
|
||||
res.Owner = multipartInfo.Owner
|
||||
|
@ -565,8 +596,8 @@ func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
|
|||
func (n *layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.MultipartInfo, map[int]*data.PartInfo, error) {
|
||||
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Bkt, p.Key, p.UploadID)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNodeNotFound) {
|
||||
return nil, nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
|
||||
if stderrors.Is(err, ErrNodeNotFound) {
|
||||
return nil, nil, errors.GetAPIError(errors.ErrNoSuchUpload)
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -577,20 +608,10 @@ func (n *layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.
|
|||
}
|
||||
|
||||
res := make(map[int]*data.PartInfo, len(parts))
|
||||
partsNumbers := make([]int, len(parts))
|
||||
oids := make([]string, len(parts))
|
||||
for i, part := range parts {
|
||||
for _, part := range parts {
|
||||
res[part.Number] = part
|
||||
partsNumbers[i] = part.Number
|
||||
oids[i] = part.OID.EncodeToString()
|
||||
}
|
||||
|
||||
n.reqLogger(ctx).Debug("part details",
|
||||
zap.Stringer("cid", p.Bkt.CID),
|
||||
zap.String("upload id", p.UploadID),
|
||||
zap.Ints("part numbers", partsNumbers),
|
||||
zap.Strings("oids", oids))
|
||||
|
||||
return multipartInfo, res, nil
|
||||
}
|
||||
|
||||
|
|
|
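The hunks above replace the upstream approach to completing a multipart upload (store a small "combined object" whose JSON payload lists the parts, streamed lazily on read) with the support/v0 approach of streaming every part through `multiObjectReader` at completion time. The sketch below is a minimal, standalone illustration of the chained-reader pattern that `multiObjectReader.Read` implements; `chainedReader` and the sample strings are hypothetical stand-ins, not gateway code.

```go
package main

import (
    "fmt"
    "io"
    "strings"
)

// chainedReader drains the current part until io.EOF, then switches to the
// next one; io.EOF surfaces to the caller only after the last part is drained.
type chainedReader struct {
    cur   io.Reader
    parts []io.Reader // stand-ins for per-part payload readers
}

func (c *chainedReader) Read(p []byte) (n int, err error) {
    if c.cur != nil {
        n, err = c.cur.Read(p)
        if err != io.EOF {
            return n, err
        }
    }
    if len(c.parts) == 0 {
        return n, io.EOF
    }
    c.cur, c.parts = c.parts[0], c.parts[1:]
    next, err := c.Read(p[n:]) // recurse into the next part, as the diff does
    return n + next, err
}

func main() {
    r := &chainedReader{parts: []io.Reader{
        strings.NewReader("part-1|"),
        strings.NewReader("part-2|"),
        strings.NewReader("part-3"),
    }}
    data, _ := io.ReadAll(r)
    fmt.Println(string(data)) // part-1|part-2|part-3
}
```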
@@ -7,21 +7,21 @@ import (
    "io"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
    "github.com/nspcc-dev/neofs-sdk-go/bearer"
    "github.com/nspcc-dev/neofs-sdk-go/container"
    "github.com/nspcc-dev/neofs-sdk-go/container/acl"
    cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
    "github.com/nspcc-dev/neofs-sdk-go/eacl"
    "github.com/nspcc-dev/neofs-sdk-go/netmap"
    "github.com/nspcc-dev/neofs-sdk-go/object"
    oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
    "github.com/nspcc-dev/neofs-sdk-go/session"
    "github.com/nspcc-dev/neofs-sdk-go/user"
)

// PrmContainerCreate groups parameters of FrostFS.CreateContainer operation.
// PrmContainerCreate groups parameters of NeoFS.CreateContainer operation.
type PrmContainerCreate struct {
    // FrostFS identifier of the container creator.
    // NeoFS identifier of the container creator.
    Creator user.ID

    // Container placement policy.

@@ -30,9 +30,6 @@ type PrmContainerCreate struct {
    // Name for the container.
    Name string

    // CreationTime value for Timestamp attribute
    CreationTime time.Time

    // Token of the container's creation session. Nil means session absence.
    SessionToken *session.Container

@@ -43,7 +40,7 @@ type PrmContainerCreate struct {
    AdditionalAttributes [][2]string
}

// PrmAuth groups authentication parameters for the FrostFS operation.
// PrmAuth groups authentication parameters for the NeoFS operation.
type PrmAuth struct {
    // Bearer token to be used for the operation. Overlaps PrivateKey. Optional.
    BearerToken *bearer.Token

@@ -52,7 +49,7 @@ type PrmAuth struct {
    PrivateKey *ecdsa.PrivateKey
}

// PrmObjectRead groups parameters of FrostFS.ReadObject operation.
// PrmObjectRead groups parameters of NeoFS.ReadObject operation.
type PrmObjectRead struct {
    // Authentication parameters.
    PrmAuth

@@ -73,7 +70,7 @@ type PrmObjectRead struct {
    PayloadRange [2]uint64
}

// ObjectPart represents partially read FrostFS object.
// ObjectPart represents partially read NeoFS object.
type ObjectPart struct {
    // Object header with optional in-memory payload part.
    Head *object.Object

@@ -83,7 +80,7 @@ type ObjectPart struct {
    Payload io.ReadCloser
}

// PrmObjectCreate groups parameters of FrostFS.CreateObject operation.
// PrmObjectCreate groups parameters of NeoFS.CreateObject operation.
type PrmObjectCreate struct {
    // Authentication parameters.
    PrmAuth

@@ -91,15 +88,12 @@ type PrmObjectCreate struct {
    // Container to store the object.
    Container cid.ID

    // FrostFS identifier of the object creator.
    // NeoFS identifier of the object creator.
    Creator user.ID

    // Key-value object attributes.
    Attributes [][2]string

    // Value for Timestamp attribute (optional).
    CreationTime time.Time

    // List of ids to lock (optional).
    Locks []oid.ID

@@ -113,10 +107,10 @@ type PrmObjectCreate struct {
    Payload io.Reader

    // Number of object copies that is enough to consider put successful.
    CopiesNumber []uint32
    CopiesNumber uint32
}

// PrmObjectDelete groups parameters of FrostFS.DeleteObject operation.
// PrmObjectDelete groups parameters of NeoFS.DeleteObject operation.
type PrmObjectDelete struct {
    // Authentication parameters.
    PrmAuth

@@ -128,33 +122,12 @@ type PrmObjectDelete struct {
    Object oid.ID
}

// PrmObjectSearch groups parameters of FrostFS.sear SearchObjects operation.
type PrmObjectSearch struct {
    // Authentication parameters.
    PrmAuth
// ErrAccessDenied is returned from NeoFS in case of access violation.
var ErrAccessDenied = errors.New("access denied")

    // Container to select the objects from.
    Container cid.ID

    // Key-value object attribute which should be
    // presented in selected objects. Optional, empty key means any.
    ExactAttribute [2]string

    // File prefix of the selected objects. Optional, empty value means any.
    FilePrefix string
}

var (
    // ErrAccessDenied is returned from FrostFS in case of access violation.
    ErrAccessDenied = errors.New("access denied")

    // ErrGatewayTimeout is returned from FrostFS in case of timeout, deadline exceeded etc.
    ErrGatewayTimeout = errors.New("gateway timeout")
)

// FrostFS represents virtual connection to FrostFS network.
type FrostFS interface {
    // CreateContainer creates and saves parameterized container in FrostFS.
// NeoFS represents virtual connection to NeoFS network.
type NeoFS interface {
    // CreateContainer creates and saves parameterized container in NeoFS.
    // It sets 'Timestamp' attribute to the current time.
    // It returns the ID of the saved container.
    //

@@ -164,7 +137,7 @@ type FrostFS interface {
    // prevented the container from being created.
    CreateContainer(context.Context, PrmContainerCreate) (cid.ID, error)

    // Container reads a container from FrostFS by ID.
    // Container reads a container from NeoFS by ID.
    //
    // It returns exactly one non-nil value. It returns any error encountered which
    // prevented the container from being read.

@@ -176,26 +149,26 @@ type FrostFS interface {
    // prevented the containers from being listed.
    UserContainers(context.Context, user.ID) ([]cid.ID, error)

    // SetContainerEACL saves the eACL table of the container in FrostFS. The
    // SetContainerEACL saves the eACL table of the container in NeoFS. The
    // extended ACL is modified within session if session token is not nil.
    //
    // It returns any error encountered which prevented the eACL from being saved.
    SetContainerEACL(context.Context, eacl.Table, *session.Container) error

    // ContainerEACL reads the container eACL from FrostFS by the container ID.
    // ContainerEACL reads the container eACL from NeoFS by the container ID.
    //
    // It returns exactly one non-nil value. It returns any error encountered which
    // prevented the eACL from being read.
    ContainerEACL(context.Context, cid.ID) (*eacl.Table, error)

    // DeleteContainer marks the container to be removed from FrostFS by ID.
    // DeleteContainer marks the container to be removed from NeoFS by ID.
    // Request is sent within session if the session token is specified.
    // Successful return does not guarantee actual removal.
    //
    // It returns any error encountered which prevented the removal request from being sent.
    DeleteContainer(context.Context, cid.ID, *session.Container) error

    // ReadObject reads a part of the object from the FrostFS container by identifier.
    // ReadObject reads a part of the object from the NeoFS container by identifier.
    // Exact part is returned according to the parameters:
    //   * with header only: empty payload (both in-mem and reader parts are nil);
    //   * with payload only: header is nil (zero range means full payload);

@@ -211,7 +184,7 @@ type FrostFS interface {
    // prevented the object header from being read.
    ReadObject(context.Context, PrmObjectRead) (*ObjectPart, error)

    // CreateObject creates and saves a parameterized object in the FrostFS container.
    // CreateObject creates and saves a parameterized object in the NeoFS container.
    // It sets 'Timestamp' attribute to the current time.
    // It returns the ID of the saved object.
    //

@@ -223,7 +196,7 @@ type FrostFS interface {
    // prevented the container from being created.
    CreateObject(context.Context, PrmObjectCreate) (oid.ID, error)

    // DeleteObject marks the object to be removed from the FrostFS container by identifier.
    // DeleteObject marks the object to be removed from the NeoFS container by identifier.
    // Successful return does not guarantee actual removal.
    //
    // It returns ErrAccessDenied on remove access violation.

@@ -231,20 +204,11 @@ type FrostFS interface {
    // It returns any error encountered which prevented the removal request from being sent.
    DeleteObject(context.Context, PrmObjectDelete) error

    // SearchObjects performs object search from the NeoFS container according
    // to the specified parameters. It searches user's objects only.
    //
    // It returns ErrAccessDenied on selection access violation.
    //
    // It returns exactly one non-nil value. It returns any error encountered which
    // prevented the objects from being selected.
    SearchObjects(context.Context, PrmObjectSearch) ([]oid.ID, error)

    // TimeToEpoch computes current epoch and the epoch that corresponds to the provided now and future time.
    // TimeToEpoch computes current epoch and the epoch that corresponds to the provided time.
    // Note:
    //   * future time must be after the now
    //   * future time will be ceil rounded to match epoch
    //   * time must be in the future
    //   * time will be ceil rounded to match epoch
    //
    // It returns any error encountered which prevented computing epochs.
    TimeToEpoch(ctx context.Context, now time.Time, future time.Time) (uint64, uint64, error)
    TimeToEpoch(context.Context, time.Time) (uint64, uint64, error)
}
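The interface change above narrows `TimeToEpoch` from an explicit `(now, future)` pair to a single target time, but both doc comments describe the same ceil-rounded conversion of a wall-clock interval into epoch numbers. Below is a hedged sketch of that arithmetic; `msPerEpoch` is an assumed network parameter and `timeToEpoch` is an illustrative stand-in, neither is taken from this diff.

```go
package main

import (
    "fmt"
    "time"
)

// timeToEpoch converts the distance between now and future into whole epochs,
// rounding up as the interface doc comment requires. msPerEpoch is an assumed
// per-network epoch duration in milliseconds.
func timeToEpoch(now, future time.Time, curEpoch, msPerEpoch uint64) (uint64, uint64, error) {
    if future.Before(now) {
        return 0, 0, fmt.Errorf("time %s must be in the future (now %s)", future, now)
    }

    ms := uint64(future.Sub(now).Milliseconds())
    dur := ms / msPerEpoch
    if ms%msPerEpoch != 0 {
        dur++ // ceil rounding to the next epoch boundary
    }

    return curEpoch, curEpoch + dur, nil
}

func main() {
    now := time.Now()
    cur, exp, _ := timeToEpoch(now, now.Add(90*time.Minute), 100, 3_600_000)
    fmt.Println(cur, exp) // 100 102: 90 minutes rounds up to two one-hour epochs
}
```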
@@ -10,53 +10,42 @@ import (
    "io"
    "time"

    objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
    apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
    objectv2 "github.com/nspcc-dev/neofs-api-go/v2/object"
    "github.com/nspcc-dev/neofs-s3-gw/api"
    "github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
    "github.com/nspcc-dev/neofs-sdk-go/bearer"
    "github.com/nspcc-dev/neofs-sdk-go/checksum"
    "github.com/nspcc-dev/neofs-sdk-go/container"
    cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
    "github.com/nspcc-dev/neofs-sdk-go/eacl"
    "github.com/nspcc-dev/neofs-sdk-go/object"
    oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
    "github.com/nspcc-dev/neofs-sdk-go/session"
    "github.com/nspcc-dev/neofs-sdk-go/user"
)

type TestFrostFS struct {
    FrostFS
type TestNeoFS struct {
    NeoFS

    objects      map[string]*object.Object
    objectErrors map[string]error
    containers   map[string]*container.Container
    eaclTables   map[string]*eacl.Table
    currentEpoch uint64
}

func NewTestFrostFS() *TestFrostFS {
    return &TestFrostFS{
        objects:      make(map[string]*object.Object),
        objectErrors: make(map[string]error),
        containers:   make(map[string]*container.Container),
        eaclTables:   make(map[string]*eacl.Table),
func NewTestNeoFS() *TestNeoFS {
    return &TestNeoFS{
        objects:    make(map[string]*object.Object),
        containers: make(map[string]*container.Container),
        eaclTables: make(map[string]*eacl.Table),
    }
}

func (t *TestFrostFS) CurrentEpoch() uint64 {
func (t *TestNeoFS) CurrentEpoch() uint64 {
    return t.currentEpoch
}

func (t *TestFrostFS) SetObjectError(addr oid.Address, err error) {
    if err == nil {
        delete(t.objectErrors, addr.EncodeToString())
    } else {
        t.objectErrors[addr.EncodeToString()] = err
    }
}

func (t *TestFrostFS) Objects() []*object.Object {
func (t *TestNeoFS) Objects() []*object.Object {
    res := make([]*object.Object, 0, len(t.objects))

    for _, obj := range t.objects {

@@ -66,21 +55,11 @@ func (t *TestFrostFS) Objects() []*object.Object {
    return res
}

func (t *TestFrostFS) ObjectExists(objID oid.ID) bool {
    for _, obj := range t.objects {
        if id, _ := obj.ID(); id.Equals(objID) {
            return true
        }
    }

    return false
}

func (t *TestFrostFS) AddObject(key string, obj *object.Object) {
func (t *TestNeoFS) AddObject(key string, obj *object.Object) {
    t.objects[key] = obj
}

func (t *TestFrostFS) ContainerID(name string) (cid.ID, error) {
func (t *TestNeoFS) ContainerID(name string) (cid.ID, error) {
    for id, cnr := range t.containers {
        if container.Name(*cnr) == name {
            var cnrID cid.ID

@@ -90,18 +69,13 @@ func (t *TestFrostFS) ContainerID(name string) (cid.ID, error) {
    return cid.ID{}, fmt.Errorf("not found")
}

func (t *TestFrostFS) CreateContainer(_ context.Context, prm PrmContainerCreate) (cid.ID, error) {
func (t *TestNeoFS) CreateContainer(_ context.Context, prm PrmContainerCreate) (cid.ID, error) {
    var cnr container.Container
    cnr.Init()
    cnr.SetOwner(prm.Creator)
    cnr.SetPlacementPolicy(prm.Policy)
    cnr.SetBasicACL(prm.BasicACL)

    creationTime := prm.CreationTime
    if creationTime.IsZero() {
        creationTime = time.Now()
    }
    container.SetCreationTime(&cnr, creationTime)
    container.SetCreationTime(&cnr, time.Now())

    if prm.Name != "" {
        var d container.Domain

@@ -127,13 +101,13 @@ func (t *TestFrostFS) CreateContainer(_ context.Context, prm PrmContainerCreate)
    return id, nil
}

func (t *TestFrostFS) DeleteContainer(_ context.Context, cnrID cid.ID, _ *session.Container) error {
func (t *TestNeoFS) DeleteContainer(_ context.Context, cnrID cid.ID, _ *session.Container) error {
    delete(t.containers, cnrID.EncodeToString())

    return nil
}

func (t *TestFrostFS) Container(_ context.Context, id cid.ID) (*container.Container, error) {
func (t *TestNeoFS) Container(_ context.Context, id cid.ID) (*container.Container, error) {
    for k, v := range t.containers {
        if k == id.EncodeToString() {
            return v, nil

@@ -143,7 +117,7 @@ func (t *TestFrostFS) Container(_ context.Context, id cid.ID) (*container.Contai
    return nil, fmt.Errorf("container not found %s", id)
}

func (t *TestFrostFS) UserContainers(_ context.Context, _ user.ID) ([]cid.ID, error) {
func (t *TestNeoFS) UserContainers(_ context.Context, _ user.ID) ([]cid.ID, error) {
    var res []cid.ID
    for k := range t.containers {
        var idCnr cid.ID

@@ -156,20 +130,16 @@ func (t *TestFrostFS) UserContainers(_ context.Context, _ user.ID) ([]cid.ID, er
    return res, nil
}

func (t *TestFrostFS) ReadObject(ctx context.Context, prm PrmObjectRead) (*ObjectPart, error) {
func (t *TestNeoFS) ReadObject(ctx context.Context, prm PrmObjectRead) (*ObjectPart, error) {
    var addr oid.Address
    addr.SetContainer(prm.Container)
    addr.SetObject(prm.Object)

    sAddr := addr.EncodeToString()

    if err := t.objectErrors[sAddr]; err != nil {
        return nil, err
    }

    if obj, ok := t.objects[sAddr]; ok {
        owner := getOwner(ctx)
        if !obj.OwnerID().Equals(owner) && !t.isPublicRead(prm.Container) {
        if !obj.OwnerID().Equals(owner) {
            return nil, ErrAccessDenied
        }

@@ -186,10 +156,10 @@ func (t *TestFrostFS) ReadObject(ctx context.Context, prm PrmObjectRead) (*Objec
        }, nil
    }

    return nil, fmt.Errorf("%w: %s", apistatus.ObjectNotFound{}, addr)
    return nil, fmt.Errorf("object not found %s", addr)
}

func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) {
func (t *TestNeoFS) CreateObject(ctx context.Context, prm PrmObjectCreate) (oid.ID, error) {
    b := make([]byte, 32)
    if _, err := io.ReadFull(rand.Reader, b); err != nil {
        return oid.ID{}, err

@@ -248,15 +218,11 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.
    return objID, nil
}

func (t *TestFrostFS) DeleteObject(ctx context.Context, prm PrmObjectDelete) error {
func (t *TestNeoFS) DeleteObject(ctx context.Context, prm PrmObjectDelete) error {
    var addr oid.Address
    addr.SetContainer(prm.Container)
    addr.SetObject(prm.Object)

    if err := t.objectErrors[addr.EncodeToString()]; err != nil {
        return err
    }

    if obj, ok := t.objects[addr.EncodeToString()]; ok {
        owner := getOwner(ctx)
        if !obj.OwnerID().Equals(owner) {

@@ -269,11 +235,11 @@ func (t *TestFrostFS) DeleteObject(ctx context.Context, prm PrmObjectDelete) err
    return nil
}

func (t *TestFrostFS) TimeToEpoch(_ context.Context, now, futureTime time.Time) (uint64, uint64, error) {
    return t.currentEpoch, t.currentEpoch + uint64(futureTime.Sub(now).Seconds()), nil
func (t *TestNeoFS) TimeToEpoch(_ context.Context, futureTime time.Time) (uint64, uint64, error) {
    return t.currentEpoch, t.currentEpoch + uint64(futureTime.Second()), nil
}

func (t *TestFrostFS) AllObjects(cnrID cid.ID) []oid.ID {
func (t *TestNeoFS) AllObjects(cnrID cid.ID) []oid.ID {
    result := make([]oid.ID, 0, len(t.objects))

    for _, val := range t.objects {

@@ -287,7 +253,7 @@ func (t *TestFrostFS) AllObjects(cnrID cid.ID) []oid.ID {
    return result
}

func (t *TestFrostFS) SetContainerEACL(_ context.Context, table eacl.Table, _ *session.Container) error {
func (t *TestNeoFS) SetContainerEACL(_ context.Context, table eacl.Table, _ *session.Container) error {
    cnrID, ok := table.CID()
    if !ok {
        return errors.New("invalid cid")

@@ -302,7 +268,7 @@ func (t *TestFrostFS) SetContainerEACL(_ context.Context, table eacl.Table, _ *s
    return nil
}

func (t *TestFrostFS) ContainerEACL(_ context.Context, cnrID cid.ID) (*eacl.Table, error) {
func (t *TestNeoFS) ContainerEACL(_ context.Context, cnrID cid.ID) (*eacl.Table, error) {
    table, ok := t.eaclTables[cnrID.EncodeToString()]
    if !ok {
        return nil, errors.New("not found")

@@ -311,27 +277,8 @@ func (t *TestFrostFS) ContainerEACL(_ context.Context, cnrID cid.ID) (*eacl.Tabl
    return table, nil
}

func (t *TestFrostFS) isPublicRead(cnrID cid.ID) bool {
    table, ok := t.eaclTables[cnrID.EncodeToString()]
    if !ok {
        return false
    }

    for _, rec := range table.Records() {
        if rec.Operation() == eacl.OperationGet && len(rec.Filters()) == 0 {
            for _, trgt := range rec.Targets() {
                if trgt.Role() == eacl.RoleOthers {
                    return rec.Action() == eacl.ActionAllow
                }
            }
        }
    }

    return false
}

func getOwner(ctx context.Context) user.ID {
    if bd, ok := ctx.Value(middleware.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
    if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
        return bearer.ResolveIssuer(*bd.Gate.BearerToken)
    }
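The in-memory mock above lets the layer's unit tests run without a real network. A hedged usage sketch follows: `NewTestNeoFS`, `CreateContainer`, and `ContainerID` come from the code above, while the test wiring, the package placement, and the `require` assertions are illustrative assumptions.

```go
package layer

import (
    "context"
    "testing"

    "github.com/stretchr/testify/require"
)

// Round-trip a named container through the mock: create it, then resolve
// the generated ID back by name, the way the gateway resolves bucket names.
func TestMockContainerRoundTrip(t *testing.T) {
    mock := NewTestNeoFS()

    id, err := mock.CreateContainer(context.Background(), PrmContainerCreate{
        Name: "bucket-1",
    })
    require.NoError(t, err)

    resolved, err := mock.ContainerID("bucket-1")
    require.NoError(t, err)
    require.Equal(t, id, resolved)
}
```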
@@ -7,16 +7,16 @@ import (
    errorsStd "errors"
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "github.com/nspcc-dev/neofs-s3-gw/api"
    "github.com/nspcc-dev/neofs-s3-gw/api/data"
    "go.uber.org/zap"
)

type PutBucketNotificationConfigurationParams struct {
    RequestInfo   *middleware.ReqInfo
    RequestInfo   *api.ReqInfo
    BktInfo       *data.BucketInfo
    Configuration *data.NotificationConfiguration
    CopiesNumbers []uint32
    CopiesNumber  uint32
}

func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBucketNotificationConfigurationParams) error {

@@ -30,11 +30,10 @@ func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBu
        Creator:      p.BktInfo.Owner,
        Payload:      bytes.NewReader(confXML),
        Filepath:     p.BktInfo.NotificationConfigurationObjectName(),
        CreationTime: TimeNow(ctx),
        CopiesNumber: p.CopiesNumbers,
        CopiesNumber: p.CopiesNumber,
    }

    _, objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
    objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
    if err != nil {
        return err
    }

@@ -47,9 +46,10 @@ func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBu

    if !objIDToDeleteNotFound {
        if err = n.objectDelete(ctx, p.BktInfo, objIDToDelete); err != nil {
            n.reqLogger(ctx).Error("couldn't delete notification configuration object", zap.Error(err),
                zap.String("cid", p.BktInfo.CID.EncodeToString()),
                zap.String("oid", objIDToDelete.EncodeToString()))
            n.log.Error("couldn't delete notification configuration object", zap.Error(err),
                zap.String("cnrID", p.BktInfo.CID.EncodeToString()),
                zap.String("bucket name", p.BktInfo.Name),
                zap.String("objID", objIDToDelete.EncodeToString()))
        }
    }
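Both variants of `PutBucketNotificationConfiguration` keep the same write-new-then-delete-old ordering, so a failed delete can only leave a stale object behind and never lose the new configuration. A standalone sketch of that pattern, assuming hypothetical `put`/`del` callbacks rather than the gateway's actual API:

```go
package main

import (
    "context"

    oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
    "go.uber.org/zap"
)

// replaceObject writes the new object first and only then best-effort deletes
// the previous one; a delete failure is logged, not returned, because the new
// state is already durable by that point.
func replaceObject(
    ctx context.Context,
    put func(context.Context) (oid.ID, error),
    del func(context.Context, oid.ID) error,
    oldID *oid.ID,
    log *zap.Logger,
) (oid.ID, error) {
    newID, err := put(ctx)
    if err != nil {
        return oid.ID{}, err // nothing was changed on failure
    }

    if oldID != nil {
        if err := del(ctx, *oldID); err != nil {
            log.Warn("couldn't delete old object", zap.Error(err))
        }
    }

    return newID, nil
}

func main() {
    var old oid.ID
    _, _ = replaceObject(context.Background(),
        func(context.Context) (oid.ID, error) { return oid.ID{}, nil }, // stand-in for objectPutAndHash
        func(context.Context, oid.ID) error { return nil },             // stand-in for objectDelete
        &old, zap.NewNop())
}
```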
@ -4,7 +4,6 @@ import (
|
|||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -14,16 +13,17 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/minio/sio"
|
||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||
"github.com/nspcc-dev/neofs-s3-gw/api/cache"
|
||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||
apiErrors "github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||
"github.com/nspcc-dev/neofs-sdk-go/client"
|
||||
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
|
||||
"github.com/nspcc-dev/neofs-sdk-go/object"
|
||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
||||
"github.com/panjf2000/ants/v2"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
@ -33,14 +33,6 @@ type (
|
|||
// payload range
|
||||
off, ln uint64
|
||||
|
||||
objInfo *data.ObjectInfo
|
||||
bktInfo *data.BucketInfo
|
||||
}
|
||||
|
||||
getFrostFSParams struct {
|
||||
// payload range
|
||||
off, ln uint64
|
||||
|
||||
oid oid.ID
|
||||
bktInfo *data.BucketInfo
|
||||
}
|
||||
|
@ -99,7 +91,7 @@ func (n *layer) objectHead(ctx context.Context, bktInfo *data.BucketInfo, idObj
|
|||
|
||||
n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
|
||||
|
||||
res, err := n.frostFS.ReadObject(ctx, prm)
|
||||
res, err := n.neoFS.ReadObject(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -107,54 +99,9 @@ func (n *layer) objectHead(ctx context.Context, bktInfo *data.BucketInfo, idObj
|
|||
return res.Head, nil
|
||||
}
|
||||
|
||||
func (n *layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Reader, error) {
|
||||
if _, isCombined := p.objInfo.Headers[MultipartObjectSize]; !isCombined {
|
||||
return n.initFrostFSObjectPayloadReader(ctx, getFrostFSParams{
|
||||
off: p.off,
|
||||
ln: p.ln,
|
||||
oid: p.objInfo.ID,
|
||||
bktInfo: p.bktInfo,
|
||||
})
|
||||
}
|
||||
|
||||
combinedObj, err := n.objectGet(ctx, p.bktInfo, p.objInfo.ID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get combined object '%s': %w", p.objInfo.ID.EncodeToString(), err)
|
||||
}
|
||||
|
||||
var parts []*data.PartInfo
|
||||
if err = json.Unmarshal(combinedObj.Payload(), &parts); err != nil {
|
||||
return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
|
||||
}
|
||||
|
||||
isEncrypted := FormEncryptionInfo(p.objInfo.Headers).Enabled
|
||||
objParts := make([]partObj, len(parts))
|
||||
for i, part := range parts {
|
||||
size := part.Size
|
||||
if isEncrypted {
|
||||
if size, err = sio.EncryptedSize(part.Size); err != nil {
|
||||
return nil, fmt.Errorf("compute encrypted size: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
objParts[i] = partObj{
|
||||
OID: part.OID,
|
||||
Size: size,
|
||||
}
|
||||
}
|
||||
|
||||
return newMultiObjectReader(ctx, multiObjectReaderConfig{
|
||||
layer: n,
|
||||
off: p.off,
|
||||
ln: p.ln,
|
||||
parts: objParts,
|
||||
bktInfo: p.bktInfo,
|
||||
})
|
||||
}
|
||||
|
||||
// initializes payload reader of the FrostFS object.
|
||||
// initializes payload reader of the NeoFS object.
|
||||
// Zero range corresponds to full payload (panics if only offset is set).
|
||||
func (n *layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFSParams) (io.Reader, error) {
|
||||
func (n *layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Reader, error) {
|
||||
prm := PrmObjectRead{
|
||||
Container: p.bktInfo.CID,
|
||||
Object: p.oid,
|
||||
|
@ -164,7 +111,7 @@ func (n *layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFS
|
|||
|
||||
n.prepareAuthParameters(ctx, &prm.PrmAuth, p.bktInfo.Owner)
|
||||
|
||||
res, err := n.frostFS.ReadObject(ctx, prm)
|
||||
res, err := n.neoFS.ReadObject(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -183,7 +130,7 @@ func (n *layer) objectGet(ctx context.Context, bktInfo *data.BucketInfo, objID o
|
|||
|
||||
n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
|
||||
|
||||
res, err := n.frostFS.ReadObject(ctx, prm)
|
||||
res, err := n.neoFS.ReadObject(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -224,7 +171,7 @@ func ParseCompletedPartHeader(hdr string) (*Part, error) {
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid completed part number '%s': %w", partInfo[0], err)
|
||||
}
|
||||
size, err := strconv.ParseUint(partInfo[1], 10, 64)
|
||||
size, err := strconv.Atoi(partInfo[1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid completed part size '%s': %w", partInfo[1], err)
|
||||
}
|
||||
|
@ -232,11 +179,11 @@ func ParseCompletedPartHeader(hdr string) (*Part, error) {
|
|||
return &Part{
|
||||
ETag: partInfo[2],
|
||||
PartNumber: num,
|
||||
Size: size,
|
||||
Size: int64(size),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PutObject stores object into FrostFS, took payload from io.Reader.
|
||||
// PutObject stores object into NeoFS, took payload from io.Reader.
|
||||
func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.ExtendedObjectInfo, error) {
|
||||
owner := n.Owner(ctx)
|
||||
|
||||
|
@ -245,18 +192,26 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
return nil, fmt.Errorf("couldn't get versioning settings object: %w", err)
|
||||
}
|
||||
|
||||
newVersion := &data.NodeVersion{
|
||||
BaseNodeVersion: data.BaseNodeVersion{
|
||||
FilePath: p.Object,
|
||||
Size: p.Size,
|
||||
},
|
||||
IsUnversioned: !bktSettings.VersioningEnabled(),
|
||||
}
|
||||
|
||||
r := p.Reader
|
||||
if p.Encryption.Enabled() {
|
||||
p.Header[AttributeDecryptedSize] = strconv.FormatUint(p.Size, 10)
|
||||
p.Header[AttributeDecryptedSize] = strconv.FormatInt(p.Size, 10)
|
||||
if err = addEncryptionHeaders(p.Header, p.Encryption); err != nil {
|
||||
return nil, fmt.Errorf("add encryption header: %w", err)
|
||||
}
|
||||
|
||||
var encSize uint64
|
||||
if r, encSize, err = encryptionReader(p.Reader, p.Size, p.Encryption.Key()); err != nil {
|
||||
if r, encSize, err = encryptionReader(p.Reader, uint64(p.Size), p.Encryption.Key()); err != nil {
|
||||
return nil, fmt.Errorf("create encrypter: %w", err)
|
||||
}
|
||||
p.Size = encSize
|
||||
p.Size = int64(encSize)
|
||||
}
|
||||
|
||||
if r != nil {
|
||||
|
@ -276,11 +231,10 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
prm := PrmObjectCreate{
|
||||
Container: p.BktInfo.CID,
|
||||
Creator: owner,
|
||||
PayloadSize: p.Size,
|
||||
PayloadSize: uint64(p.Size),
|
||||
Filepath: p.Object,
|
||||
Payload: r,
|
||||
CreationTime: TimeNow(ctx),
|
||||
CopiesNumber: p.CopiesNumbers,
|
||||
CopiesNumber: p.CopiesNumber,
|
||||
}
|
||||
|
||||
prm.Attributes = make([][2]string, 0, len(p.Header))
|
||||
|
@ -289,24 +243,13 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
prm.Attributes = append(prm.Attributes, [2]string{k, v})
|
||||
}
|
||||
|
||||
size, id, hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
|
||||
id, hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n.reqLogger(ctx).Debug("put object", zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
|
||||
|
||||
newVersion := &data.NodeVersion{
|
||||
BaseNodeVersion: data.BaseNodeVersion{
|
||||
OID: id,
|
||||
ETag: hex.EncodeToString(hash),
|
||||
FilePath: p.Object,
|
||||
Size: size,
|
||||
},
|
||||
IsUnversioned: !bktSettings.VersioningEnabled(),
|
||||
IsCombined: p.Header[MultipartObjectSize] != "",
|
||||
}
|
||||
|
||||
newVersion.OID = id
|
||||
newVersion.ETag = hex.EncodeToString(hash)
|
||||
if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
|
||||
return nil, fmt.Errorf("couldn't add new verion to tree service: %w", err)
|
||||
}
|
||||
|
@ -318,9 +261,9 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
ObjectName: p.Object,
|
||||
VersionID: id.EncodeToString(),
|
||||
},
|
||||
NewLock: p.Lock,
|
||||
CopiesNumbers: p.CopiesNumbers,
|
||||
NodeVersion: newVersion, // provide new version to make one less tree service call in PutLockInfo
|
||||
NewLock: p.Lock,
|
||||
CopiesNumber: p.CopiesNumber,
|
||||
NodeVersion: newVersion, // provide new version to make one less tree service call in PutLockInfo
|
||||
}
|
||||
|
||||
if err = n.PutLockInfo(ctx, putLockInfoPrms); err != nil {
|
||||
|
@ -337,8 +280,8 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
Owner: owner,
|
||||
Bucket: p.BktInfo.Name,
|
||||
Name: p.Object,
|
||||
Size: size,
|
||||
Created: prm.CreationTime,
|
||||
Size: p.Size,
|
||||
Created: time.Now(),
|
||||
Headers: p.Header,
|
||||
ContentType: p.Header[api.ContentType],
|
||||
HashSum: newVersion.ETag,
|
||||
|
@ -363,20 +306,17 @@ func (n *layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.Bucke
|
|||
node, err := n.treeService.GetLatestVersion(ctx, bkt, objectName)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNodeNotFound) {
|
||||
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error())
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchKey)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if node.IsDeleteMarker() {
|
||||
return nil, fmt.Errorf("%w: found version is delete marker", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey))
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchKey)
|
||||
}
|
||||
|
||||
meta, err := n.objectHead(ctx, bkt, node.OID)
|
||||
if err != nil {
|
||||
if client.IsErrObjectNotFound(err) {
|
||||
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error())
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
objInfo := objectInfoFromMeta(bkt, meta)
|
||||
|
@ -398,7 +338,7 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
|
|||
foundVersion, err = n.treeService.GetUnversioned(ctx, bkt, p.Object)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNodeNotFound) {
|
||||
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
@ -415,7 +355,7 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
|
|||
}
|
||||
}
|
||||
if foundVersion == nil {
|
||||
return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion))
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -427,7 +367,7 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
|
|||
meta, err := n.objectHead(ctx, bkt, foundVersion.OID)
|
||||
if err != nil {
|
||||
if client.IsErrObjectNotFound(err) {
|
||||
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
@ -443,7 +383,7 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
|
|||
return extObjInfo, nil
|
||||
}
|
||||
|
||||
// objectDelete puts tombstone object into frostfs.
|
||||
// objectDelete puts tombstone object into neofs.
|
||||
func (n *layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) error {
|
||||
prm := PrmObjectDelete{
|
||||
Container: bktInfo.CID,
|
||||
|
@ -454,24 +394,22 @@ func (n *layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idOb
|
|||
|
||||
n.cache.DeleteObject(newAddress(bktInfo.CID, idObj))
|
||||
|
||||
return n.frostFS.DeleteObject(ctx, prm)
|
||||
return n.neoFS.DeleteObject(ctx, prm)
|
||||
}
|
||||
|
||||
// objectPutAndHash prepare auth parameters and invoke frostfs.CreateObject.
|
||||
// objectPutAndHash prepare auth parameters and invoke neofs.CreateObject.
|
||||
// Returns object ID and payload sha256 hash.
|
||||
func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, error) {
|
||||
func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (oid.ID, []byte, error) {
|
||||
n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
|
||||
var size uint64
|
||||
hash := sha256.New()
|
||||
prm.Payload = wrapReader(prm.Payload, 64*1024, func(buf []byte) {
|
||||
size += uint64(len(buf))
|
||||
hash.Write(buf)
|
||||
})
|
||||
id, err := n.frostFS.CreateObject(ctx, prm)
|
||||
id, err := n.neoFS.CreateObject(ctx, prm)
|
||||
if err != nil {
|
||||
return 0, oid.ID{}, nil, err
|
||||
return oid.ID{}, nil, err
|
||||
}
|
||||
return size, id, hash.Sum(nil), nil
|
||||
return id, hash.Sum(nil), nil
|
||||
}
|
||||
|
||||
// ListObjectsV1 returns objects in a bucket for requests of Version 1.
|
||||
|
@ -616,8 +554,7 @@ func nodesGenerator(ctx context.Context, p allObjectParams, nodeVersions []*data
|
|||
}
|
||||
|
||||
func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams, input <-chan *data.NodeVersion) (<-chan *data.ObjectInfo, error) {
|
||||
reqLog := n.reqLogger(ctx)
|
||||
pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
|
||||
pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{n.log}))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("coudln't init go pool for listing: %w", err)
|
||||
}
|
||||
|
@ -640,12 +577,12 @@ func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams,
|
|||
wg.Add(1)
|
||||
err = pool.Submit(func() {
|
||||
defer wg.Done()
|
||||
oi := n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter)
|
||||
oi := n.objectInfoFromObjectsCacheOrNeoFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter)
|
||||
if oi == nil {
|
||||
// try to get object again
|
||||
if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter); oi == nil {
|
||||
// do not process object which are definitely missing in object service
|
||||
return
|
||||
+		if oi = n.objectInfoFromObjectsCacheOrNeoFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter); oi == nil {
+			// form object info with data that the tree node contains
+			oi = getPartialObjectInfo(p.Bucket, node)
+		}
 	}

 	select {

@@ -655,7 +592,7 @@ func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams,
 			})
 			if err != nil {
 				wg.Done()
-				reqLog.Warn("failed to submit task to pool", zap.Error(err))
+				n.log.Warn("failed to submit task to pool", zap.Error(err))
 			}
 		}(node)
 	}

@@ -667,6 +604,18 @@ func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams,
 	return objCh, nil
 }

+// getPartialObjectInfo form data.ObjectInfo using data available in data.NodeVersion.
+func getPartialObjectInfo(bktInfo *data.BucketInfo, node *data.NodeVersion) *data.ObjectInfo {
+	return &data.ObjectInfo{
+		ID:      node.OID,
+		CID:     bktInfo.CID,
+		Bucket:  bktInfo.Name,
+		Name:    node.FilePath,
+		Size:    node.Size,
+		HashSum: node.ETag,
+	}
+}
+
 func (n *layer) bucketNodeVersions(ctx context.Context, bkt *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
 	var err error

@@ -697,14 +646,14 @@ func (n *layer) getAllObjectsVersions(ctx context.Context, bkt *data.BucketInfo,
 	for _, nodeVersion := range nodeVersions {
 		oi := &data.ObjectInfo{}

-		if nodeVersion.IsDeleteMarker() { // delete marker does not match any object in FrostFS
+		if nodeVersion.IsDeleteMarker() { // delete marker does not match any object in NeoFS
 			oi.ID = nodeVersion.OID
 			oi.Name = nodeVersion.FilePath
 			oi.Owner = nodeVersion.DeleteMarker.Owner
 			oi.Created = nodeVersion.DeleteMarker.Created
 			oi.IsDeleteMarker = true
 		} else {
-			if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, bkt, nodeVersion, prefix, delimiter); oi == nil {
+			if oi = n.objectInfoFromObjectsCacheOrNeoFS(ctx, bkt, nodeVersion, prefix, delimiter); oi == nil {
 				continue
 			}
 		}

@@ -728,7 +677,7 @@ func (n *layer) getAllObjectsVersions(ctx context.Context, bkt *data.BucketInfo,

 func IsSystemHeader(key string) bool {
 	_, ok := api.SystemMetadata[key]
-	return ok || strings.HasPrefix(key, api.FrostFSSystemMetadataPrefix)
+	return ok || strings.HasPrefix(key, api.NeoFSSystemMetadataPrefix)
 }

 func shouldSkip(node *data.NodeVersion, p allObjectParams, existed map[string]struct{}) bool {

@@ -785,7 +734,7 @@ func triageExtendedObjects(allObjects []*data.ExtendedObjectInfo) (prefixes []st
 	return
 }

-func (n *layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo *data.BucketInfo, node *data.NodeVersion, prefix, delimiter string) (oi *data.ObjectInfo) {
+func (n *layer) objectInfoFromObjectsCacheOrNeoFS(ctx context.Context, bktInfo *data.BucketInfo, node *data.NodeVersion, prefix, delimiter string) (oi *data.ObjectInfo) {
 	if oiDir := tryDirectory(bktInfo, node, prefix, delimiter); oiDir != nil {
 		return oiDir
 	}

@@ -797,7 +746,7 @@ func (n *layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo

 	meta, err := n.objectHead(ctx, bktInfo, node.OID)
 	if err != nil {
-		n.reqLogger(ctx).Warn("could not fetch object meta", zap.Error(err))
+		n.log.Warn("could not fetch object meta", zap.Error(err))
 		return nil
 	}
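The hunks above let object listing fall back to data the tree node already carries when object metadata cannot be fetched, instead of dropping the entry. A minimal standalone sketch of that mapping, with simplified stand-in types (the real `data.BucketInfo`, `data.NodeVersion`, and `data.ObjectInfo` live in `api/data`):

```go
package main

import "fmt"

// Simplified stand-ins for the gateway's api/data types; the field names
// mirror the ones used by getPartialObjectInfo in the diff above.
type BucketInfo struct {
	CID  string
	Name string
}

type NodeVersion struct {
	OID      string
	FilePath string
	Size     int64
	ETag     string
}

type ObjectInfo struct {
	ID, CID, Bucket, Name string
	Size                  int64
	HashSum               string
}

// getPartialObjectInfo builds the listing entry purely from what the tree
// node stores, so no extra HEAD request to storage is needed.
func getPartialObjectInfo(bkt *BucketInfo, node *NodeVersion) *ObjectInfo {
	return &ObjectInfo{
		ID:      node.OID,
		CID:     bkt.CID,
		Bucket:  bkt.Name,
		Name:    node.FilePath,
		Size:    node.Size,
		HashSum: node.ETag,
	}
}

func main() {
	oi := getPartialObjectInfo(
		&BucketInfo{CID: "cnr-1", Name: "photos"},
		&NodeVersion{OID: "obj-1", FilePath: "cat.jpg", Size: 42, ETag: "d41d8cd9"},
	)
	fmt.Printf("%+v\n", oi)
}
```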
@@ -9,21 +9,21 @@ import (
 	"strconv"
 	"time"

-	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
+	"github.com/nspcc-dev/neofs-s3-gw/api/errors"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
 )

 const (
-	AttributeComplianceMode = ".s3-compliance-mode"
+	AttributeComplianceMode  = ".s3-compliance-mode"
+	AttributeExpirationEpoch = "__NEOFS__EXPIRATION_EPOCH"
 )

 type PutLockInfoParams struct {
-	ObjVersion    *ObjectVersion
-	NewLock       *data.ObjectLock
-	CopiesNumbers []uint32
-	NodeVersion   *data.NodeVersion // optional
+	ObjVersion   *ObjectVersion
+	NewLock      *data.ObjectLock
+	CopiesNumber uint32
+	NodeVersion  *data.NodeVersion // optional
 }

 func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err error) {

@@ -32,7 +32,7 @@ func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
 	// sometimes node version can be provided from executing context
 	// if not, then receive node version from tree service
 	if versionNode == nil {
-		versionNode, err = n.getNodeVersionFromCacheOrFrostfs(ctx, p.ObjVersion)
+		versionNode, err = n.getNodeVersionFromCacheOrNeofs(ctx, p.ObjVersion)
 		if err != nil {
 			return err
 		}

@@ -68,7 +68,7 @@ func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
 			}
 		}
 		lock := &data.ObjectLock{Retention: newLock.Retention}
-		retentionOID, err := n.putLockObject(ctx, p.ObjVersion.BktInfo, versionNode.OID, lock, p.CopiesNumbers)
+		retentionOID, err := n.putLockObject(ctx, p.ObjVersion.BktInfo, versionNode.OID, lock, p.CopiesNumber)
 		if err != nil {
 			return err
 		}

@@ -78,7 +78,7 @@ func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
 	if newLock.LegalHold != nil {
 		if newLock.LegalHold.Enabled && !lockInfo.IsLegalHoldSet() {
 			lock := &data.ObjectLock{LegalHold: newLock.LegalHold}
-			legalHoldOID, err := n.putLockObject(ctx, p.ObjVersion.BktInfo, versionNode.OID, lock, p.CopiesNumbers)
+			legalHoldOID, err := n.putLockObject(ctx, p.ObjVersion.BktInfo, versionNode.OID, lock, p.CopiesNumber)
 			if err != nil {
 				return err
 			}

@@ -100,7 +100,7 @@ func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
 	return nil
 }

-func (n *layer) getNodeVersionFromCacheOrFrostfs(ctx context.Context, objVersion *ObjectVersion) (nodeVersion *data.NodeVersion, err error) {
+func (n *layer) getNodeVersionFromCacheOrNeofs(ctx context.Context, objVersion *ObjectVersion) (nodeVersion *data.NodeVersion, err error) {
 	// check cache if node version is stored inside extendedObjectVersion
 	nodeVersion = n.getNodeVersionFromCache(n.Owner(ctx), objVersion)
 	if nodeVersion == nil {

@@ -111,12 +111,11 @@ func (n *layer) getNodeVersionFromCacheOrFrostfs(ctx context.Context, objVersion
 	return nodeVersion, nil
 }

-func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, lock *data.ObjectLock, copiesNumber []uint32) (oid.ID, error) {
+func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, lock *data.ObjectLock, copiesNumber uint32) (oid.ID, error) {
 	prm := PrmObjectCreate{
 		Container:    bktInfo.CID,
 		Creator:      bktInfo.Owner,
 		Locks:        []oid.ID{objID},
 		CreationTime: TimeNow(ctx),
 		CopiesNumber: copiesNumber,
 	}

@@ -126,7 +125,7 @@ func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, obj
 		return oid.ID{}, err
 	}

-	_, id, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
+	id, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
 	return id, err
 }
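The lock hunks also narrow the copies-number plumbing: the FrostFS side threads a slice through `PutLockInfoParams` and `putLockObject`, while this branch passes a single value down to object creation. A toy sketch of the two shapes (stand-in structs, not the gateway's real `PrmObjectCreate`; the per-placement-vector reading of the slice is an assumption):

```go
package main

import "fmt"

// Stand-ins for the two shapes of the object-creation parameters seen in
// the diff; the real PrmObjectCreate lives in the gateway's layer package.
type prmObjectCreateFrostFS struct {
	CopiesNumbers []uint32 // assumed: one value per placement vector
}

type prmObjectCreateNeoFS struct {
	CopiesNumber uint32 // single value for the whole placement policy
}

func main() {
	fmt.Printf("%+v\n", prmObjectCreateFrostFS{CopiesNumbers: []uint32{1, 2, 3}})
	fmt.Printf("%+v\n", prmObjectCreateNeoFS{CopiesNumber: 2})
}
```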
@@ -167,7 +166,7 @@ func (n *layer) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSCo
 	}

 	if objIDNotFound {
-		return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchCORSConfiguration), err.Error())
+		return nil, errors.GetAPIError(errors.ErrNoSuchCORSConfiguration)
 	}

 	obj, err := n.objectGet(ctx, bkt, objID)

@@ -228,7 +227,7 @@ func (n *layer) attributesFromLock(ctx context.Context, lock *data.ObjectLock) (
 	)

 	if lock.Retention != nil {
-		if _, expEpoch, err = n.frostFS.TimeToEpoch(ctx, TimeNow(ctx), lock.Retention.Until); err != nil {
+		if _, expEpoch, err = n.neoFS.TimeToEpoch(ctx, lock.Retention.Until); err != nil {
 			return nil, fmt.Errorf("fetch time to epoch: %w", err)
 		}

@@ -238,7 +237,7 @@ func (n *layer) attributesFromLock(ctx context.Context, lock *data.ObjectLock) (
 	}

 	if lock.LegalHold != nil && lock.LegalHold.Enabled {
-		// todo: (@KirillovDenis) reconsider this when FrostFS will support Legal Hold https://git.frostfs.info/TrueCloudLab/frostfs-contract/issues/2
+		// todo: (@KirillovDenis) reconsider this when NeoFS will support Legal Hold https://github.com/nspcc-dev/neofs-contract/issues/247
 		// Currently lock object must have an expiration epoch.
 		// Besides we need to override retention expiration epoch since legal hold cannot be deleted yet.
 		expEpoch = math.MaxUint64

@@ -246,7 +245,7 @@ func (n *layer) attributesFromLock(ctx context.Context, lock *data.ObjectLock) (

 	if expEpoch != 0 {
 		result = append(result, [2]string{
-			object.SysAttributeExpEpoch, strconv.FormatUint(expEpoch, 10),
+			AttributeExpirationEpoch, strconv.FormatUint(expEpoch, 10),
 		})
 	}
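To make the legal-hold override above concrete, here is a small runnable sketch of how the expiration-epoch attribute pair is assembled. The constant matches the diff; `retention` and `lockAttributes` are invented stand-ins for `data.ObjectLock` and `attributesFromLock`, assuming the until-time has already been converted to an epoch:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

const AttributeExpirationEpoch = "__NEOFS__EXPIRATION_EPOCH"

// retention stands in for data.ObjectLock.Retention after TimeToEpoch.
type retention struct{ untilEpoch uint64 }

func lockAttributes(ret *retention, legalHold bool) [][2]string {
	var (
		result   [][2]string
		expEpoch uint64
	)
	if ret != nil {
		expEpoch = ret.untilEpoch
	}
	if legalHold {
		// A lock object currently must have an expiration epoch, and a
		// legal hold cannot be deleted yet, so it overrides any retention
		// epoch with the maximum possible value.
		expEpoch = math.MaxUint64
	}
	if expEpoch != 0 {
		result = append(result, [2]string{
			AttributeExpirationEpoch, strconv.FormatUint(expEpoch, 10),
		})
	}
	return result
}

func main() {
	fmt.Println(lockAttributes(&retention{untilEpoch: 500}, false))
	fmt.Println(lockAttributes(&retention{untilEpoch: 500}, true))
}
```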
@@ -2,15 +2,13 @@ package layer

 import (
 	"context"
-	"errors"
-	"fmt"
+	errorsStd "errors"

-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-	"go.uber.org/zap"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
+	"github.com/nspcc-dev/neofs-s3-gw/api/errors"
+	cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
+	"github.com/nspcc-dev/neofs-sdk-go/user"
 )

 type GetObjectTaggingParams struct {

@@ -40,7 +38,7 @@ func (n *layer) GetObjectTagging(ctx context.Context, p *GetObjectTaggingParams)

 	nodeVersion := p.NodeVersion
 	if nodeVersion == nil {
-		nodeVersion, err = n.getNodeVersionFromCacheOrFrostfs(ctx, p.ObjectVersion)
+		nodeVersion, err = n.getNodeVersionFromCacheOrNeofs(ctx, p.ObjectVersion)
 		if err != nil {
 			return "", nil, err
 		}

@@ -53,8 +51,8 @@ func (n *layer) GetObjectTagging(ctx context.Context, p *GetObjectTaggingParams)

 	tags, err := n.treeService.GetObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion)
 	if err != nil {
-		if errors.Is(err, ErrNodeNotFound) {
-			return "", nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
+		if errorsStd.Is(err, ErrNodeNotFound) {
+			return "", nil, errors.GetAPIError(errors.ErrNoSuchKey)
 		}
 		return "", nil, err
 	}

@@ -67,7 +65,7 @@ func (n *layer) GetObjectTagging(ctx context.Context, p *GetObjectTaggingParams)
 func (n *layer) PutObjectTagging(ctx context.Context, p *PutObjectTaggingParams) (nodeVersion *data.NodeVersion, err error) {
 	nodeVersion = p.NodeVersion
 	if nodeVersion == nil {
-		nodeVersion, err = n.getNodeVersionFromCacheOrFrostfs(ctx, p.ObjectVersion)
+		nodeVersion, err = n.getNodeVersionFromCacheOrNeofs(ctx, p.ObjectVersion)
 		if err != nil {
 			return nil, err
 		}

@@ -76,8 +74,8 @@ func (n *layer) PutObjectTagging(ctx context.Context, p *PutObjectTaggingParams)

 	err = n.treeService.PutObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion, p.TagSet)
 	if err != nil {
-		if errors.Is(err, ErrNodeNotFound) {
-			return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
+		if errorsStd.Is(err, ErrNodeNotFound) {
+			return nil, errors.GetAPIError(errors.ErrNoSuchKey)
 		}
 		return nil, err
 	}

@@ -95,8 +93,8 @@ func (n *layer) DeleteObjectTagging(ctx context.Context, p *ObjectVersion) (*dat

 	err = n.treeService.DeleteObjectTagging(ctx, p.BktInfo, version)
 	if err != nil {
-		if errors.Is(err, ErrNodeNotFound) {
-			return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
+		if errorsStd.Is(err, ErrNodeNotFound) {
+			return nil, errors.GetAPIError(errors.ErrNoSuchKey)
 		}
 		return nil, err
 	}

@@ -116,7 +114,7 @@ func (n *layer) GetBucketTagging(ctx context.Context, bktInfo *data.BucketInfo)
 	}

 	tags, err := n.treeService.GetBucketTagging(ctx, bktInfo)
-	if err != nil && !errors.Is(err, ErrNodeNotFound) {
+	if err != nil && !errorsStd.Is(err, ErrNodeNotFound) {
 		return nil, err
 	}

@@ -169,19 +167,12 @@ func (n *layer) getNodeVersion(ctx context.Context, objVersion *ObjectVersion) (
 			}
 		}
 		if version == nil {
-			err = fmt.Errorf("%w: there isn't tree node with requested version id", s3errors.GetAPIError(s3errors.ErrNoSuchVersion))
+			err = errors.GetAPIError(errors.ErrNoSuchVersion)
 		}
 	}

-	if err == nil && version.IsDeleteMarker() && !objVersion.NoErrorOnDeleteMarker {
-		return nil, fmt.Errorf("%w: found version is delete marker", s3errors.GetAPIError(s3errors.ErrNoSuchKey))
-	} else if errors.Is(err, ErrNodeNotFound) {
-		return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
-	}
-
-	if err == nil && version != nil && !version.IsDeleteMarker() {
-		n.reqLogger(ctx).Debug("get tree node",
-			zap.Stringer("cid", objVersion.BktInfo.CID), zap.Stringer("oid", version.OID))
+	if err == nil && version.IsDeleteMarker() && !objVersion.NoErrorOnDeleteMarker || errorsStd.Is(err, ErrNodeNotFound) {
+		return nil, errors.GetAPIError(errors.ErrNoSuchKey)
 	}

 	return version, err
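Both sides of these tagging hunks match sentinels with the standard errors package; the difference is only whether the returned API error wraps the underlying cause. A self-contained sketch of that distinction (the sentinel is a stand-in for the gateway's API error value, not its real type):

```go
package main

import (
	"errors"
	"fmt"
)

// errNoSuchKey stands in for the gateway's API error value that upper
// layers match with errors.Is.
var errNoSuchKey = errors.New("NoSuchKey")

func main() {
	cause := errors.New("node not found")

	// Wrapped style (the removed lines): %w keeps errors.Is matching
	// while the message preserves the original cause.
	wrapped := fmt.Errorf("%w: %s", errNoSuchKey, cause.Error())
	fmt.Println(errors.Is(wrapped, errNoSuchKey), "-", wrapped)

	// Bare style (the added lines): the sentinel alone is returned, so
	// the cause disappears from the message but matching still works.
	fmt.Println(errors.Is(errNoSuchKey, errNoSuchKey), "-", errNoSuchKey)
}
```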
@@ -6,8 +6,8 @@ import (
 	"sort"
 	"strings"

-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
 )

 type TreeServiceMock struct {

@@ -59,17 +59,17 @@ func (t *TreeServiceMock) DeleteObjectTagging(_ context.Context, bktInfo *data.B
 	return nil
 }

-func (t *TreeServiceMock) GetBucketTagging(context.Context, *data.BucketInfo) (map[string]string, error) {
+func (t *TreeServiceMock) GetBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) (map[string]string, error) {
 	// TODO implement me
 	panic("implement me")
 }

-func (t *TreeServiceMock) PutBucketTagging(context.Context, *data.BucketInfo, map[string]string) error {
+func (t *TreeServiceMock) PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo, tagSet map[string]string) error {
 	// TODO implement me
 	panic("implement me")
 }

-func (t *TreeServiceMock) DeleteBucketTagging(context.Context, *data.BucketInfo) error {
+func (t *TreeServiceMock) DeleteBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) error {
 	// TODO implement me
 	panic("implement me")
 }

@@ -100,44 +100,23 @@ func (t *TreeServiceMock) GetSettingsNode(_ context.Context, bktInfo *data.Bucke
 	return settings, nil
 }

-func (t *TreeServiceMock) GetNotificationConfigurationNode(context.Context, *data.BucketInfo) (oid.ID, error) {
+func (t *TreeServiceMock) GetNotificationConfigurationNode(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
 	panic("implement me")
 }

-func (t *TreeServiceMock) PutNotificationConfigurationNode(context.Context, *data.BucketInfo, oid.ID) (oid.ID, error) {
+func (t *TreeServiceMock) PutNotificationConfigurationNode(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
 	panic("implement me")
 }

-func (t *TreeServiceMock) GetBucketCORS(_ context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
-	systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
-	if !ok {
-		return oid.ID{}, nil
-	}
-
-	node, ok := systemMap["cors"]
-	if !ok {
-		return oid.ID{}, nil
-	}
-
-	return node.OID, nil
+func (t *TreeServiceMock) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
+	panic("implement me")
 }

-func (t *TreeServiceMock) PutBucketCORS(_ context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
-	systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
-	if !ok {
-		systemMap = make(map[string]*data.BaseNodeVersion)
-	}
-
-	systemMap["cors"] = &data.BaseNodeVersion{
-		OID: objID,
-	}
-
-	t.system[bktInfo.CID.EncodeToString()] = systemMap
-
-	return oid.ID{}, ErrNoNodeToRemove
+func (t *TreeServiceMock) PutBucketCORS(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
+	panic("implement me")
 }

-func (t *TreeServiceMock) DeleteBucketCORS(context.Context, *data.BucketInfo) (oid.ID, error) {
+func (t *TreeServiceMock) DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
 	panic("implement me")
 }

@@ -314,7 +293,7 @@ func (t *TreeServiceMock) CreateMultipartUpload(_ context.Context, bktInfo *data
 	return nil
 }

-func (t *TreeServiceMock) GetMultipartUploadsByPrefix(context.Context, *data.BucketInfo, string) ([]*data.MultipartInfo, error) {
+func (t *TreeServiceMock) GetMultipartUploadsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.MultipartInfo, error) {
 	panic("implement me")
 }

@@ -407,7 +386,7 @@ LOOP:
 	return nil
 }

-func (t *TreeServiceMock) PutLock(_ context.Context, bktInfo *data.BucketInfo, nodeID uint64, lock *data.LockInfo) error {
+func (t *TreeServiceMock) PutLock(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64, lock *data.LockInfo) error {
 	cnrLockMap, ok := t.locks[bktInfo.CID.EncodeToString()]
 	if !ok {
 		t.locks[bktInfo.CID.EncodeToString()] = map[uint64]*data.LockInfo{

@@ -421,7 +400,7 @@ func (t *TreeServiceMock) PutLock(_ context.Context, bktInfo *data.BucketInfo, n
 	return nil
 }

-func (t *TreeServiceMock) GetLock(_ context.Context, bktInfo *data.BucketInfo, nodeID uint64) (*data.LockInfo, error) {
+func (t *TreeServiceMock) GetLock(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64) (*data.LockInfo, error) {
 	cnrLockMap, ok := t.locks[bktInfo.CID.EncodeToString()]
 	if !ok {
 		return nil, nil
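For the methods the mock does implement, such as PutLock and GetLock above, the pattern is a two-level map keyed first by container ID and then by tree node ID. A runnable miniature of that pattern (types simplified to strings; the real mock stores *data.LockInfo):

```go
package main

import "fmt"

// lockMock mirrors TreeServiceMock's two-level locks map: container ID
// on the outside, tree node ID on the inside.
type lockMock struct {
	locks map[string]map[uint64]string
}

func (m *lockMock) PutLock(cnr string, nodeID uint64, lock string) {
	cnrLockMap, ok := m.locks[cnr]
	if !ok {
		// First lock in this container: create the inner map.
		m.locks[cnr] = map[uint64]string{nodeID: lock}
		return
	}
	cnrLockMap[nodeID] = lock
}

func (m *lockMock) GetLock(cnr string, nodeID uint64) (string, bool) {
	cnrLockMap, ok := m.locks[cnr]
	if !ok {
		return "", false
	}
	lock, ok := cnrLockMap[nodeID]
	return lock, ok
}

func main() {
	m := &lockMock{locks: make(map[string]map[uint64]string)}
	m.PutLock("cnr-1", 7, "retention-until-epoch-500")
	fmt.Println(m.GetLock("cnr-1", 7))
	fmt.Println(m.GetLock("cnr-2", 7))
}
```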
@@ -4,8 +4,8 @@ import (
 	"context"
 	"errors"

-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"github.com/nspcc-dev/neofs-s3-gw/api/data"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
 )

 // TreeService provide interface to interact with tree service using s3 data models.

@@ -24,7 +24,7 @@ type TreeService interface {
 	GetNotificationConfigurationNode(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error)

 	// PutNotificationConfigurationNode puts a node to a system tree
-	// and returns objectID of a previous notif config which must be deleted in FrostFS.
+	// and returns objectID of a previous notif config which must be deleted in NeoFS.
 	//
 	// If object id to remove is not found returns ErrNoNodeToRemove error.
 	PutNotificationConfigurationNode(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error)

@@ -34,12 +34,12 @@ type TreeService interface {
 	// If object id is not found returns ErrNodeNotFound error.
 	GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error)

-	// PutBucketCORS puts a node to a system tree and returns objectID of a previous cors config which must be deleted in FrostFS.
+	// PutBucketCORS puts a node to a system tree and returns objectID of a previous cors config which must be deleted in NeoFS.
 	//
 	// If object id to remove is not found returns ErrNoNodeToRemove error.
 	PutBucketCORS(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error)

-	// DeleteBucketCORS removes a node from a system tree and returns objID which must be deleted in FrostFS.
+	// DeleteBucketCORS removes a node from a system tree and returns objID which must be deleted in NeoFS.
 	//
 	// If object id to remove is not found returns ErrNoNodeToRemove error.
 	DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error)

@@ -69,7 +69,7 @@ type TreeService interface {
 	GetMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error)

 	// AddPart puts a node to a system tree as a child of appropriate multipart upload
-	// and returns objectID of a previous part which must be deleted in FrostFS.
+	// and returns objectID of a previous part which must be deleted in NeoFS.
 	//
 	// If object id to remove is not found returns ErrNoNodeToRemove error.
 	AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete oid.ID, err error)
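The interface doc comments describe a common contract: mutating calls return the object ID of a superseded object that the caller must then delete from storage, or ErrNoNodeToRemove when there is nothing to clean up. A runnable caller-side sketch of that contract, with a toy stand-in for the tree service (putBucketCORS here is invented, not code from this diff):

```go
package main

import (
	"errors"
	"fmt"
)

// ErrNoNodeToRemove mirrors the sentinel documented by TreeService.
var ErrNoNodeToRemove = errors.New("no node to remove")

// putBucketCORS is a toy stand-in for TreeService.PutBucketCORS: it
// returns the previous object ID, or ErrNoNodeToRemove the first time.
func putBucketCORS(prev string) (string, error) {
	if prev == "" {
		return "", ErrNoNodeToRemove
	}
	return prev, nil
}

func main() {
	for _, prev := range []string{"", "old-cors-obj"} {
		objToDelete, err := putBucketCORS(prev)
		switch {
		case err == nil:
			// A previous config existed; the caller must now delete
			// objToDelete from storage.
			fmt.Println("delete:", objToDelete)
		case errors.Is(err, ErrNoNodeToRemove):
			// First configuration for this bucket: nothing to clean up.
			fmt.Println("nothing to delete")
		default:
			fmt.Println("error:", err)
		}
	}
}
```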
Some files were not shown because too many files have changed in this diff.