Compare commits
No commits in common. "master" and "empty" have entirely different histories.
312 changed files with 2 additions and 65984 deletions
@@ -1,22 +0,0 @@
FROM golang:1.22 AS builder

ARG BUILD=now
ARG REPO=git.frostfs.info/TrueCloudLab/frostfs-s3-gw
ARG VERSION=dev
ARG GOFLAGS=""

WORKDIR /src
COPY . /src

RUN make GOFLAGS=${GOFLAGS}

# Executable image
FROM alpine AS frostfs-s3-gw
RUN apk add --no-cache bash ca-certificates

WORKDIR /

COPY --from=builder /src/bin/frostfs-s3-gw /bin/frostfs-s3-gw
COPY --from=builder /src/bin/frostfs-s3-authmate /bin/frostfs-s3-authmate

ENTRYPOINT ["/bin/frostfs-s3-gw"]
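For reference, a sketch of driving this multi-stage build directly with `docker build`; the tag and build-argument values below are illustrative, and CI actually goes through `make image` (see the OCI image workflow later in this diff):

```sh
# Build the gateway image from the repository root (tag is illustrative).
docker build \
  --build-arg VERSION=dev \
  --build-arg GOFLAGS="" \
  -t frostfs-s3-gw:dev .
```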
@@ -1,9 +0,0 @@
FROM alpine AS frostfs-s3-gw
RUN apk add --no-cache bash ca-certificates

WORKDIR /

COPY /bin/frostfs-s3-gw /bin/frostfs-s3-gw
COPY /bin/frostfs-s3-authmate /bin/frostfs-s3-authmate

ENTRYPOINT ["/bin/frostfs-s3-gw"]
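Unlike the multi-stage Dockerfile above, this variant copies prebuilt binaries straight from the build context, so they must be compiled on the host first. A hedged sketch; the `Dockerfile.dirty` file name is an assumption about where this variant lives in the tree:

```sh
# Build the binaries on the host, then package them (file name assumed).
make
docker build -f Dockerfile.dirty -t frostfs-s3-gw:dirty .
```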
@@ -1,3 +0,0 @@
.git
.cache
.forgejo
@@ -1,45 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: community, triage, bug
assignees: ''

---

<!--- Provide a general summary of the issue in the Title above -->

## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->

## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->

## Possible Solution
<!--- Not obligatory -->
<!--- If no reason/fix/additions for the bug can be suggested, -->
<!--- uncomment the following phrase: -->

<!--- No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->

## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. -->

1.

## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->

## Regression
<!-- Is this issue a regression? (Yes / No) -->
<!-- If Yes, optionally please include version or commit id or PR# that caused this regression, if you have these details. -->

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used:
* Server setup and configuration:
* Operating System and version (`uname -a`):
@@ -1 +0,0 @@
blank_issues_enabled: false
@@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: community, triage
assignees: ''

---

## Is your feature request related to a problem? Please describe.
<!--- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->

## Describe the solution you'd like
<!--- A clear and concise description of what you want to happen. -->

## Describe alternatives you've considered
<!--- A clear and concise description of any alternative solutions or features you've considered. -->

## Additional context
<!--- Add any other context or screenshots about the feature request here. -->
@@ -1,70 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 25.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Слой_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
	 viewBox="0 0 184.2 51.8" style="enable-background:new 0 0 184.2 51.8;" xml:space="preserve">
<style type="text/css">
	.st0{display:none;}
	.st1{display:inline;}
	.st2{fill:#01E397;}
	.st3{display:inline;fill:#010032;}
	.st4{display:inline;fill:#00E599;}
	.st5{display:inline;fill:#00AF92;}
	.st6{fill:#00C3E5;}
</style>
<g id="Layer_2">
	<g id="Layer_1-2" class="st0">
		<g class="st1">
			<path class="st2" d="M146.6,18.3v7.2h10.9V29h-10.9v10.7h-4V14.8h18v3.5H146.6z"/>
			<path class="st2" d="M180,15.7c1.7,0.9,3,2.2,4,3.8l-3,2.7c-0.6-1.3-1.5-2.4-2.6-3.3c-1.3-0.7-2.8-1-4.3-1
				c-1.4-0.1-2.8,0.3-4,1.1c-0.9,0.5-1.5,1.5-1.4,2.6c0,1,0.5,1.9,1.4,2.4c1.5,0.8,3.2,1.3,4.9,1.5c1.9,0.3,3.7,0.8,5.4,1.6
				c1.2,0.5,2.2,1.3,2.9,2.3c0.6,1,1,2.2,0.9,3.4c0,1.4-0.5,2.7-1.3,3.8c-0.9,1.2-2.1,2.1-3.5,2.6c-1.7,0.6-3.4,0.9-5.2,0.8
				c-5,0-8.6-1.6-10.7-5l2.9-2.8c0.7,1.4,1.8,2.5,3.1,3.3c1.5,0.7,3.1,1.1,4.7,1c1.5,0.1,2.9-0.2,4.2-0.9c0.9-0.5,1.5-1.5,1.5-2.6
				c0-0.9-0.5-1.8-1.3-2.2c-1.5-0.7-3.1-1.2-4.8-1.5c-1.9-0.3-3.7-0.8-5.5-1.5c-1.2-0.5-2.2-1.4-3-2.4c-0.6-1-1-2.2-0.9-3.4
				c0-1.4,0.4-2.7,1.2-3.8c0.8-1.2,2-2.2,3.3-2.8c1.6-0.7,3.4-1.1,5.2-1C176.1,14.3,178.2,14.8,180,15.7z"/>
		</g>
		<path class="st3" d="M73.3,16.3c1.9,1.9,2.9,4.5,2.7,7.1v15.9h-4V24.8c0-2.6-0.5-4.5-1.6-5.7c-1.2-1.2-2.8-1.8-4.5-1.7
			c-1.3,0-2.5,0.3-3.7,0.8c-1.2,0.7-2.2,1.7-2.9,2.9c-0.8,1.5-1.1,3.2-1.1,4.9v13.3h-4V15.1l3.6,1.5v1.7c0.8-1.5,2.1-2.6,3.6-3.3
			c1.5-0.8,3.2-1.2,4.9-1.1C68.9,13.8,71.3,14.7,73.3,16.3z"/>
		<path class="st3" d="M104.4,28.3H85.6c0.1,2.2,1,4.3,2.5,5.9c1.5,1.4,3.5,2.2,5.6,2.1c1.6,0.1,3.2-0.2,4.6-0.9
			c1.1-0.6,2-1.6,2.5-2.8l3.3,1.8c-0.9,1.7-2.3,3.1-4,4c-2,1-4.2,1.5-6.4,1.4c-3.7,0-6.7-1.1-8.8-3.4s-3.2-5.5-3.2-9.6s1-7.2,3-9.5
			s5-3.4,8.7-3.4c2.1-0.1,4.2,0.5,6.1,1.5c1.6,1,3,2.5,3.8,4.2c0.9,1.8,1.3,3.9,1.3,5.9C104.6,26.4,104.6,27.4,104.4,28.3z
			M88.1,19.3c-1.4,1.5-2.2,3.4-2.4,5.5h15.1c-0.2-2-1-3.9-2.3-5.5c-1.4-1.3-3.2-2-5.1-1.9C91.5,17.3,89.6,18,88.1,19.3z"/>
		<path class="st3" d="M131,17.3c2.2,2.3,3.2,5.5,3.2,9.5s-1,7.3-3.2,9.6s-5.1,3.4-8.8,3.4s-6.7-1.1-8.9-3.4s-3.2-5.5-3.2-9.6
			s1.1-7.2,3.2-9.5s5.1-3.4,8.9-3.4S128.9,15,131,17.3z M116.2,19.9c-1.5,2-2.2,4.4-2.1,6.9c-0.2,2.5,0.6,5,2.1,7
			c1.5,1.7,3.7,2.7,6,2.6c2.3,0.1,4.4-0.9,5.9-2.6c1.5-2,2.3-4.5,2.1-7c0.1-2.5-0.6-4.9-2.1-6.9c-1.5-1.7-3.6-2.7-5.9-2.6
			C119.9,17.2,117.7,18.2,116.2,19.9z"/>
		<polygon class="st4" points="0,9.1 0,43.7 22.5,51.8 22.5,16.9 46.8,7.9 24.8,0 "/>
		<polygon class="st5" points="24.3,17.9 24.3,36.8 46.8,44.9 46.8,9.6 "/>
	</g>
	<g>
		<g>
			<path class="st6" d="M41.6,17.5H28.2v6.9h10.4v3.3H28.2v10.2h-3.9V14.2h17.2V17.5z"/>
			<path class="st6" d="M45.8,37.9v-18h3.3l0.4,3.2c0.5-1.2,1.2-2.1,2.1-2.7c0.9-0.6,2.1-0.9,3.5-0.9c0.4,0,0.7,0,1.1,0.1
				c0.4,0.1,0.7,0.2,0.9,0.3l-0.5,3.4c-0.3-0.1-0.6-0.2-0.9-0.2C55.4,23,54.9,23,54.4,23c-0.7,0-1.5,0.2-2.2,0.6
				c-0.7,0.4-1.3,1-1.8,1.8s-0.7,1.8-0.7,3v9.5H45.8z"/>
			<path class="st6" d="M68.6,19.6c1.8,0,3.3,0.4,4.6,1.1c1.3,0.7,2.4,1.8,3.1,3.2s1.1,3.1,1.1,5c0,1.9-0.4,3.6-1.1,5
				c-0.8,1.4-1.8,2.5-3.1,3.2c-1.3,0.7-2.9,1.1-4.6,1.1s-3.3-0.4-4.6-1.1c-1.3-0.7-2.4-1.8-3.2-3.2c-0.8-1.4-1.2-3.1-1.2-5
				c0-1.9,0.4-3.6,1.2-5s1.8-2.5,3.2-3.2C65.3,19.9,66.8,19.6,68.6,19.6z M68.6,22.6c-1.1,0-2,0.2-2.8,0.7c-0.8,0.5-1.3,1.2-1.7,2.1
				s-0.6,2.1-0.6,3.5c0,1.3,0.2,2.5,0.6,3.4s1,1.7,1.7,2.2s1.7,0.7,2.8,0.7c1.1,0,2-0.2,2.7-0.7c0.7-0.5,1.3-1.2,1.7-2.2
				s0.6-2.1,0.6-3.4c0-1.4-0.2-2.5-0.6-3.5s-1-1.6-1.7-2.1C70.6,22.8,69.6,22.6,68.6,22.6z"/>
			<path class="st6" d="M89.2,38.3c-1.8,0-3.4-0.3-4.9-1c-1.5-0.7-2.7-1.7-3.5-3l2.7-2.3c0.5,1,1.3,1.8,2.3,2.4
				c1,0.6,2.2,0.9,3.6,0.9c1.1,0,2-0.2,2.6-0.6c0.6-0.4,1-0.9,1-1.6c0-0.5-0.2-0.9-0.5-1.2s-0.9-0.6-1.7-0.8l-3.8-0.8
				c-1.9-0.4-3.3-1-4.1-1.9c-0.8-0.9-1.2-1.9-1.2-3.3c0-1,0.3-1.9,0.9-2.7c0.6-0.8,1.4-1.5,2.5-2s2.5-0.8,4-0.8c1.8,0,3.3,0.3,4.6,1
				c1.3,0.6,2.2,1.5,2.9,2.7l-2.7,2.2c-0.5-1-1.1-1.7-2-2.1c-0.9-0.5-1.8-0.7-2.8-0.7c-0.8,0-1.4,0.1-2,0.3c-0.6,0.2-1,0.5-1.3,0.8
				c-0.3,0.3-0.4,0.7-0.4,1.2c0,0.5,0.2,0.9,0.5,1.3s1,0.6,1.9,0.8l4.1,0.9c1.7,0.3,2.9,0.9,3.7,1.7c0.7,0.8,1.1,1.8,1.1,2.9
				c0,1.2-0.3,2.2-0.9,3c-0.6,0.9-1.5,1.6-2.6,2C92.1,38.1,90.7,38.3,89.2,38.3z"/>
			<path class="st6" d="M112.8,19.9v3H99.3v-3H112.8z M106.6,14.6v17.9c0,0.9,0.2,1.5,0.7,1.9c0.5,0.4,1.1,0.6,1.9,0.6
				c0.6,0,1.2-0.1,1.7-0.3c0.5-0.2,0.9-0.5,1.3-0.8l0.9,2.8c-0.6,0.5-1.2,0.9-2,1.1c-0.8,0.3-1.7,0.4-2.7,0.4c-1,0-2-0.2-2.8-0.5
				s-1.5-0.9-2-1.6c-0.5-0.8-0.7-1.7-0.8-3V15.7L106.6,14.6z"/>
			<path d="M137.9,17.5h-13.3v6.9h10.4v3.3h-10.4v10.2h-3.9V14.2h17.2V17.5z"/>
			<path d="M150.9,13.8c2.1,0,4,0.4,5.5,1.2c1.6,0.8,2.9,2,4,3.5l-2.6,2.5c-0.9-1.4-1.9-2.4-3.1-3c-1.1-0.6-2.5-0.9-4-0.9
				c-1.2,0-2.1,0.2-2.8,0.5c-0.7,0.3-1.3,0.7-1.6,1.2c-0.3,0.5-0.5,1.1-0.5,1.7c0,0.7,0.3,1.4,0.8,1.9c0.5,0.6,1.5,1,2.9,1.3
				l4.8,1.1c2.3,0.5,3.9,1.3,4.9,2.3c1,1,1.4,2.3,1.4,3.9c0,1.5-0.4,2.7-1.2,3.8c-0.8,1.1-1.9,1.9-3.3,2.5s-3.1,0.9-5,0.9
				c-1.7,0-3.2-0.2-4.5-0.6c-1.3-0.4-2.5-1-3.5-1.8c-1-0.7-1.8-1.6-2.5-2.6l2.7-2.7c0.5,0.8,1.1,1.6,1.9,2.2
				c0.8,0.7,1.7,1.2,2.7,1.5c1,0.4,2.2,0.5,3.4,0.5c1.1,0,2.1-0.1,2.9-0.4c0.8-0.3,1.4-0.7,1.8-1.2c0.4-0.5,0.6-1.1,0.6-1.9
				c0-0.7-0.2-1.3-0.7-1.8c-0.5-0.5-1.3-0.9-2.6-1.2l-5.2-1.2c-1.4-0.3-2.6-0.8-3.6-1.3c-0.9-0.6-1.6-1.3-2.1-2.1s-0.7-1.8-0.7-2.8
				c0-1.3,0.4-2.6,1.1-3.7c0.7-1.1,1.8-2,3.2-2.6C147.3,14.1,148.9,13.8,150.9,13.8z"/>
		</g>
	</g>
</g>
</svg>
Before Width: | Height: | Size: 5.5 KiB
@@ -1,27 +0,0 @@
on:
  pull_request:
  push:
    branches:
      - master

jobs:
  builds:
    name: Builds
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.22', '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Build binary
        run: make

      - name: Check dirty suffix
        run: if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi
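The same two checks can be reproduced locally before pushing; both commands below are taken verbatim from the workflow steps above (the `[[ ... ]]` test requires bash):

```sh
# Build, then verify the reported version carries no "dirty" suffix,
# which would mean uncommitted changes leaked into the build.
make
if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi
```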
@@ -1,20 +0,0 @@
on: [pull_request]

jobs:
  dco:
    name: DCO
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'

      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
        with:
          from: 'origin/${{ github.event.pull_request.base.ref }}'
@@ -1,27 +0,0 @@
on:
  pull_request:
  push:
  workflow_dispatch:

jobs:
  image:
    name: OCI image
    runs-on: docker
    container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
    steps:
      - name: Clone git repo
        uses: actions/checkout@v3

      - name: Build OCI image
        run: make image

      - name: Push image to OCI registry
        run: |
          echo "$REGISTRY_PASSWORD" \
            | docker login --username truecloudlab --password-stdin git.frostfs.info
          make image-push
        if: >-
          startsWith(github.ref, 'refs/tags/v') &&
          (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
        env:
          REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}
@@ -1,45 +0,0 @@
on:
  pull_request:
  push:
    branches:
      - master

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'
          cache: true

      - name: Install linters
        run: make lint-install

      - name: Run linters
        run: make lint

  tests:
    name: Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.22', '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Update Go modules
        run: make dep

      - name: Run tests
        run: make test
@@ -1,25 +0,0 @@
on:
  pull_request:
  push:
    branches:
      - master

jobs:
  vulncheck:
    name: Vulncheck
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23.6'

      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest

      - name: Run govulncheck
        run: govulncheck ./...
27 .gitignore vendored
@@ -1,27 +0,0 @@
# IDE
.idea
.vscode

# Vendoring
vendor

# tempfiles
.DS_Store
*~
.cache

# binary
bin/

# coverage
coverage.txt
coverage.html

# debhelpers
**/*debhelper*

# debian package build files
debian/files
debian/*.log
debian/*.substvars
debian/frostfs-s3-gw/
11 .gitlint
@@ -1,11 +0,0 @@
[general]
fail-without-commits=True
regex-style-search=True
contrib=CC1

[title-match-regex]
regex=^\[\#[0-9Xx]+\]\s

[ignore-by-title]
regex=^Release(.*)
ignore=title-match-regex
@@ -1,71 +0,0 @@
# This file contains all available configuration options
# with their default values.

# options for analysis running
run:
  # timeout for analysis, e.g. 30s, 5m, default is 1m
  timeout: 15m

  # include test files or not, default is true
  tests: true

# output configuration options
output:
  # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
  formats:
    - format: tab

# all available settings of specific linters
linters-settings:
  exhaustive:
    # indicates that switch statements are to be considered exhaustive if a
    # 'default' case is present, even if all enum members aren't listed in the
    # switch
    default-signifies-exhaustive: true
  govet:
    # report about shadowed variables
    check-shadowing: false
  custom:
    truecloudlab-linters:
      path: bin/external_linters.so
      original-url: git.frostfs.info/TrueCloudLab/linters.git
      settings:
        noliteral:
          enable: true
          target-methods: ["Fatal"]
          disable-packages: ["codes", "tc"]
          constants-package: "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"

linters:
  enable:
    # mandatory linters
    - govet
    - revive

    # some default golangci-lint linters
    - errcheck
    - gosimple
    - ineffassign
    - staticcheck
    - typecheck
    - unused

    # extra linters
    - exhaustive
    - godot
    - gofmt
    - whitespace
    - goimports
    - truecloudlab-linters
  disable-all: true
  fast: false

issues:
  include:
    - EXC0002 # should have a comment
    - EXC0003 # test/Test ... consider calling this
    - EXC0004 # govet
    - EXC0005 # C-style breaks
  exclude-dirs:
    - api/auth/signer/v4asdk2
    - api/auth/signer/v4sdk2
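The custom `truecloudlab-linters` entry above loads a plugin from `bin/external_linters.so`, so that plugin has to be built before `golangci-lint` can run. Judging by the CI workflow and pre-commit hooks elsewhere in this diff, the repository wraps both steps in make targets:

```sh
# Build the external linter plugin, then run the full lint suite.
make lint-install
make lint
```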
@@ -1,52 +0,0 @@
ci:
  autofix_prs: false

repos:
  - repo: https://github.com/jorisroovers/gitlint
    rev: v0.19.1
    hooks:
      - id: gitlint
        stages: [commit-msg]
      - id: gitlint-ci

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-added-large-files
      - id: check-case-conflict
      - id: check-executables-have-shebangs
      - id: check-shebang-scripts-are-executable
      - id: check-merge-conflict
      - id: check-json
      - id: check-xml
      - id: check-yaml
      - id: trailing-whitespace
        args: [--markdown-linebreak-ext=md]
      - id: end-of-file-fixer
        exclude: ".key$"

  - repo: https://github.com/shellcheck-py/shellcheck-py
    rev: v0.9.0.2
    hooks:
      - id: shellcheck

  - repo: local
    hooks:
      - id: make-lint-install
        name: install linters
        entry: make lint-install
        language: system
        pass_filenames: false

      - id: make-lint
        name: run linters
        entry: make lint
        language: system
        pass_filenames: false

      - id: go-unit-tests
        name: go unit tests
        entry: make test
        pass_filenames: false
        types: [go]
        language: system
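A typical local setup for these hooks might look as follows; note that gitlint runs at the `commit-msg` stage, so that hook type must be installed explicitly:

```sh
# Install the pre-commit and commit-msg hooks, then exercise them all.
pre-commit install
pre-commit install --hook-type commit-msg
pre-commit run --all-files
```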
467 CHANGELOG.md
@@ -1,467 +0,0 @@
# Changelog

This document outlines major changes between releases.

## [Unreleased]

## [0.32.11] - 2025-02-28

### Fixed
- ListObjects could return empty result from priority storage node with failed shard (#651)

## [0.32.10] - 2025-02-14

### Fixed
- Chunk streaming empty body (#642)

## [0.32.9] - 2025-02-12

### Fixed
- Make `Content-Md5` header check optional (#612)

## [0.32.8] - 2025-02-11

### Fixed
- Return 404 instead of 500 when object is missing in object storage and available in the tree (#626)

### Added
- `tree_stream_timeout` configuration parameter (#627)

## [0.32.7] - 2025-02-06

### Fixed
- Correct passing copies number during multipart upload (#623)

## [0.32.6] - 2025-02-05

### Fixed
- Connection leak when `feature.tree_pool_netmap_support` is enabled (#622)

## [0.32.5] - 2025-02-04

### Fixed
- Support trailing headers signature during aws-chunk upload (#607)

## [0.32.4] - 2025-02-03

### Fixed
- Possible deadlock in tree pool component (#617)
- Possible memory leak in gRPC client (#617)

## [0.32.3] - 2025-01-29

### Fixed
- Use `UNSIGNED_PAYLOAD` as content hash to check signature if `x-amz-content-sha256` isn't a signed header (#616)

## [0.32.2] - 2025-01-27

### Fixed
- Fix panic on payload discard (#605)

## [0.32.1] - 2025-01-17

### Fixed
- Response codes when checking ACL format (#531)
- CORS unmarshal without xmlns (#594)
- Response code for invalid Content-Md5 header (#598)

### Added
- Derive encryption keys for accessbox with salt (#529)
- Debug log when bucket settings not found (#595)
- Context cancellation during tree node streaming (#569)
- Add LimitExceeded error (#589)

### Changed
- Docker image repository (#590, #587)

## [0.32.0] - Khumbu - 2024-12-20

### Added
- Metric of dropped logs by log sampler (#502)
- SigV4A signature algorithm (#339)
- TLS termination header for SSE-C (#562)
- Kludge profile support (#147)
- Netmap support in tree pool (#577)

### Changed
- Improved multipart removal speed (#559)
- Updated tree service pool without api-go dependency (#570)

## [0.31.3] - 2024-12-17

### Fixed
- Return BucketAlreadyExists when global domain is taken (#584)
- Fix list-buckets vhs routing (#583)
- Skip port when matching listen domains (#586)

## [0.31.2] - 2024-12-13

### Fixed
- Unable to remove EC object (#576)

## [0.31.1] - 2024-11-28

### Fixed
- Ignore precondition headers with invalid date format (#563)
- MD5 calculation of object-part with SSE-C (#543)

## [0.31.0] - Rongbuk - 2024-11-20

### Fixed
- Docker warnings during image build (#421)
- `PartNumberMarker` in ListMultipart response (#451)
- PostObject handling (#456)
- Tag logging errors (#452)
- Removing of duplicated parts in tree service during split brain (#448)
- Container resolving (#482)
- FrostFS to S3 error transformation (#488)
- Default bucket routing (#507)
- encoding-type in ListBucketObjectVersions (#404)
- SIGHUP support for `tracing.enabled` config parameter (#520)
- `trace_id` parameter in logs (#501)
- Listing marker processing (#539)
- Content-MD5 header check (#540)
- Precondition check (#538)
- Bucket name check during all S3 operations (#556)

### Added
- Support for separate container for all CORS settings (#422)
- `X-Amz-Force-Delete-Bucket` header for forced bucket removal (#31)
- `Location` support in CompleteMultipart response (#451)
- Tree pool request duration metric (#447)
- Expiration lifecycle configuration support (#42, #412, #459, #460, #516, #536)
- Add support for virtual hosted style addressing (#446, #449, #493)
- Support `frostfs.graceful_close_on_switch_timeout` (#475)
- Vulnerability report document (#413)
- Support patch object method (#462, #473, #466, #479)
- Enhanced logging and request reproducer (#369)
- Root CA configuration for tracing (#484)
- Log sampling policy configuration (#461)
- `sign` command to `frostfs-s3-authmate` (#467)
- Support custom AWS credentials (#509)
- Source IP binding configuration for FrostFS requests (#521)
- Tracing attributes (#549)

### Changed
- Split `FrostFS` interface into separate read methods (#427)
- golangci-lint v1.60 support (#474)
- Updated Go version to 1.22 (#470)
- Container removal after failed bucket creation (#434)
- Explicit check for `.` symbol in bucket name (#506)
- Transaction waiter in contract clients (#522)
- Avoid maintenance mode storage nodes during object operations (#524)
- Content-Type is no longer included in s3-authmate presigned URLs (#505)
- Check owner ID before deleting bucket (#528)
- S3-Authmate now uses APE instead of basic-ACL (#553)

### Removed
- Reduced mutex usage when updating app settings (#329)

## [0.30.9] - 2024-12-13

### Fixed
- Unable to remove EC object (#576)

## [0.30.8] - 2024-10-18

### Fixed
- Error handling for correct connection switch in SDK Pool (#517)

## [0.30.7] - 2024-10-03

### Fixed
- Correct aws-chunk encoding size handling (#511)

## [0.30.6] - 2024-09-17

### Fixed
- Object size of objects uploaded with aws-chunked encoding (#450)
- Object size of objects uploaded with negative Content-Length (#486)

## [0.30.5] - 2024-09-16

### Fixed
- Panic catchers for fuzzing tests (#492)

## [0.30.4] - 2024-09-03

### Added
- Fuzzing tests (#480)

## [0.30.3] - 2024-08-27

### Fixed
- Empty listing when multipart upload contains more than 1000 parts (#471)

## [0.30.2] - 2024-08-20

### Fixed
- Error counting in pool component before connection switch (#468)

### Added
- Log of endpoint address during tree pool errors (#468)

## [0.30.1] - 2024-07-25

### Fixed
- Redundant system node removal in tree service (#437)

### Added
- Log details on SDK Pool health status change (#439)

## [0.30.0] - Kangshung - 2024-07-19

### Fixed
- Fix HTTP/2 requests (#341)
- Fix Decoder.CharsetReader is nil (#379)
- Fix flaky ACL encode test (#340)
- Docs grammar (#432)

### Added
- Add new `reconnect_interval` config param for server rebinding (#291)
- Support `GetBucketPolicyStatus` (#301)
- Support request IP filter with policy (#371, #377)
- Support tag checks in policies (#357, #365, #392, #403, #411)
- Support IAM-MFA checks (#367)
- More docs (#334, #353)
- Add `register-user` command to `authmate` (#414)
- `User` field in request log (#396)
- Erasure coding support in placement policy (#400)
- Improved test coverage (#402)

### Changed
- Update dependencies noted by govulncheck (#368)
- Improve test coverage (#380, #387)
- Support updated naming in native policy JSON (#385)
- Improve determining AccessBox latest version (#335)
- Don't set full_control policy for bucket owner (#407)

### Removed
- Remove control api (#406)
- Remove notifications (#401)
- Remove `layer.Client` interface (#410)
- Remove extended ACL related code (#372)

## [0.29.3] - 2024-07-19

### Fixed
- Support tree split environment when multiple nodes
  may be part of the same sub path (#430)
- Collision of multipart name and system data in the tree (#430)
- Workaround for removal of multiple null versions in unversioned bucket (#430)

## [0.29.2] - 2024-07-03

### Fixed
- Parsing of put-bucket-setting retry configuration (#398)

## [0.29.1] - 2024-06-20

### Fixed
- OPTIONS request processing for object operations (#399)

### Added
- Retries of put-bucket-setting operation during container creation (#398)

## [0.29.0] - Zemu - 2024-05-27

### Fixed
- Fix marshaling errors in `DeleteObjects` method (#222)
- Fix status code in GET/HEAD delete marker (#226)
- Fix `NextVersionIDMarker` in `list-object-versions` (#248)
- Fix possibility of panic during SIGHUP (#288)
- Fix flaky `TestErrorTimeoutChecking` (`make test` sometimes failed) (#290)
- Fix log-level change on SIGHUP (#313)
- Fix anonymous put request (#311)
- Fix routine leak from nns resolver (#324)
- Fix svace errors (#325, #328)

### Added
- Add new `frostfs.buffer_max_size_for_put` config param and sync TZ hash for PUT operations (#197)
- Add `X-Amz-Version-Id` header after complete multipart upload (#227)
- Add handling of `X-Amz-Copy-Source-Server-Side-Encryption-Customer-*` headers during copy (#217)
- Add new `logger.destination` config param (#236)
- Add `X-Amz-Content-Sha256` header validation (#218)
- Support frostfsid contract. See `frostfsid` config section (#260)
- Support per namespace placement policies configuration (see `namespaces.config` config param) (#266)
- Support control api to manage policies. See `control` config section (#258)
- Add `namespace` label to billing metrics (#271)
- Support policy-engine (#257, #259, #282, #283, #302, #307, #345, #351, #358, #360, #362, #383, #354)
- Support `proxy` contract (#287)
- Authmate: support custom attributes (#292)
- Add FrostfsID cache (#269)

### Changed
- Generalise config param `use_default_xmlns_for_complete_multipart` to `use_default_xmlns` so that the default xmlns is used for all requests (#221)
- Set server IdleTimeout and ReadHeaderTimeout to `30s` and allow to configure them (#220)
- Return `ETag` value in quotes (#219)
- Use tombstone when deleting multipart upload (#275)
- Support new parameter `cache.accessbox.removing_check_interval` (#305)
- Use APE rules instead of eACL in container creation (#306)
- Rework bucket policy with policy-engine (#261)
- Improved object listing speed (#165, #347)
- Logging improvement (#300, #318)

### Removed
- Drop sending whitespace characters during complete multipart upload and related config param `kludge.complete_multipart_keepalive` (#227)
- Unused legacy minio related code (#299)
- Redundant output with journald logging (#298)

## [0.28.2] - 2024-05-27

### Fixed
- `anon` user in billing metrics (#321)
- Parts are not removed when multipart object is removed (#370)

### Added
- Put request in duration metrics (#280)

## [0.28.1] - 2024-01-24

### Added
- MD5 hash as ETag and response header (#205)
- Tree pool traversal limit (#262)

### Updating from 0.28.0

See new `features.md5.enabled` and `frostfs.tree_pool_max_attempts` config
parameters.

## [0.28.0] - Academy of Sciences - 2023-12-07

### Fixed
- Handle negative `Content-Length` on put (#125)
- Use `DisableURIPathEscaping` to presign urls (#125)
- Use specific s3 errors instead of `InternalError` where possible (#143)
- `grpc` schemas in tree configuration (#166)
- Return appropriate 404 code when object is missing in storage but present in gate cache (#158)
- Replace part on re-upload when using multipart upload (#176)
- Fix goroutine leak on put object error (#178)
- Fix parsing signed headers in presigned urls (#182)
- Fix url escaping (#188)
- Use correct keys in `list-multipart-uploads` response (#185)
- Fix parsing `key-marker` for object list versions (#237)
- `GetSubTree` failures (#179)
- Unexpected EOF during multipart download (#210)
- Produce clean version in debian build (#245)

### Added
- Add `trace_id` value into log record when tracing is enabled (#142)
- Add basic error types and exit codes to `frostfs-s3-authmate` (#152)
- Add a metric with addresses of nodes of the same and highest priority that are currently healthy (#186)
- Support dump metrics descriptions (#80)
- Add `copies_numbers` section to `placement_policy` in config file and support vectors of copies numbers (#70, #101)
- Support impersonate bearer token (#81, #105)
- Reload default and custom copies numbers on SIGHUP (#104)
- Tracing support (#84, #140)
- Return bearer token in `s3-authmate obtain-secret` result (#132)
- Support multiple version credentials using GSet (#135)
- Implement chunk uploading (#106)
- Add new `kludge.bypass_content_encoding_check_in_chunks` config param (#146)
- Add new `frostfs.client_cut` config param (#192)
- Add selection of the node of the latest version of the object (#231)
- Soft memory limit with `runtime.soft_memory_limit` (#196)
- `server_health` metric for every S3 endpoint status (#199)

### Changed
- Update prometheus to v1.15.0 (#94)
- Update go version to go1.19 (#118)
- Remove object from tree and reset its cache on object deletion when it is already removed from storage (#78)
- Finish rebranding (#2)
- Timeout errors have code 504 now (#103)
- Use request scope logger (#111)
- Add `s3-authmate update-secret` command (#131)
- Use default registerer for app metrics (#155)
- Use chi router instead of archived gorilla/mux (#149, #174, #188)
- Complete multipart upload no longer makes an unnecessary copy, halving total multipart upload time (#63)
- Use gate key to form object owner (#175)
- Apply placement policies and copies if there is at least one valid value (#168)
- `statistic_tx_bytes_total` and `statistic_rx_bytes_total` metrics merged into `statistic_bytes_total` metric with `direction` label (#153)
- Refactor of context-stored data receivers (#137)
- Refactor fetch/parse config parameters functions (#117)
- Move all log messages to constants (#96)
- Allow zero value of `part-number-marker` (#207)
- Clean tag node in the tree service instead of removal (#233)

### Removed
- Drop `tree.service` param (now endpoints from `peers` section are used) (#133)

## [0.27.0] - Karpinsky - 2023-07-12

This is the first FrostFS S3 Gateway release, named after
[Karpinsky glacier](https://en.wikipedia.org/wiki/Karpinsky_Glacier).

### Fixed
- Using multiple servers requires only one healthy (#12)
- Renew token before it expires (#20)
- Add generated deb builder files to .gitignore, and fix typo (#28)
- Get empty bucket CORS from frostfs (#36)
- Don't count pool error on client abort (#35)
- Handle request cancelling (#69)
- Clean up List and Name caches when object is missing in Tree service (#57)
- Don't create unnecessary delete-markers (#83)
- `Too many pings` error (#145)

### Added
- Billing metrics (#5, #26, #29)
- Return container name in `head-bucket` response (#18)
- Multiple configs support (#21)
- Bucket name resolving policy (#25)
- Support string `Action` and `Resource` fields in `bucketPolicy.Statement` (#32)
- Add new `kludge.use_default_xmlns_for_complete_multipart` config param (#40)
- Return `X-Owner-Id` in `head-bucket` response (#79)
- Support multiple tree service endpoints (#74, #110, #114)

### Changed
- Repository rebranding (#1)
- Update neo-go to v0.101.0 (#14)
- Update viper to v1.15.0 (#14)
- Update go version to go1.18 (#16)
- Return error on invalid LocationConstraint (#23)
- Limit number of objects to delete at one time (#37)
- CompleteMultipartUpload handler now sends whitespace characters to keep alive client's connection (#60)
- Support new system attributes (#64)
- Abstract network communication in TreeClient (#59, #75)
- Changed values for `frostfs_s3_gw_state_health` metric (#91)

## Older versions

This project is a fork of [NeoFS S3 Gateway](https://github.com/nspcc-dev/neofs-s3-gw) from version v0.26.0.
To see the CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-s3-gw/blob/master/CHANGELOG.md.

[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/b2148cc3...v0.27.0
[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.27.0...v0.28.0
[0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.0...v0.28.1
[0.28.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.1...v0.28.2
[0.29.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.2...v0.29.0
[0.29.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.0...v0.29.1
[0.29.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.1...v0.29.2
[0.29.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.2...v0.29.3
[0.30.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.3...v0.30.0
[0.30.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.0...v0.30.1
[0.30.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.1...v0.30.2
[0.30.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.2...v0.30.3
[0.30.4]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.3...v0.30.4
[0.30.5]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.4...v0.30.5
[0.30.6]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.5...v0.30.6
[0.30.7]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.6...v0.30.7
[0.30.8]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.7...v0.30.8
[0.30.9]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.8...v0.30.9
[0.31.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.9...v0.31.0
[0.31.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.0...v0.31.1
[0.31.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.1...v0.31.2
[0.31.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.2...v0.31.3
[0.32.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.3...v0.32.0
[0.32.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.0...v0.32.1
[0.32.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.1...v0.32.2
[0.32.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.2...v0.32.3
[0.32.4]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.3...v0.32.4
[0.32.5]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.4...v0.32.5
[0.32.6]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.5...v0.32.6
[0.32.7]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.6...v0.32.7
[0.32.8]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.7...v0.32.8
[0.32.9]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.8...v0.32.9
[0.32.10]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.9...v0.32.10
[0.32.11]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.10...v0.32.11
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.11...master
@@ -1,3 +0,0 @@
.* @TrueCloudLab/storage-services-developers @TrueCloudLab/storage-services-committers
.forgejo/.* @potyarkin
Makefile @potyarkin
156 CONTRIBUTING.md
@@ -1,156 +0,0 @@
# Contribution guide

First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:

- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/issues) and
  [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pulls) for existing
  discussions.

- Open an issue first, to discuss a new feature or enhancement.

- Write tests and make sure the test suite passes locally and on CI.

- Open a pull request and reference the relevant issue(s).

- Make sure your commits are logically separated and have good comments
  explaining the details of your change.

- After receiving feedback, amend your commits or add new ones as
  appropriate.

- **Have fun!**

## Development Workflow

Start by forking the `frostfs-s3-gw` repository, make changes in a branch and then
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in detail:

### Set up your git repository
Fork [FrostFS S3 Gateway
upstream](https://git.frostfs.info/repo/fork/15) source repository
to your own personal repository. Copy the URL of your fork (you will need it for
the `git clone` command below).

```sh
$ git clone https://git.frostfs.info/<username>/frostfs-s3-gw.git
```

### Set up git remote as ``upstream``
```sh
$ cd frostfs-s3-gw
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw.git
$ git fetch upstream
$ git merge upstream/master
...
```

### Create your feature branch
Before making code changes, make sure you create a separate branch for these
changes. You may find it convenient to name the branch in the
`<type>/<Issue>-<changes_topic>` format.

```
$ git checkout -b feature/123-something_awesome
```

### Test your changes
After your code changes, make sure

- To add test cases for the new code.
- To run `make lint`
- To squash your commits into a single commit or a series of logically separated
  commits with `git rebase -i`. It's okay to force update your pull request.
- To run `make test` and `make all` successfully.

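Taken together, a quick local verification pass matching the list above might be:

```sh
# Lint, test, and do a full build, as CI will.
make lint
make test
make all
```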
### Commit changes
After verification, commit your changes. There is a [great
post](https://chris.beams.io/posts/git-commit/) on how to write useful commit
messages. Try following this template:

```
[#Issue] <component> Summary

Description

<Macros>

<Sign-Off>
```

```
$ git commit -ams '[#123] Add some feature'
```

### Push to the branch
Push your locally committed changes to the remote origin (your fork):
```
$ git push origin feature/123-something_awesome
```

### Create a Pull Request
Pull requests can be created via Forgejo. Refer to [this
document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.

## DCO Sign off

All authors to the project retain copyright to their work. However, to ensure
that they are only submitting work that they have rights to, we require
everyone to acknowledge this by signing their work.

Any copyright notices in this repository should specify the authors as "the
contributors".

To sign your work, just add a line like this at the end of your commit message:

```
Signed-off-by: Samii Sakisaka <samii@frostfs.info>
```

This can be easily done with the `--signoff` option to `git commit`.

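For example (issue number and message are illustrative):

```sh
# Sign off the current commit as you create it.
git commit --signoff -m '[#123] Add some feature'
# Or add a sign-off to the previous commit.
git commit --amend --signoff
```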
By doing this you state that you can certify the following (from [The Developer
Certificate of Origin](https://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.


Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```
19 CREDITS.md
@@ -1,19 +0,0 @@
# Credits

In alphabetical order:

- Alexey Vanin
- Angira Kekteeva
- Denis Kirillov
- Evgeniy Kulikov
- Pavel Korotkov
- Roman Khimov

# Contributors

In chronological order:

- Elizaveta Chichindaeva
- Stanislav Bogatyrev
- Anastasia Prasolova
- Leonard Liubich
661
LICENSE
661
LICENSE
|
@ -1,661 +0,0 @@
|
||||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
|
||||||
Version 3, 19 November 2007
|
|
||||||
|
|
||||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
|
||||||
Everyone is permitted to copy and distribute verbatim copies
|
|
||||||
of this license document, but changing it is not allowed.
|
|
||||||
|
|
||||||
Preamble
|
|
||||||
|
|
||||||
The GNU Affero General Public License is a free, copyleft license for
|
|
||||||
software and other kinds of works, specifically designed to ensure
|
|
||||||
cooperation with the community in the case of network server software.
|
|
||||||
|
|
||||||
The licenses for most software and other practical works are designed
|
|
||||||
to take away your freedom to share and change the works. By contrast,
|
|
||||||
our General Public Licenses are intended to guarantee your freedom to
|
|
||||||
share and change all versions of a program--to make sure it remains free
|
|
||||||
software for all its users.
|
|
||||||
|
|
||||||
When we speak of free software, we are referring to freedom, not
|
|
||||||
price. Our General Public Licenses are designed to make sure that you
|
|
||||||
have the freedom to distribute copies of free software (and charge for
|
|
||||||
them if you wish), that you receive source code or can get it if you
|
|
||||||
want it, that you can change the software or use pieces of it in new
|
|
||||||
free programs, and that you know you can do these things.
|
|
||||||
|
|
||||||
Developers that use our General Public Licenses protect your rights
|
|
||||||
with two steps: (1) assert copyright on the software, and (2) offer
|
|
||||||
you this License which gives you legal permission to copy, distribute
|
|
||||||
and/or modify the software.
|
|
||||||
|
|
||||||
A secondary benefit of defending all users' freedom is that
|
|
||||||
improvements made in alternate versions of the program, if they
|
|
||||||
receive widespread use, become available for other developers to
|
|
||||||
incorporate. Many developers of free software are heartened and
|
|
||||||
encouraged by the resulting cooperation. However, in the case of
|
|
||||||
software used on network servers, this result may fail to come about.
|
|
||||||
The GNU General Public License permits making a modified version and
|
|
||||||
letting the public access it on a server without ever releasing its
|
|
||||||
source code to the public.
|
|
||||||
|
|
||||||
The GNU Affero General Public License is designed specifically to
|
|
||||||
ensure that, in such cases, the modified source code becomes available
|
|
||||||
to the community. It requires the operator of a network server to
|
|
||||||
provide the source code of the modified version running there to the
|
|
||||||
users of that server. Therefore, public use of a modified version, on
|
|
||||||
a publicly accessible server, gives the public access to the source
|
|
||||||
code of the modified version.
|
|
||||||
|
|
||||||
An older license, called the Affero General Public License and
|
|
||||||
published by Affero, was designed to accomplish similar goals. This is
|
|
||||||
a different license, not a version of the Affero GPL, but Affero has
|
|
||||||
released a new version of the Affero GPL which permits relicensing under
|
|
||||||
this license.
|
|
||||||
|
|
||||||
The precise terms and conditions for copying, distribution and
|
|
||||||
modification follow.
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
0. Definitions.
|
|
||||||
|
|
||||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
|
||||||
|
|
||||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
|
||||||
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7. This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy. This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged. This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source. This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge. You need not require recipients to copy the
    Corresponding Source along with the object code. If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source. Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

  A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

  A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

  "Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

  If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

  The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

  Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

  7. Additional Terms.

  "Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

  When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

  Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

  All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

  If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

  Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

  8. Termination.

  You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

  However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

  Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

  Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

  9. Acceptance Not Required for Having Copies.

  You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

  10. Automatic Licensing of Downstream Recipients.

  Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.

  An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

  You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

  11. Patents.

  A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".

  A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

  Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

  In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

  If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

  If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

  A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

  Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

  12. No Surrender of Others' Freedom.

  If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

  13. Remote Network Interaction; Use with the GNU General Public License.

  Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.

  Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.

  14. Revised Versions of this License.

  The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

  Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.

  If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

  Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

  If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU Affero General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License
    along with this program. If not, see <https://www.gnu.org/licenses/>.

  Also add information on how to contact you by electronic and paper mail.

  If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.

  You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

222
Makefile
@ -1,222 +0,0 @@
#!/usr/bin/make -f

# Common variables
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.1
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
BINDIR = bin

METRICS_DUMP_OUT ?= ./metrics-dump.json

# Binaries to build
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
BINS = $(addprefix $(BINDIR)/, $(CMDS))

GOFLAGS ?=

# Variables for docker
REPO_BASENAME = $(shell basename `go list -m`)
HUB_IMAGE ?= "git.frostfs.info/truecloudlab/$(REPO_BASENAME)"
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"

OUTPUT_LINT_DIR ?= $(shell pwd)/bin
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache

# Variables for fuzzing
FUZZ_NGFUZZ_DIR ?= ""
FUZZ_TIMEOUT ?= 30
FUZZ_FUNCTIONS ?= "all"
FUZZ_AUX ?= ""

.PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean protoc

# .deb package versioning
OS_RELEASE = $(shell lsb_release -cs)
PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
		sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
		sed "s/-/~/")-${OS_RELEASE}
.PHONY: debpackage debclean

# Make all binaries
all: $(BINS)

$(BINS): $(BINDIR) dep
	@echo "⇒ Build $@"
	CGO_ENABLED=0 \
	GOFLAGS=$(GOFLAGS) \
	go build -v -trimpath \
	-ldflags "-X $(REPO)/internal/version.Version=$(VERSION)" \
	-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))

$(BINDIR):
	@echo "⇒ Ensure dir: $@"
	@mkdir -p $@

# Pull go dependencies
dep:
	@printf "⇒ Download requirements: "
	@CGO_ENABLED=0 \
	go mod download && echo OK
	@printf "⇒ Tidy requirements: "
	@CGO_ENABLED=0 \
	go mod tidy -v && echo OK

# Run `make %` in Golang container, for more information run `make help.docker/%`
docker/%:
	$(if $(filter $*,all $(BINS)), \
		@echo "=> Running 'make $*' in clean Docker environment" && \
		docker run --rm -t \
		-v `pwd`:/src \
		-w /src \
		-u `stat -c "%u:%g" .` \
		--env HOME=/src \
		golang:$(GO_VERSION) make GOFLAGS=$(GOFLAGS) $*,\
		@echo "supported docker targets: all $(BINS) lint")

# Run tests
test:
	@go test ./... -cover

# Run tests with race detection and produce coverage output
cover:
	@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
	@go tool cover -html=coverage.txt -o coverage.html

# Run fuzzing
CLANG := $(shell which clang-17 2>/dev/null)
.PHONY: check-clang all
check-clang:
ifeq ($(CLANG),)
	@echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh "
	@exit 1
endif

.PHONY: check-ngfuzz all
check-ngfuzz:
	@if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \
		echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \
		exit 1; \
	fi

.PHONY: install-fuzzing-deps
install-fuzzing-deps: check-clang check-ngfuzz

.PHONY: fuzz
fuzz: install-fuzzing-deps
	@START_PATH=$$(pwd); \
	ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
	cd $(FUZZ_NGFUZZ_DIR) && \
	./ngfuzz -clean && \
	./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
	./ngfuzz -report

# Reformat code
format:
	@echo "⇒ Processing gofmt check"
	@gofmt -s -w ./

# Build clean Docker image
image:
	@echo "⇒ Build FrostFS S3 Gateway docker image "
	@docker build \
		--build-arg REPO=$(REPO) \
		--build-arg VERSION=$(VERSION) \
		--build-arg GOFLAGS=$(GOFLAGS) \
		--rm \
		-f .docker/Dockerfile \
		-t $(HUB_IMAGE):$(HUB_TAG) .

# Push Docker image to the hub
image-push:
	@echo "⇒ Publish image"
	@docker push $(HUB_IMAGE):$(HUB_TAG)

# Build dirty Docker image
dirty-image:
	@echo "⇒ Build FrostFS S3 Gateway dirty docker image "
	@docker build \
		--build-arg REPO=$(REPO) \
		--build-arg VERSION=$(VERSION) \
		--rm \
		-f .docker/Dockerfile.dirty \
		-t $(HUB_IMAGE)-dirty:$(HUB_TAG) .

# Install linters
lint-install:
	@mkdir -p $(TMP_DIR)
	@rm -rf $(TMP_DIR)/linters
	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
	@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
	@rm -rf $(TMP_DIR)/linters
	@rmdir $(TMP_DIR) 2>/dev/null || true
	@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)

# Run linters
lint:
	@if [ ! -d "$(LINT_DIR)" ]; then \
		echo "Run make lint-install"; \
		exit 1; \
	fi
	$(LINT_DIR)/golangci-lint --timeout=5m run

# Run linters in Docker
docker/lint:
	docker run --rm -it \
	-v `pwd`:/src \
	-u `stat -c "%u:%g" .` \
	--env HOME=/src \
	golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'

# Activate pre-commit hooks
pre-commit:
	pre-commit install -t pre-commit -t commit-msg

# Deactivate pre-commit hooks
unpre-commit:
	pre-commit uninstall -t pre-commit -t commit-msg

# Show current version
version:
	@echo $(VERSION)

# Clean up files
clean:
	rm -rf .cache
	rm -rf $(BINDIR)

# Generate code from .proto files
protoc:
	# Install specific version for protobuf lib
	@GOBIN=$(abspath $(BINDIR)) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen
	@for f in `find . -type f -name '*.proto' -not -path './vendor/*'`; do \
		echo "⇒ Processing $$f "; \
		protoc \
		--go_out=paths=source_relative:. \
		--plugin=protoc-gen-go-frostfs=$(BINDIR)/protogen \
		--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
		--go-grpc_opt=require_unimplemented_servers=false \
		--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
	done
	rm -rf vendor

# Package for Debian
debpackage:
	dch --package frostfs-s3-gw \
		--controlmaint \
		--newversion $(PKG_VERSION) \
		--distribution $(OS_RELEASE) \
		"Please see CHANGELOG.md for code changes for $(VERSION)"
	dpkg-buildpackage --no-sign -b

debclean:
	dh clean

# Dump metrics (use METRICS_DUMP_OUT variable to override default out file './metrics-dump.json')
.PHONY: dump-metrics
dump-metrics:
	@go test ./metrics -run TestDescribeAll --tags=dump_metrics --out=$(abspath $(METRICS_DUMP_OUT))

include help.mk
126
README.md
@ -1,125 +1,3 @@
<p align="center">
# WIP area: this repo is just a fork!
  <img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
</p>
<p align="center">
  <a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
</p>

---
Useful things may be published only in [other branches](../../../branches)
[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)



# FrostFS S3 Gateway

FrostFS S3 gateway provides an API compatible with the Amazon S3 cloud storage service.

## Installation

```go get -u git.frostfs.info/TrueCloudLab/frostfs-s3-gw```

Or you can call `make` to build it from the cloned repository (the binary will
end up in `bin/frostfs-s3-gw` with the authmate helper in `bin/frostfs-s3-authmate`).
To build binaries in a clean docker environment, call `make docker/all`.

Other notable make targets:

```
dep          Check and ensure dependencies
image        Build clean docker image
dirty-image  Build dirty docker image with host-built binaries
format       Run all code formatters
lint         Run linters
version      Show current version
```

Or you can also use a [Docker
image](https://hub.docker.com/r/truecloudlab/frostfs-s3-gw) provided for released
(and occasionally unreleased) versions of the gateway (`:latest` points to the
latest stable release).

## Execution

A minimalistic S3 gateway setup needs:
* FrostFS node(s) address (the S3 gateway itself is not a FrostFS node)
  Passed via the `-p` parameter or via the `S3_GW_PEERS_<N>_ADDRESS` and
  `S3_GW_PEERS_<N>_WEIGHT` environment variables (the gateway supports multiple
  FrostFS nodes with weighted load balancing).
* a wallet used to fetch the key and communicate with FrostFS nodes
  Passed via the `--wallet` parameter or the `S3_GW_WALLET_PATH` environment variable.

These two commands are functionally equivalent: they run the gate with one
backend node, some keys, and otherwise default settings:
```
$ frostfs-s3-gw -p 192.168.130.72:8080 --wallet wallet.json

$ S3_GW_PEERS_0_ADDRESS=192.168.130.72:8080 \
  S3_GW_WALLET=wallet.json \
  frostfs-s3-gw
```
It's also possible to specify the URI scheme (grpc or grpcs) when using `-p` or environment variables:
```
$ frostfs-s3-gw -p grpc://192.168.130.72:8080 --wallet wallet.json

$ S3_GW_PEERS_0_ADDRESS=grpcs://192.168.130.72:8080 \
  S3_GW_WALLET=wallet.json \
  frostfs-s3-gw
```

## Domains

By default, s3-gw enables only `path-style` access.
To use both `virtual-hosted-style` and `path-style` access, you must configure `listen_domains`:

```shell
$ frostfs-s3-gw -p 192.168.130.72:8080 --wallet wallet.json --listen_domains your.first.domain --listen_domains your.second.domain
```

Now you can use both styles (e.g. `HeadBucket`; make sure DNS is properly configured):

```shell
$ curl --head http://bucket-name.your.first.domain:8080
HTTP/1.1 200 OK
...
```

or

```shell
$ curl --head http://your.second.domain:8080/bucket-name
HTTP/1.1 200 OK
...
```

Also, you can configure domains using `.env` variables or a `yaml` file; a minimal sketch follows.
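
For example, a `yaml` fragment along these lines should work; the `listen_domains` key mirrors the command-line flag above, but treat this as an assumption and consult [Configuration](./docs/configuration.md) for the authoritative schema:

```yaml
# Hypothetical minimal fragment; see docs/configuration.md for the full schema.
listen_domains:
  - your.first.domain
  - your.second.domain
```
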
## Fuzzing
To run fuzzing tests, use the following command:

```shell
$ make fuzz
```

This command will install dependencies for the fuzzing process and run the existing fuzzing tests.

You can also use the following arguments (an example invocation follows the list):

```
FUZZ_TIMEOUT - time to run each fuzzing test (default 30)
FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all")
FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug")
FUZZ_NGFUZZ_DIR - path to ngfuzz tool
```
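
For instance, a run that points at a local ngfuzz checkout and doubles the per-test budget might look like this (the path is illustrative):

```shell
$ make fuzz FUZZ_NGFUZZ_DIR=/opt/ngfuzz FUZZ_TIMEOUT=60
```
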
## Documentation

- [Configuration](./docs/configuration.md)
- [FrostFS S3 AuthMate](./docs/authmate.md)
- [FrostFS Tree service](./docs/tree_service.md)
- [AWS CLI basic usage](./docs/aws_cli.md)
- [AWS S3 API compatibility](./docs/aws_s3_compat.md)
- [AWS S3 Compatibility test results](./docs/s3_test_results.md)

## Credits

Please see [CREDITS](CREDITS.md) for details.
26
SECURITY.md
@ -1,26 +0,0 @@
# Security Policy

## How To Report a Vulnerability

If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.

**Please do not report security vulnerabilities through public issues, discussions, or change requests.**

Instead, you can report it using one of the following ways:

* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email

Please include as much of the information listed below as you can to help us better understand and resolve the issue:

* The type of issue (e.g., buffer overflow, or cross-site scripting)
* Affected version(s)
* Impact of the issue, including how an attacker might exploit the issue
* Step-by-step instructions to reproduce the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Full paths of source file(s) related to the manifestation of the issue
* Any special configuration required to reproduce the issue
* Any log files that are related to this issue (if possible)
* Proof-of-concept or exploit code (if possible)

This information will help us triage your report more quickly.
1
VERSION
@ -1 +0,0 @@
v0.32.11
@ -1,578 +0,0 @@
package auth

import (
	"context"
	"crypto"
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"net/url"
	"regexp"
	"slices"
	"strings"
	"time"

	v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
)

var (
	// AuthorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
	AuthorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)

	// AuthorizationFieldV4aRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
	AuthorizationFieldV4aRegexp = regexp.MustCompile(`AWS4-ECDSA-P256-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)

	// postPolicyCredentialRegexp -- is regexp for credentials when uploading file using POST with policy.
	postPolicyCredentialRegexp = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request`)
)
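As an editorial aside, the named groups above spell out the expected `Authorization` header layout. A minimal sketch of inspecting a header with the exported SigV4 regexp, assuming the package is importable as `git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth` (the access key and signature values are made up):

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
)

func main() {
	// Made-up access key and signature, purely to show the submatch layout.
	hdr := "AWS4-HMAC-SHA256 Credential=EXAMPLEACCESSKEY/20240101/us-east-1/s3/aws4_request, " +
		"SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=abcdef0123456789"

	match := auth.AuthorizationFieldRegexp.FindStringSubmatch(hdr)
	for i, name := range auth.AuthorizationFieldRegexp.SubexpNames() {
		if i != 0 && name != "" && match != nil {
			// Prints access_key_id, date, region, service, signed_header_fields, v4_signature.
			fmt.Printf("%-22s = %s\n", name, match[i])
		}
	}
}
```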
type (
	Center struct {
		reg                        *RegexpSubmatcher
		regV4a                     *RegexpSubmatcher
		postReg                    *RegexpSubmatcher
		cli                        tokens.Credentials
		allowedAccessKeyIDPrefixes []string // empty slice means all access key ids are allowed
		settings                   CenterSettings
	}

	CenterSettings interface {
		AccessBoxContainer() (cid.ID, bool)
	}

	//nolint:revive
	AuthHeader struct {
		AccessKeyID  string
		Service      string
		Region       string
		Signature    string
		SignedFields []string
		Date         string
		IsPresigned  bool
		Expiration   time.Duration
		Preamble     string
		PayloadHash  string
	}
)

const (
	authHeaderPartsNum    = 6
	authHeaderV4aPartsNum = 5
	maxFormSizeMemory     = 50 * 1048576 // 50 MB

	AmzAlgorithm     = "X-Amz-Algorithm"
	AmzCredential    = "X-Amz-Credential"
	AmzSignature     = "X-Amz-Signature"
	AmzSignedHeaders = "X-Amz-SignedHeaders"
	AmzRegionSet     = "X-Amz-Region-Set"
	AmzExpires       = "X-Amz-Expires"
	AmzDate          = "X-Amz-Date"
	AmzContentSHA256 = "X-Amz-Content-Sha256"
	AuthorizationHdr = "Authorization"
	ContentTypeHdr   = "Content-Type"

	UnsignedPayload                    = "UNSIGNED-PAYLOAD"
	StreamingUnsignedPayloadTrailer    = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
	StreamingContentSHA256             = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
	StreamingContentSHA256Trailer      = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
	StreamingContentECDSASHA256        = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"
	StreamingContentECDSASHA256Trailer = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"
)

var ContentSHA256HeaderStandardValue = map[string]struct{}{
	UnsignedPayload:                    {},
	StreamingUnsignedPayloadTrailer:    {},
	StreamingContentSHA256:             {},
	StreamingContentSHA256Trailer:      {},
	StreamingContentECDSASHA256:        {},
	StreamingContentECDSASHA256Trailer: {},
}

// New creates an instance of AuthCenter.
func New(creds tokens.Credentials, prefixes []string, settings CenterSettings) *Center {
	return &Center{
		cli:                        creds,
		reg:                        NewRegexpMatcher(AuthorizationFieldRegexp),
		regV4a:                     NewRegexpMatcher(AuthorizationFieldV4aRegexp),
		postReg:                    NewRegexpMatcher(postPolicyCredentialRegexp),
		allowedAccessKeyIDPrefixes: prefixes,
		settings:                   settings,
	}
}
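For orientation, a hedged sketch of how `New` and `Authenticate` (defined below) might be wired into an HTTP middleware; `creds` and `settings` stand in for the gateway's real `tokens.Credentials` and `CenterSettings` implementations, which are assumed here rather than shown:

```go
// Hypothetical wiring sketch, not the gateway's actual bootstrap code.
func newAuthMiddleware(creds tokens.Credentials, settings auth.CenterSettings, next http.Handler) http.Handler {
	center := auth.New(creds, nil, settings) // nil prefix list: all access key IDs allowed

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		box, err := center.Authenticate(r.Context(), r)
		if err != nil {
			http.Error(w, "access denied", http.StatusForbidden)
			return
		}
		_ = box // box.AccessBox carries the tokens used for subsequent FrostFS calls
		next.ServeHTTP(w, r)
	})
}
```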
const (
|
|
||||||
SignaturePreambleSigV4 = "AWS4-HMAC-SHA256"
|
|
||||||
SignaturePreambleSigV4A = "AWS4-ECDSA-P256-SHA256"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (c *Center) parseAuthHeader(authHeader string, headers http.Header) (*AuthHeader, error) {
|
|
||||||
preamble, _, _ := strings.Cut(authHeader, " ")
|
|
||||||
|
|
||||||
var (
|
|
||||||
submatches map[string]string
|
|
||||||
region string
|
|
||||||
)
|
|
||||||
|
|
||||||
switch preamble {
|
|
||||||
case SignaturePreambleSigV4:
|
|
||||||
submatches = c.reg.GetSubmatches(authHeader)
|
|
||||||
if len(submatches) != authHeaderPartsNum {
|
|
||||||
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), authHeader)
|
|
||||||
}
|
|
||||||
region = submatches["region"]
|
|
||||||
case SignaturePreambleSigV4A:
|
|
||||||
submatches = c.regV4a.GetSubmatches(authHeader)
|
|
||||||
if len(submatches) != authHeaderV4aPartsNum {
|
|
||||||
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), authHeader)
|
|
||||||
}
|
|
||||||
region = headers.Get(AmzRegionSet)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), authHeader)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &AuthHeader{
|
|
||||||
AccessKeyID: submatches["access_key_id"],
|
|
||||||
Service: submatches["service"],
|
|
||||||
Region: region,
|
|
||||||
Signature: submatches["v4_signature"],
|
|
||||||
SignedFields: strings.Split(submatches["signed_header_fields"], ";"),
|
|
||||||
Date: submatches["date"],
|
|
||||||
Preamble: preamble,
|
|
||||||
PayloadHash: headers.Get(AmzContentSHA256),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func IsStandardContentSHA256(key string) bool {
	_, ok := ContentSHA256HeaderStandardValue[key]
	return ok
}

func (c *Center) Authenticate(ctx context.Context, r *http.Request) (*middleware.Box, error) {
	var (
		err                  error
		authHdr              *AuthHeader
		signatureDateTimeStr string
		needClientTime       bool
	)

	queryValues := r.URL.Query()
	if queryValues.Get(AmzAlgorithm) == SignaturePreambleSigV4 {
		// Presigned SigV4 request: credentials come from the query string.
		creds := strings.Split(queryValues.Get(AmzCredential), "/")
		if len(creds) != 5 || creds[4] != "aws4_request" {
			return nil, fmt.Errorf("bad X-Amz-Credential")
		}
		authHdr = &AuthHeader{
			AccessKeyID:  creds[0],
			Service:      creds[3],
			Region:       creds[2],
			Signature:    queryValues.Get(AmzSignature),
			SignedFields: strings.Split(queryValues.Get(AmzSignedHeaders), ";"),
			Date:         creds[1],
			IsPresigned:  true,
			Preamble:     SignaturePreambleSigV4,
			PayloadHash:  r.Header.Get(AmzContentSHA256),
		}
		authHdr.Expiration, err = time.ParseDuration(queryValues.Get(AmzExpires) + "s")
		if err != nil {
			return nil, fmt.Errorf("%w: couldn't parse X-Amz-Expires %v", apierr.GetAPIError(apierr.ErrMalformedExpires), err)
		}
		signatureDateTimeStr = queryValues.Get(AmzDate)
	} else if queryValues.Get(AmzAlgorithm) == SignaturePreambleSigV4A {
		// Presigned SigV4A request: the credential scope carries no region part.
		creds := strings.Split(queryValues.Get(AmzCredential), "/")
		if len(creds) != 4 || creds[3] != "aws4_request" {
			return nil, fmt.Errorf("bad X-Amz-Credential")
		}
		authHdr = &AuthHeader{
			AccessKeyID:  creds[0],
			Service:      creds[2],
			Region:       queryValues.Get(AmzRegionSet),
			Signature:    queryValues.Get(AmzSignature),
			SignedFields: strings.Split(queryValues.Get(AmzSignedHeaders), ";"),
			Date:         creds[1],
			IsPresigned:  true,
			Preamble:     SignaturePreambleSigV4A,
			PayloadHash:  r.Header.Get(AmzContentSHA256),
		}
		authHdr.Expiration, err = time.ParseDuration(queryValues.Get(AmzExpires) + "s")
		if err != nil {
			return nil, fmt.Errorf("%w: couldn't parse X-Amz-Expires %v", apierr.GetAPIError(apierr.ErrMalformedExpires), err)
		}
		signatureDateTimeStr = queryValues.Get(AmzDate)
	} else {
		// Regular request: credentials come from the Authorization header,
		// except for browser-based uploads via multipart/form-data POST.
		authHeaderField := r.Header[AuthorizationHdr]
		if len(authHeaderField) != 1 {
			if strings.HasPrefix(r.Header.Get(ContentTypeHdr), "multipart/form-data") {
				return c.checkFormData(ctx, r)
			}
			return nil, fmt.Errorf("%w: %v", middleware.ErrNoAuthorizationHeader, authHeaderField)
		}
		authHdr, err = c.parseAuthHeader(authHeaderField[0], r.Header)
		if err != nil {
			return nil, err
		}
		signatureDateTimeStr = r.Header.Get(AmzDate)
		needClientTime = true
	}

	signatureDateTime, err := time.Parse("20060102T150405Z", signatureDateTimeStr)
	if err != nil {
		return nil, fmt.Errorf("failed to parse x-amz-date header field: %w", err)
	}

	if err = c.checkAccessKeyID(authHdr.AccessKeyID); err != nil {
		return nil, err
	}

	cnrID, err := c.getAccessBoxContainer(authHdr.AccessKeyID)
	if err != nil {
		return nil, err
	}

	box, attrs, err := c.cli.GetBox(ctx, cnrID, authHdr.AccessKeyID)
	if err != nil {
		return nil, fmt.Errorf("get box by access key '%s': %w", authHdr.AccessKeyID, err)
	}

	if err = checkFormatHashContentSHA256(r.Header.Get(AmzContentSHA256)); err != nil {
		return nil, err
	}

	clonedRequest := cloneRequest(r, authHdr)
	if err = c.checkSign(r.Context(), authHdr, box, clonedRequest, signatureDateTime); err != nil {
		return nil, err
	}

	result := &middleware.Box{
		AccessBox: box,
		AuthHeaders: &middleware.AuthHeader{
			AccessKeyID: authHdr.AccessKeyID,
			Region:      authHdr.Region,
			SignatureV4: authHdr.Signature,
		},
		Attributes: attrs,
	}
	if needClientTime {
		result.ClientTime = signatureDateTime
	}

	return result, nil
}

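// Illustrative sketch (not part of the original file): a caller typically does
//
//	box, err := center.Authenticate(r.Context(), r)
//	if err != nil { /* map to an S3 error response */ }
//	// box.AccessBox carries the bearer token and secret key,
//	// box.AuthHeaders the parsed credential scope.
//
// How the box is attached to the request context is up to the middleware layer.
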
func (c *Center) getAccessBoxContainer(accessKeyID string) (cid.ID, error) {
	var addr oid.Address
	if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err == nil {
		return addr.Container(), nil
	}

	cnrID, ok := c.settings.AccessBoxContainer()
	if ok {
		return cnrID, nil
	}

	return cid.ID{}, fmt.Errorf("%w: unknown container for creds '%s'", apierr.GetAPIError(apierr.ErrInvalidAccessKeyID), accessKeyID)
}

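// Note (added for clarity, not in the original): an access key ID is an object
// address "<container_id>/<object_id>" with the slash replaced by "0" (see
// getAccessKeyID in the tests); getAccessBoxContainer reverses that
// substitution to recover the container, falling back to the configured
// access-box container when the key ID is not a valid address.
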
func checkFormatHashContentSHA256(hash string) error {
	if !IsStandardContentSHA256(hash) {
		hashBinary, err := hex.DecodeString(hash)
		if err != nil {
			return fmt.Errorf("%w: decode hash: %s: %s", apierr.GetAPIError(apierr.ErrContentSHA256Mismatch),
				hash, err.Error())
		}
		if len(hashBinary) != sha256.Size && len(hash) != 0 {
			return fmt.Errorf("%w: invalid hash size %d", apierr.GetAPIError(apierr.ErrContentSHA256Mismatch), len(hashBinary))
		}
	}

	return nil
}

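// Note (added for clarity, not in the original): X-Amz-Content-Sha256 must be
// either one of the standard sentinel values registered above (for example
// UNSIGNED-PAYLOAD or the STREAMING-* markers), an empty string, or exactly
// 64 hex characters, i.e. a full SHA-256 digest.
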
func (c Center) checkAccessKeyID(accessKeyID string) error {
	if len(c.allowedAccessKeyIDPrefixes) == 0 {
		return nil
	}

	for _, prefix := range c.allowedAccessKeyIDPrefixes {
		if strings.HasPrefix(accessKeyID, prefix) {
			return nil
		}
	}

	return fmt.Errorf("%w: access key ID prefix isn't allowed", apierr.GetAPIError(apierr.ErrAccessDenied))
}

func (c *Center) checkFormData(ctx context.Context, r *http.Request) (*middleware.Box, error) {
	if err := r.ParseMultipartForm(maxFormSizeMemory); err != nil {
		return nil, fmt.Errorf("%w: parse multipart form with max size %d", apierr.GetAPIError(apierr.ErrInvalidArgument), maxFormSizeMemory)
	}

	if err := prepareForm(r.MultipartForm); err != nil {
		return nil, fmt.Errorf("couldn't parse form: %w", err)
	}

	policy := MultipartFormValue(r, "policy")
	if policy == "" {
		return nil, fmt.Errorf("%w: missing policy", middleware.ErrNoAuthorizationHeader)
	}

	creds := MultipartFormValue(r, "x-amz-credential")
	submatches := c.postReg.GetSubmatches(creds)
	if len(submatches) != 4 {
		return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), creds)
	}

	signatureDateTime, err := time.Parse("20060102T150405Z", MultipartFormValue(r, "x-amz-date"))
	if err != nil {
		return nil, fmt.Errorf("failed to parse x-amz-date field: %w", err)
	}

	accessKeyID := submatches["access_key_id"]

	cnrID, err := c.getAccessBoxContainer(accessKeyID)
	if err != nil {
		return nil, err
	}

	box, attrs, err := c.cli.GetBox(ctx, cnrID, accessKeyID)
	if err != nil {
		return nil, fmt.Errorf("get box by accessKeyID '%s': %w", accessKeyID, err)
	}

	secret := box.Gate.SecretKey
	service, region := submatches["service"], submatches["region"]

	// For POST uploads the string to sign is the base64-encoded policy itself.
	signature := SignStr(secret, service, region, signatureDateTime, policy)
	reqSignature := MultipartFormValue(r, "x-amz-signature")
	if signature != reqSignature {
		return nil, fmt.Errorf("%w: %s != %s", apierr.GetAPIError(apierr.ErrSignatureDoesNotMatch),
			reqSignature, signature)
	}

	return &middleware.Box{
		AccessBox: box,
		AuthHeaders: &middleware.AuthHeader{
			AccessKeyID: accessKeyID,
			Region:      region,
			SignatureV4: signature,
		},
		Attributes: attrs,
	}, nil
}

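// Illustrative sketch (not part of the original file): verifying a POST policy
// by hand mirrors what checkFormData does; the literals are made-up values:
//
//	policyB64 := base64.StdEncoding.EncodeToString([]byte(`{"expiration": "..."}`))
//	signTime, _ := time.Parse("20060102T150405Z", "20210809T000000Z")
//	want := SignStr(secretKey, "s3", "us-east-1", signTime, policyB64)
//	// the upload is accepted iff want equals the x-amz-signature form field
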
func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
	otherRequest := r.Clone(context.TODO())
	otherRequest.Header = make(http.Header)

	// Copy only the signed headers: re-signing must see exactly the header
	// set that participated in the original signature.
	for key, val := range r.Header {
		for _, name := range authHeader.SignedFields {
			if strings.EqualFold(key, name) {
				otherRequest.Header[key] = val
			}
		}
	}

	if authHeader.IsPresigned {
		otherQuery := otherRequest.URL.Query()
		otherQuery.Del(AmzSignature)
		otherRequest.URL.RawQuery = otherQuery.Encode()
	}

	return otherRequest
}

func (c *Center) checkSign(ctx context.Context, authHeader *AuthHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
	var signature string

	if !slices.Contains(authHeader.SignedFields, "x-amz-content-sha256") && authHeader.PayloadHash == "" {
		authHeader.PayloadHash = UnsignedPayload
	}

	switch authHeader.Preamble {
	case SignaturePreambleSigV4:
		creds := aws.Credentials{
			AccessKeyID:     authHeader.AccessKeyID,
			SecretAccessKey: box.Gate.SecretKey,
		}
		signer := v4.NewSigner(func(options *v4.SignerOptions) {
			options.DisableURIPathEscaping = true
		})

		if authHeader.IsPresigned {
			if err := checkPresignedDate(authHeader, signatureDateTime); err != nil {
				return err
			}

			signedURI, _, err := signer.PresignHTTP(ctx, creds, request, authHeader.PayloadHash, authHeader.Service, authHeader.Region, signatureDateTime)
			if err != nil {
				return fmt.Errorf("failed to pre-sign temporary HTTP request: %w", err)
			}

			u, err := url.ParseRequestURI(signedURI)
			if err != nil {
				return err
			}
			signature = u.Query().Get(AmzSignature)
		} else {
			if err := signer.SignHTTP(ctx, creds, request, authHeader.PayloadHash, authHeader.Service, authHeader.Region, signatureDateTime); err != nil {
				return fmt.Errorf("failed to sign temporary HTTP request: %w", err)
			}
			signature = c.reg.GetSubmatches(request.Header.Get(AuthorizationHdr))["v4_signature"]
		}
		if authHeader.Signature != signature {
			return fmt.Errorf("%w: %s != %s: headers %v", apierr.GetAPIError(apierr.ErrSignatureDoesNotMatch),
				authHeader.Signature, signature, authHeader.SignedFields)
		}

	case SignaturePreambleSigV4A:
		signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
			options.DisableURIPathEscaping = true
		})

		credAdapter := v4a.SymmetricCredentialAdaptor{
			SymmetricProvider: credentials.NewStaticCredentialsProvider(authHeader.AccessKeyID, box.Gate.SecretKey, ""),
		}

		creds, err := credAdapter.RetrievePrivateKey(request.Context())
		if err != nil {
			return fmt.Errorf("failed to derive asymmetric key from credentials: %w", err)
		}

		if !authHeader.IsPresigned {
			return signer.VerifySignature(creds, request, authHeader.PayloadHash, authHeader.Service,
				strings.Split(authHeader.Region, ","), signatureDateTime, authHeader.Signature)
		}

		if err = checkPresignedDate(authHeader, signatureDateTime); err != nil {
			return err
		}

		return signer.VerifyPresigned(creds, request, authHeader.PayloadHash, authHeader.Service,
			strings.Split(authHeader.Region, ","), signatureDateTime, authHeader.Signature)
	default:
		return fmt.Errorf("invalid preamble: %s", authHeader.Preamble)
	}

	return nil
}

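// Note (added for clarity, not in the original): SigV4 requests are verified
// by re-signing the cloned request with the secret stored in the access box
// and comparing the resulting signature with the one the client sent, while
// SigV4A requests are verified cryptographically via the ECDSA verifier.
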
func checkPresignedDate(authHeader *AuthHeader, signatureDateTime time.Time) error {
	now := time.Now()
	if signatureDateTime.Add(authHeader.Expiration).Before(now) {
		return fmt.Errorf("%w: expired: now %s, signature %s", apierr.GetAPIError(apierr.ErrExpiredPresignRequest),
			now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
	}
	if now.Before(signatureDateTime) {
		return fmt.Errorf("%w: signature time from the future: now %s, signature %s", apierr.GetAPIError(apierr.ErrBadRequest),
			now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
	}
	return nil
}

func SignStr(secret, service, region string, t time.Time, strToSign string) string {
	creds := deriveKey(secret, service, region, t)
	signature := hmacSHA256(creds, []byte(strToSign))
	return hex.EncodeToString(signature)
}

func SignStrV4A(ctx context.Context, cred aws.Credentials, strToSign string) (string, error) {
	credAdapter := v4a.SymmetricCredentialAdaptor{
		SymmetricProvider: credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, ""),
	}

	creds, err := credAdapter.RetrievePrivateKey(ctx)
	if err != nil {
		return "", err
	}

	hash := sha256.New()
	hash.Write([]byte(strToSign))

	sig, err := creds.PrivateKey.Sign(rand.Reader, hash.Sum(nil), crypto.SHA256)
	if err != nil {
		return "", err
	}

	return hex.EncodeToString(sig), nil
}

func deriveKey(secret, service, region string, t time.Time) []byte {
	hmacDate := hmacSHA256([]byte("AWS4"+secret), []byte(t.UTC().Format("20060102")))
	hmacRegion := hmacSHA256(hmacDate, []byte(region))
	hmacService := hmacSHA256(hmacRegion, []byte(service))
	return hmacSHA256(hmacService, []byte("aws4_request"))
}

func hmacSHA256(key []byte, data []byte) []byte {
	hash := hmac.New(sha256.New, key)
	hash.Write(data)
	return hash.Sum(nil)
}

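// Note (added for clarity, not in the original): deriveKey implements the
// standard SigV4 key-derivation chain,
//
//	kDate    = HMAC("AWS4" + secret, yyyymmdd)
//	kRegion  = HMAC(kDate, region)
//	kService = HMAC(kRegion, service)
//	kSigning = HMAC(kService, "aws4_request")
//
// and SignStr returns hex(HMAC(kSigning, stringToSign)).
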
// MultipartFormValue gets the value by key from the multipart form.
func MultipartFormValue(r *http.Request, key string) string {
	if r.MultipartForm == nil {
		return ""
	}
	if vs := r.MultipartForm.Value[key]; len(vs) > 0 {
		return vs[0]
	}

	return ""
}

// prepareForm lowercases form keys and inlines every file part except the
// payload itself (the "file" field) into form values, so that signature
// fields can be read uniformly regardless of how the client sent them.
func prepareForm(form *multipart.Form) error {
	var oldKeysValue []string
	var oldKeysFile []string

	for k, v := range form.Value {
		lowerKey := strings.ToLower(k)
		if lowerKey != k {
			form.Value[lowerKey] = v
			oldKeysValue = append(oldKeysValue, k)
		}
	}
	for _, k := range oldKeysValue {
		delete(form.Value, k)
	}

	for k, v := range form.File {
		lowerKey := strings.ToLower(k)
		if lowerKey != "file" {
			oldKeysFile = append(oldKeysFile, k)
			if len(v) > 0 {
				field, err := v[0].Open()
				if err != nil {
					return fmt.Errorf("file header open: %w", err)
				}

				data, err := io.ReadAll(field)
				if err != nil {
					return fmt.Errorf("read field: %w", err)
				}
				form.Value[lowerKey] = []string{string(data)}
			}
		} else if lowerKey != k {
			form.File[lowerKey] = v
			oldKeysFile = append(oldKeysFile, k)
		}
	}
	for _, k := range oldKeysFile {
		delete(form.File, k)
	}

	return nil
}

@ -1,93 +0,0 @@
//go:build gofuzz
// +build gofuzz

package auth

import (
	"context"
	"strings"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/aws/aws-sdk-go-v2/aws"
	utils "github.com/trailofbits/go-fuzz-utils"
	"go.uber.org/zap"
)

const (
	fuzzSuccessExitCode = 0
	fuzzFailExitCode    = -1
)

func InitFuzzAuthenticate() {
}

func DoFuzzAuthenticate(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	var accessKeyAddr oid.Address
	err = tp.Fill(&accessKeyAddr) // Fill needs a pointer to populate the address.
	if err != nil {
		return fuzzFailExitCode
	}

	accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
	secretKey, err := tp.GetString()
	if err != nil {
		return fuzzFailExitCode
	}
	awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secretKey}

	reqData := RequestData{
		Method:   "GET",
		Endpoint: "http://localhost:8084",
		Bucket:   "my-bucket",
		Object:   "@obj/name",
	}
	presignData := PresignData{
		Service:  "s3",
		Region:   "spb",
		Lifetime: 10 * time.Minute,
		SignTime: time.Now().UTC(),
	}

	req, err := PresignRequest(context.Background(), awsCreds, reqData, presignData, zap.NewNop())
	if req == nil {
		return fuzzFailExitCode
	}

	expBox := &accessbox.Box{
		Gate: &accessbox.GateData{
			SecretKey: secretKey,
		},
	}

	mock := newTokensFrostfsMock()
	mock.addBox(accessKeyAddr, expBox)

	c := &Center{
		cli:     mock,
		reg:     NewRegexpMatcher(AuthorizationFieldRegexp),
		postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
	}

	_, _ = c.Authenticate(context.Background(), req)

	return fuzzSuccessExitCode
}

func FuzzAuthenticate(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzAuthenticate(data)
	})
}
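// Note (added for clarity, not in the original): with the gofuzz build tag set,
// this harness can be driven by Go's native fuzzer, along the lines of
//
//	go test -tags gofuzz -run=^$ -fuzz=FuzzAuthenticate ./api/auth
//
// where the package path is assumed from the repository layout.
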
@ -1,800 +0,0 @@
package auth

import (
	"bytes"
	"context"
	"fmt"
	"mime/multipart"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"strings"
	"testing"
	"time"

	v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
	frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	smithyauth "github.com/aws/smithy-go/auth"
	"github.com/aws/smithy-go/logging"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"
)

type centerSettingsMock struct {
	accessBoxContainer *cid.ID
}

func (c *centerSettingsMock) AccessBoxContainer() (cid.ID, bool) {
	if c.accessBoxContainer == nil {
		return cid.ID{}, false
	}
	return *c.accessBoxContainer, true
}

func TestAuthHeaderParse(t *testing.T) {
	defaultHeader := "AWS4-HMAC-SHA256 Credential=oid0cid/20210809/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=2811ccb9e242f41426738fb1f"

	center := &Center{
		reg:      NewRegexpMatcher(AuthorizationFieldRegexp),
		settings: &centerSettingsMock{},
	}

	for _, tc := range []struct {
		header   string
		err      error
		expected *AuthHeader
	}{
		{
			header: defaultHeader,
			err:    nil,
			expected: &AuthHeader{
				AccessKeyID:  "oid0cid",
				Service:      "s3",
				Region:       "us-east-1",
				Signature:    "2811ccb9e242f41426738fb1f",
				SignedFields: []string{"host", "x-amz-content-sha256", "x-amz-date"},
				Date:         "20210809",
				Preamble:     SignaturePreambleSigV4,
			},
		},
		{
			header:   strings.ReplaceAll(defaultHeader, "Signature=2811ccb9e242f41426738fb1f", ""),
			err:      errors.GetAPIError(errors.ErrAuthorizationHeaderMalformed),
			expected: nil,
		},
	} {
		authHeader, err := center.parseAuthHeader(tc.header, nil)
		require.ErrorIs(t, err, tc.err, tc.header)
		require.Equal(t, tc.expected, authHeader, tc.header)
	}
}

func TestSignature(t *testing.T) {
	secret := "66be461c3cd429941c55daf42fad2b8153e5a2016ba89c9494d97677cc9d3872"
	strToSign := "eyAiZXhwaXJhdGlvbiI6ICIyMDE1LTEyLTMwVDEyOjAwOjAwLjAwMFoiLAogICJjb25kaXRpb25zIjogWwogICAgeyJidWNrZXQiOiAiYWNsIn0sCiAgICBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS8iXSwKICAgIHsic3VjY2Vzc19hY3Rpb25fcmVkaXJlY3QiOiAiaHR0cDovL2xvY2FsaG9zdDo4MDg0L2FjbCJ9LAogICAgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLAogICAgeyJ4LWFtei1tZXRhLXV1aWQiOiAiMTQzNjUxMjM2NTEyNzQifSwKICAgIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLAoKICAgIHsiWC1BbXotQ3JlZGVudGlhbCI6ICI4Vmk0MVBIbjVGMXNzY2J4OUhqMXdmMUU2aERUYURpNndxOGhxTU05NllKdTA1QzVDeUVkVlFoV1E2aVZGekFpTkxXaTlFc3BiUTE5ZDRuR3pTYnZVZm10TS8yMDE1MTIyOS91cy1lYXN0LTEvczMvYXdzNF9yZXF1ZXN0In0sCiAgICB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sCiAgICB7IlgtQW16LURhdGUiOiAiMjAxNTEyMjlUMDAwMDAwWiIgfSwKICAgIHsieC1pZ25vcmUtdG1wIjogInNvbWV0aGluZyIgfQogIF0KfQ=="

	signTime, err := time.Parse("20060102T150405Z", "20151229T000000Z")
	if err != nil {
		panic(err)
	}

	signature := SignStr(secret, "s3", "us-east-1", signTime, strToSign)
	require.Equal(t, "dfbe886241d9e369cf4b329ca0f15eb27306c97aa1022cc0bb5a914c4ef87634", signature)
}

func TestSignatureV4A(t *testing.T) {
	accessKeyID := "2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1"
	secretKey := "00637f53f842573aaa06c2164c598973cd986880987111416cf71f1619def537"

	signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
		options.DisableURIPathEscaping = true
		options.Logger = zaptest.NewLogger(t)
		options.LogSigning = true
	})

	credAdapter := v4a.SymmetricCredentialAdaptor{
		SymmetricProvider: credentials.NewStaticCredentialsProvider(accessKeyID, secretKey, ""),
	}

	bodyStr := `
1b;chunk-signature=3045022100b63692a1b20759bdabd342011823427a8952df75c93174d98ad043abca8052e002201695228a91ba986171b8d0ad20856d3d94ca3614d0a90a50a531ba8e52447b9b**
Testing with the {sdk-java}
0;chunk-signature=30440220455885a2d4e9f705256ca6b0a5a22f7f784780ccbd1c0a371e5db3059c91745b022073259dd44746cbd63261d628a04d25be5a32a974c077c5c2d83c8157fb323b9f****

`
	body := bytes.NewBufferString(bodyStr)

	req, err := http.NewRequest("PUT", "http://localhost:8084/test/tmp", body)
	require.NoError(t, err)
	req.Header.Set("Amz-Sdk-Invocation-Id", "ca3a3cde-7d26-fce6-ed9c-82f7a0573824")
	req.Header.Set("Amz-Sdk-Request", "attempt=2; max=2")
	req.Header.Set("Authorization", "AWS4-ECDSA-P256-SHA256 Credential=2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1/20240904/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-region-set, Signature=30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7")
	req.Header.Set("Content-Length", "360")
	req.Header.Set("Content-Type", "text/plain; charset=UTF-8")
	req.Header.Set("X-Amz-Content-Sha256", "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD")
	req.Header.Set("X-Amz-Date", "20240904T133253Z")
	req.Header.Set("X-Amz-Decoded-Content-Length", "27")
	req.Header.Set("X-Amz-Region-Set", "us-east-1")

	service := "s3"
	regionSet := []string{"us-east-1"}
	signature := "30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7"
	signingTime, err := time.Parse("20060102T150405Z", "20240904T133253Z")
	require.NoError(t, err)
	creds, err := credAdapter.RetrievePrivateKey(req.Context())
	require.NoError(t, err)

	err = signer.VerifySignature(creds, req, "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD", service, regionSet, signingTime, signature)
	require.NoError(t, err)
}

func TestSignatureV4(t *testing.T) {
	signer := v4.NewSigner(func(options *v4.SignerOptions) {
		options.DisableURIPathEscaping = true
		options.Logger = zaptest.NewLogger(t)
		options.LogSigning = true
	})

	creds := aws.Credentials{
		AccessKeyID:     "9CBEGH8T9XfLin2pg7LG8ZxBH1PnZc1yoioViKngrUnu0CbC2mcjpcw9t4Y7AS6zsF5cJGkDhXAx5hxFDKwfZzgj7",
		SecretAccessKey: "8742218da7f905de24f633f44efe02f82c6d2a317ed6f99592627215d17816e3",
	}

	bodyStr := `tmp2
`
	body := bytes.NewBufferString(bodyStr)

	req, err := http.NewRequest("PUT", "http://localhost:8084/main/tmp2", body)
	require.NoError(t, err)
	req.Header.Set("Authorization", "AWS4-HMAC-SHA256 Credential=9CBEGH8T9XfLin2pg7LG8ZxBH1PnZc1yoioViKngrUnu0CbC2mcjpcw9t4Y7AS6zsF5cJGkDhXAx5hxFDKwfZzgj7/20241210/ru/s3/aws4_request, SignedHeaders=content-md5;host;x-amz-content-sha256;x-amz-date, Signature=945664a5bccfd37a1167ca5e718e2b883f68a7ccf7f1044768e7fe58b737b7ed")
	req.Header.Set("Content-Length", "5")
	req.Header.Set("User-Agent", "aws-cli/2.13.2 Python/3.11.4 Linux/6.4.5-x64v1-xanmod1 exe/x86_64.debian.11 prompt/off command/s3api.put-object")
	req.Header.Set("Content-MD5", "DstU4KxdzBj5jTGltfyqgA==")
	req.Header.Set("Expect", "101-continue")
	req.Header.Set("X-Amz-Content-Sha256", "1f9b7417ee5445c41dbe904c3651eb0ba1c12fecff16c1bccd8df3db6e390b5f")
	req.Header.Set("X-Amz-Date", "20241210T114611Z")

	service := "s3"
	region := "ru"
	signature := "945664a5bccfd37a1167ca5e718e2b883f68a7ccf7f1044768e7fe58b737b7ed"
	signingTime, err := time.Parse("20060102T150405Z", "20241210T114611Z")
	require.NoError(t, err)
	cloned := cloneRequest(req, &AuthHeader{SignedFields: []string{"content-md5", "host", "x-amz-content-sha256", "x-amz-date"}})

	err = signer.SignHTTP(cloned.Context(), creds, cloned, "1f9b7417ee5445c41dbe904c3651eb0ba1c12fecff16c1bccd8df3db6e390b5f", service, region, signingTime)
	require.NoError(t, err)
	signatureComputed := NewRegexpMatcher(AuthorizationFieldRegexp).GetSubmatches(cloned.Header.Get(AuthorizationHdr))["v4_signature"]
	require.Equal(t, signature, signatureComputed, "signature mismatched")
}

func TestCheckFormatContentSHA256(t *testing.T) {
	defaultErr := errors.GetAPIError(errors.ErrContentSHA256Mismatch)

	for _, tc := range []struct {
		name  string
		hash  string
		error error
	}{
		{
			name:  "invalid hash format: length and character",
			hash:  "invalid-hash",
			error: defaultErr,
		},
		{
			name:  "invalid hash format: length (63 characters)",
			hash:  "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f7",
			error: defaultErr,
		},
		{
			name:  "invalid hash format: character",
			hash:  "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f7s",
			error: defaultErr,
		},
		{
			name:  "invalid hash format: hash size",
			hash:  "5aadb45520dcd8726b2822a7a78bb53d794f557199d5d4abdedd2c55a4bd6ca73607605c558de3db80c8e86c3196484566163ed1327e82e8b6757d1932113cb8",
			error: defaultErr,
		},
		{
			name:  "unsigned payload",
			hash:  "UNSIGNED-PAYLOAD",
			error: nil,
		},
		{
			name:  "no hash",
			hash:  "",
			error: nil,
		},
		{
			name:  "correct hash format",
			hash:  "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
			error: nil,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			err := checkFormatHashContentSHA256(tc.hash)
			require.ErrorIs(t, err, tc.error)
		})
	}
}

type frostFSMock struct {
	objects map[string]*object.Object
}

func newFrostFSMock() *frostFSMock {
	return &frostFSMock{
		objects: map[string]*object.Object{},
	}
}

func (f *frostFSMock) GetCredsObject(_ context.Context, prm tokens.PrmGetCredsObject) (*object.Object, error) {
	obj, ok := f.objects[prm.AccessKeyID]
	if !ok {
		return nil, fmt.Errorf("not found")
	}

	return obj, nil
}

func (f *frostFSMock) CreateObject(context.Context, tokens.PrmObjectCreate) (oid.ID, error) {
	return oid.ID{}, fmt.Errorf("the mock method is not implemented")
}

func TestAuthenticate(t *testing.T) {
	ctx := context.Background()
	key, err := keys.NewPrivateKey()
	require.NoError(t, err)

	cfg := &cache.Config{
		Size:     10,
		Lifetime: 24 * time.Hour,
		Logger:   zaptest.NewLogger(t),
	}

	gateData := []*accessbox.GateData{{
		BearerToken: &bearer.Token{},
		GateKey:     key.PublicKey(),
	}}

	accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"), false)
	require.NoError(t, err)
	data, err := accessBox.Marshal()
	require.NoError(t, err)

	var obj object.Object
	obj.SetPayload(data)
	addr := oidtest.Address()
	obj.SetContainerID(addr.Container())
	obj.SetID(addr.Object())

	accessKeyID := getAccessKeyID(addr)

	frostfs := newFrostFSMock()
	frostfs.objects[accessKeyID] = &obj

	awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secret.SecretKey}
	defaultSigner := v4.NewSigner()

	service, region := "s3", "default"
	invalidValue := "invalid-value"

	bigConfig := tokens.Config{
		FrostFS:     frostfs,
		Key:         key,
		CacheConfig: cfg,
	}

	for _, tc := range []struct {
		name     string
		region   string
		prefixes []string
		request  *http.Request
		err      bool
		errCode  errors.ErrorCode
	}{
		{
			name:     "valid sign",
			prefixes: []string{addr.Container().String()},
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				err = defaultSigner.SignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now())
				require.NoError(t, err)
				return r
			}(),
			region: region,
		},
		{
			name:     "valid sign with hash",
			prefixes: []string{addr.Container().String()},
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				r.Header.Set(AmzContentSHA256, "")
				err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
				require.NoError(t, err)
				return r
			}(),
			region: region,
		},
		{
			name: "no authorization header",
			request: func() *http.Request {
				return httptest.NewRequest(http.MethodPost, "/", nil)
			}(),
			err: true,
		},
		{
			name: "invalid authorization header",
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				r.Header.Set(AuthorizationHdr, invalidValue)
				return r
			}(),
			err:     true,
			errCode: errors.ErrAuthorizationHeaderMalformed,
		},
		{
			name: "invalid access key id format",
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				cred := aws.Credentials{AccessKeyID: addr.Object().String(), SecretAccessKey: secret.SecretKey}
				err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
				require.NoError(t, err)
				return r
			}(),
			err:     true,
			errCode: errors.ErrInvalidAccessKeyID,
		},
		{
			name:     "not allowed access key id",
			prefixes: []string{addr.Object().String()},
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
				require.NoError(t, err)
				return r
			}(),
			err:     true,
			errCode: errors.ErrAccessDenied,
		},
		{
			name: "invalid access key id value",
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				cred := aws.Credentials{AccessKeyID: accessKeyID[:len(accessKeyID)-4], SecretAccessKey: secret.SecretKey}
				err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
				require.NoError(t, err)
				return r
			}(),
			err:     true,
			errCode: errors.ErrInvalidAccessKeyID,
		},
		{
			name: "unknown access key id",
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				cred := aws.Credentials{AccessKeyID: addr.Object().String() + "0" + addr.Container().String(), SecretAccessKey: secret.SecretKey}
				err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
				require.NoError(t, err)
				return r
			}(),
			err: true,
		},
		{
			name: "invalid signature",
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				cred := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: "secret"}
				err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
				require.NoError(t, err)
				return r
			}(),
			err:     true,
			errCode: errors.ErrSignatureDoesNotMatch,
		},
		{
			name:     "invalid signature - AmzDate",
			prefixes: []string{addr.Container().String()},
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
				r.Header.Set(AmzDate, invalidValue)
				require.NoError(t, err)
				return r
			}(),
			err: true,
		},
		{
			name:     "invalid AmzContentSHA256",
			prefixes: []string{addr.Container().String()},
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
				r.Header.Set(AmzContentSHA256, invalidValue)
				require.NoError(t, err)
				return r
			}(),
			err: true,
		},
		{
			name: "valid presign",
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				r.Header.Set(AmzExpires, "60")
				signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now())
				require.NoError(t, err)
				r.URL, err = url.ParseRequestURI(signedURI)
				require.NoError(t, err)
				return r
			}(),
			region: region,
		},
		{
			name: "valid presign with hash",
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				r.Header.Set(AmzExpires, "60")
				r.Header.Set(AmzContentSHA256, "")
				signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
				require.NoError(t, err)
				r.URL, err = url.ParseRequestURI(signedURI)
				require.NoError(t, err)
				return r
			}(),
			region: region,
		},
		{
			name: "presign, bad X-Amz-Credential",
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				query := url.Values{
					AmzAlgorithm:  []string{"AWS4-HMAC-SHA256"},
					AmzCredential: []string{invalidValue},
				}
				r.URL.RawQuery = query.Encode()
				return r
			}(),
			err: true,
		},
		{
			name: "presign, bad X-Amz-Expires",
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				r.Header.Set(AmzExpires, invalidValue)
				signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now())
				require.NoError(t, err)
				r.URL, err = url.ParseRequestURI(signedURI)
				require.NoError(t, err)
				return r
			}(),
			err:     true,
			errCode: errors.ErrMalformedExpires,
		},
		{
			name: "presign, expired",
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				r.Header.Set(AmzExpires, "60")
				signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now().Add(-time.Minute))
				require.NoError(t, err)
				r.URL, err = url.ParseRequestURI(signedURI)
				require.NoError(t, err)
				return r
			}(),
			err:     true,
			errCode: errors.ErrExpiredPresignRequest,
		},
		{
			name: "presign, signature from future",
			request: func() *http.Request {
				r := httptest.NewRequest(http.MethodPost, "/", nil)
				r.Header.Set(AmzExpires, "60")
				signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now().Add(time.Minute))
				require.NoError(t, err)
				r.URL, err = url.ParseRequestURI(signedURI)
				require.NoError(t, err)
				return r
			}(),
			err:     true,
			errCode: errors.ErrBadRequest,
		},
		{
			name: "presign using original aws sdk",
			request: func() *http.Request {
				cli := s3.NewPresignClient(s3.New(s3.Options{
					Credentials:   credentials.NewStaticCredentialsProvider(awsCreds.AccessKeyID, awsCreds.SecretAccessKey, ""),
					UsePathStyle:  true,
					BaseEndpoint:  aws.String("http://localhost"),
					Region:        region,
					Logger:        logging.NewStandardLogger(os.Stdout),
					ClientLogMode: aws.LogSigning,
				}))

				res, err := cli.PresignGetObject(ctx, &s3.GetObjectInput{
					Bucket: aws.String("bucket"),
					Key:    aws.String("object"),
				})
				require.NoError(t, err)

				r := httptest.NewRequest(http.MethodGet, "http://localhost", nil)
				r.URL, err = url.ParseRequestURI(res.URL)
				require.NoError(t, err)
				return r
			}(),
			region: region,
		},
		{
			name: "presign sigv4a using original aws sdk",
			request: func() *http.Request {
				cli := s3.NewPresignClient(s3.New(s3.Options{
					Credentials:        credentials.NewStaticCredentialsProvider(awsCreds.AccessKeyID, awsCreds.SecretAccessKey, ""),
					UsePathStyle:       true,
					BaseEndpoint:       aws.String("http://localhost"),
					Region:             region,
					Logger:             logging.NewStandardLogger(os.Stdout),
					ClientLogMode:      aws.LogSigning,
					AuthSchemeResolver: resolver{},
				}))

				res, err := cli.PresignGetObject(ctx, &s3.GetObjectInput{
					Bucket: aws.String("bucket"),
					Key:    aws.String("object"),
				})
				require.NoError(t, err)

				r := httptest.NewRequest(http.MethodGet, "http://localhost", nil)
				r.URL, err = url.ParseRequestURI(res.URL)
				require.NoError(t, err)
				return r
			}(),
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			creds := tokens.New(bigConfig)
			cntr := New(creds, tc.prefixes, &centerSettingsMock{})
			box, err := cntr.Authenticate(ctx, tc.request)

			if tc.err {
				require.Error(t, err)
				if tc.errCode > 0 {
					err = frosterr.UnwrapErr(err)
					require.Equal(t, errors.GetAPIError(tc.errCode), err)
				}
			} else {
				require.NoError(t, err)
				require.Equal(t, accessKeyID, box.AuthHeaders.AccessKeyID)
				require.Equal(t, tc.region, box.AuthHeaders.Region)
				require.Equal(t, secret.SecretKey, box.AccessBox.Gate.SecretKey)
			}
		})
	}
}

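// Note (added for clarity, not in the original): this stub resolver forces the
// SDK to choose the SigV4A auth scheme, so the presign client above emits
// AWS4-ECDSA-P256-SHA256 query parameters instead of plain SigV4 ones.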
type resolver struct{}

func (r resolver) ResolveAuthSchemes(context.Context, *s3.AuthResolverParameters) ([]*smithyauth.Option, error) {
	return []*smithyauth.Option{{SchemeID: smithyauth.SchemeIDSigV4A}}, nil
}

func TestHTTPPostAuthenticate(t *testing.T) {
	const (
		policyBase64     = "eyJleHBpcmF0aW9uIjogIjIwMjUtMTItMDFUMTI6MDA6MDAuMDAwWiIsImNvbmRpdGlvbnMiOiBbCiBbInN0YXJ0cy13aXRoIiwgIiR4LWFtei1jcmVkZW50aWFsIiwgIiJdLAogWyJzdGFydHMtd2l0aCIsICIkeC1hbXotZGF0ZSIsICIiXQpdfQ=="
		invalidValue     = "invalid-value"
		defaultFieldName = "file"
		service          = "s3"
		region           = "default"
	)

	ctx := context.Background()
	key, err := keys.NewPrivateKey()
	require.NoError(t, err)

	cfg := &cache.Config{
		Size:     10,
		Lifetime: 24 * time.Hour,
		Logger:   zaptest.NewLogger(t),
	}

	gateData := []*accessbox.GateData{{
		BearerToken: &bearer.Token{},
		GateKey:     key.PublicKey(),
	}}

	accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"), false)
	require.NoError(t, err)
	data, err := accessBox.Marshal()
	require.NoError(t, err)

	var obj object.Object
	obj.SetPayload(data)
	addr := oidtest.Address()
	obj.SetContainerID(addr.Container())
	obj.SetID(addr.Object())

	accessKeyID := getAccessKeyID(addr)

	frostfs := newFrostFSMock()
	frostfs.objects[accessKeyID] = &obj

	invalidAccessKeyID := oidtest.Address().String() + "0" + oidtest.Address().Object().String()

	timeToSign := time.Now()
	timeToSignStr := timeToSign.Format("20060102T150405Z")

	bigConfig := tokens.Config{
		FrostFS:     frostfs,
		Key:         key,
		CacheConfig: cfg,
	}

	for _, tc := range []struct {
		name     string
		prefixes []string
		request  *http.Request
		err      bool
		errCode  errors.ErrorCode
	}{
		{
			name: "HTTP POST valid",
			request: func() *http.Request {
				creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

				return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
			}(),
		},
		{
			name: "HTTP POST valid with custom field name",
			request: func() *http.Request {
				creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

				return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, "files")
			}(),
		},
		{
			name: "HTTP POST valid with field name with a capital letter",
			request: func() *http.Request {
				creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

				return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, "File")
			}(),
		},
		{
			name: "HTTP POST invalid multipart form",
			request: func() *http.Request {
				req := httptest.NewRequest(http.MethodPost, "/", nil)
				req.Header.Set(ContentTypeHdr, "multipart/form-data")

				return req
			}(),
			err:     true,
			errCode: errors.ErrInvalidArgument,
		},
		{
			name: "HTTP POST invalid signature date time",
			request: func() *http.Request {
				creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

				return getRequestWithMultipartForm(t, policyBase64, creds, invalidValue, sign, defaultFieldName)
			}(),
			err: true,
		},
		{
			name: "HTTP POST invalid creds",
			request: func() *http.Request {
				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

				return getRequestWithMultipartForm(t, policyBase64, invalidValue, timeToSignStr, sign, defaultFieldName)
			}(),
			err:     true,
			errCode: errors.ErrAuthorizationHeaderMalformed,
		},
		{
			name: "HTTP POST missing policy",
			request: func() *http.Request {
				creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

				return getRequestWithMultipartForm(t, "", creds, timeToSignStr, sign, defaultFieldName)
			}(),
			err: true,
		},
		{
			name: "HTTP POST invalid accessKeyId",
			request: func() *http.Request {
				creds := getCredsStr(invalidValue, timeToSignStr, region, service)
				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

				return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
			}(),
			err: true,
		},
		{
			name: "HTTP POST invalid accessKeyId - a non-existent box",
			request: func() *http.Request {
				creds := getCredsStr(invalidAccessKeyID, timeToSignStr, region, service)
				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

				return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
			}(),
			err: true,
		},
		{
			name: "HTTP POST invalid signature",
			request: func() *http.Request {
				creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
				sign := SignStr(secret.SecretKey, service, region, timeToSign, invalidValue)

				return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
			}(),
			err:     true,
			errCode: errors.ErrSignatureDoesNotMatch,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			creds := tokens.New(bigConfig)
			cntr := New(creds, tc.prefixes, &centerSettingsMock{})
			box, err := cntr.Authenticate(ctx, tc.request)

			if tc.err {
				require.Error(t, err)
				if tc.errCode > 0 {
					err = frosterr.UnwrapErr(err)
					require.Equal(t, errors.GetAPIError(tc.errCode), err)
				}
			} else {
				require.NoError(t, err)
				require.Equal(t, secret.SecretKey, box.AccessBox.Gate.SecretKey)
				require.Equal(t, accessKeyID, box.AuthHeaders.AccessKeyID)
			}
		})
	}
}

func getCredsStr(accessKeyID, timeToSign, region, service string) string {
	return accessKeyID + "/" + timeToSign + "/" + region + "/" + service + "/aws4_request"
}

func getRequestWithMultipartForm(t *testing.T, policy, creds, date, sign, fieldName string) *http.Request {
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	defer writer.Close()

	err := writer.WriteField("policy", policy)
	require.NoError(t, err)
	err = writer.WriteField(AmzCredential, creds)
	require.NoError(t, err)
	err = writer.WriteField(AmzDate, date)
	require.NoError(t, err)
	err = writer.WriteField(AmzSignature, sign)
	require.NoError(t, err)
	_, err = writer.CreateFormFile(fieldName, "test.txt")
	require.NoError(t, err)

	req := httptest.NewRequest(http.MethodPost, "/", body)
	req.Header.Set(ContentTypeHdr, writer.FormDataContentType())

	return req
}

func getAccessKeyID(addr oid.Address) string {
	return strings.ReplaceAll(addr.EncodeToString(), "/", "0")
}

@ -1,113 +0,0 @@
package auth

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/smithy/encoding/httpbinding"
	v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"go.uber.org/zap"
)

type RequestData struct {
	Method   string
	Endpoint string
	Bucket   string
	Object   string
}

type PresignData struct {
	Service  string
	Region   string
	Lifetime time.Duration
	SignTime time.Time
	Headers  map[string]string
}

// PresignRequest forms a pre-signed request to access objects without AWS credentials.
func PresignRequest(ctx context.Context, creds aws.Credentials, reqData RequestData, presignData PresignData, log *zap.Logger) (*http.Request, error) {
	urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, httpbinding.EscapePath(reqData.Bucket, false), httpbinding.EscapePath(reqData.Object, false))
	req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create new request: %w", err)
	}

	for k, v := range presignData.Headers {
		req.Header.Set(k, v) // TODO: consider filtering system headers here, or leave that responsibility to the caller.
	}
	req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))
	req.Header.Set(AmzExpires, strconv.FormatFloat(presignData.Lifetime.Round(time.Second).Seconds(), 'f', 0, 64))

	signer := v4.NewSigner(func(options *v4.SignerOptions) {
		options.DisableURIPathEscaping = true
		options.LogSigning = true
		options.Logger = log
	})

	payloadHash := presignData.Headers[AmzContentSHA256]
	if payloadHash == "" {
		payloadHash = UnsignedPayload
	}

	signedURI, _, err := signer.PresignHTTP(ctx, creds, req, payloadHash, presignData.Service, presignData.Region, presignData.SignTime)
	if err != nil {
		return nil, fmt.Errorf("presign: %w", err)
	}

	if req.URL, err = url.ParseRequestURI(signedURI); err != nil {
		return nil, fmt.Errorf("parse signed URI: %w", err)
	}

	return req, nil
}

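// Illustrative sketch (not part of the original file): producing a pre-signed
// GET URL; the endpoint, bucket, and lifetime are made-up values:
//
//	req, err := PresignRequest(ctx, awsCreds,
//		RequestData{Method: "GET", Endpoint: "http://localhost:8084", Bucket: "my-bucket", Object: "obj"},
//		PresignData{Service: "s3", Region: "us-east-1", Lifetime: 15 * time.Minute, SignTime: time.Now().UTC()},
//		zap.NewNop())
//	// req.URL now carries X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, etc.
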
// PresignRequestV4a forms pre-signed request to access objects without aws credentials.
|
|
||||||
func PresignRequestV4a(cred aws.Credentials, reqData RequestData, presignData PresignData, log *zap.Logger) (*http.Request, error) {
|
|
||||||
urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, httpbinding.EscapePath(reqData.Bucket, false), httpbinding.EscapePath(reqData.Object, false))
|
|
||||||
req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create new request: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range presignData.Headers {
|
|
||||||
req.Header.Set(k, v) // maybe we should filter system header (or keep responsibility on caller)
|
|
||||||
}
|
|
||||||
|
|
||||||
req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))
|
|
||||||
req.Header.Set(AmzExpires, strconv.FormatFloat(presignData.Lifetime.Round(time.Second).Seconds(), 'f', 0, 64))
|
|
||||||
|
|
||||||
signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
|
|
||||||
options.DisableURIPathEscaping = true
|
|
||||||
options.LogSigning = true
|
|
||||||
options.Logger = log
|
|
||||||
})
|
|
||||||
|
|
||||||
credAdapter := v4a.SymmetricCredentialAdaptor{
|
|
||||||
SymmetricProvider: credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, ""),
|
|
||||||
}
|
|
||||||
|
|
||||||
creds, err := credAdapter.RetrievePrivateKey(req.Context())
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to derive assymetric key from credentials: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
payloadHash := presignData.Headers[AmzContentSHA256]
|
|
||||||
if payloadHash == "" {
|
|
||||||
payloadHash = UnsignedPayload
|
|
||||||
}
|
|
||||||
|
|
||||||
presignedURL, _, err := signer.PresignHTTP(req.Context(), creds, req, payloadHash, presignData.Service, []string{presignData.Region}, presignData.SignTime)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("presign: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return http.NewRequest(reqData.Method, presignedURL, nil)
|
|
||||||
}
|
|
|
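For orientation, a minimal sketch of calling the signer above (the endpoint, bucket, and key values here are made up; the test file that follows exercises the same path end to end):

    creds := aws.Credentials{AccessKeyID: "<key-id>", SecretAccessKey: "<secret>"}
    reqData := RequestData{Method: "GET", Endpoint: "http://localhost:8084", Bucket: "my-bucket", Object: "obj"}
    presignData := PresignData{Service: "s3", Region: "us-east-1", Lifetime: 10 * time.Minute, SignTime: time.Now().UTC()}
    req, err := PresignRequest(context.Background(), creds, reqData, presignData, zap.NewNop())
    if err != nil {
        // handle error
    }
    // req.URL now carries X-Amz-Signature and the other query parameters;
    // it can be sent by any HTTP client without AWS credentials.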
@ -1,203 +0,0 @@
package auth

import (
    "context"
    "fmt"
    "net/http"
    "strings"
    "testing"
    "time"

    v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
    apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "github.com/aws/aws-sdk-go-v2/aws"
    credentialsv2 "github.com/aws/aws-sdk-go-v2/credentials"
    "github.com/stretchr/testify/require"
    "go.uber.org/zap/zaptest"
)

var _ tokens.Credentials = (*credentialsMock)(nil)

type credentialsMock struct {
    boxes map[string]*accessbox.Box
}

func newTokensFrostfsMock() *credentialsMock {
    return &credentialsMock{
        boxes: make(map[string]*accessbox.Box),
    }
}

func (m credentialsMock) addBox(addr oid.Address, box *accessbox.Box) {
    m.boxes[getAccessKeyID(addr)] = box
}

func (m credentialsMock) GetBox(_ context.Context, _ cid.ID, accessKeyID string) (*accessbox.Box, []object.Attribute, error) {
    box, ok := m.boxes[accessKeyID]
    if !ok {
        return nil, nil, &apistatus.ObjectNotFound{}
    }

    return box, nil, nil
}

func (m credentialsMock) Put(context.Context, tokens.CredentialsParam) (oid.Address, error) {
    return oid.Address{}, nil
}

func (m credentialsMock) Update(context.Context, tokens.CredentialsParam) (oid.Address, error) {
    return oid.Address{}, nil
}

func TestCheckSign(t *testing.T) {
    ctx := context.Background()

    var accessKeyAddr oid.Address
    err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
    require.NoError(t, err)

    accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
    secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
    awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secretKey}

    reqData := RequestData{
        Method:   "GET",
        Endpoint: "http://localhost:8084",
        Bucket:   "my-bucket",
        Object:   "@obj/name",
    }
    presignData := PresignData{
        Service:  "s3",
        Region:   "spb",
        Lifetime: 10 * time.Minute,
        SignTime: time.Now().UTC(),
        Headers: map[string]string{
            ContentTypeHdr: "text/plain",
        },
    }

    req, err := PresignRequest(ctx, awsCreds, reqData, presignData, zaptest.NewLogger(t))
    require.NoError(t, err)

    expBox := &accessbox.Box{
        Gate: &accessbox.GateData{
            SecretKey: secretKey,
        },
    }

    mock := newTokensFrostfsMock()
    mock.addBox(accessKeyAddr, expBox)

    c := &Center{
        cli:      mock,
        reg:      NewRegexpMatcher(AuthorizationFieldRegexp),
        postReg:  NewRegexpMatcher(postPolicyCredentialRegexp),
        settings: &centerSettingsMock{},
    }
    box, err := c.Authenticate(ctx, req)
    require.NoError(t, err)
    require.EqualValues(t, expBox, box.AccessBox)
}

func TestCheckSignV4a(t *testing.T) {
    ctx := context.Background()

    var accessKeyAddr oid.Address
    err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
    require.NoError(t, err)

    accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
    secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
    awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secretKey}

    reqData := RequestData{
        Method:   "GET",
        Endpoint: "http://localhost:8084",
        Bucket:   "my-bucket",
        Object:   "@obj/name",
    }
    presignData := PresignData{
        Service:  "s3",
        Region:   "spb",
        Lifetime: 10 * time.Minute,
        SignTime: time.Now().UTC(),
        Headers: map[string]string{
            ContentTypeHdr: "text/plain",
        },
    }

    req, err := PresignRequestV4a(awsCreds, reqData, presignData, zaptest.NewLogger(t))
    require.NoError(t, err)

    req.Header.Set(ContentTypeHdr, "text/plain")

    expBox := &accessbox.Box{
        Gate: &accessbox.GateData{
            SecretKey: secretKey,
        },
    }

    mock := newTokensFrostfsMock()
    mock.addBox(accessKeyAddr, expBox)

    c := &Center{
        cli:     mock,
        regV4a:  NewRegexpMatcher(AuthorizationFieldV4aRegexp),
        postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
    }
    box, err := c.Authenticate(ctx, req)
    require.NoError(t, err)
    require.EqualValues(t, expBox, box.AccessBox)
}

func TestPresignRequestV4a(t *testing.T) {
    var accessKeyAddr oid.Address
    err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
    require.NoError(t, err)

    accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
    secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"

    signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
        options.DisableURIPathEscaping = true
        options.LogSigning = true
        options.Logger = zaptest.NewLogger(t)
    })

    credAdapter := v4a.SymmetricCredentialAdaptor{
        SymmetricProvider: credentialsv2.NewStaticCredentialsProvider(accessKeyID, secretKey, ""),
    }

    creds, err := credAdapter.RetrievePrivateKey(context.TODO())
    require.NoError(t, err)

    signingTime := time.Now()
    service := "s3"
    regionSet := []string{"spb"}

    req, err := http.NewRequest("GET", "http://localhost:8084/bucket/object", nil)
    require.NoError(t, err)
    req.Header.Set(AmzExpires, "600")

    presignedURL, hdr, err := signer.PresignHTTP(req.Context(), creds, req, "", service, regionSet, signingTime)
    require.NoError(t, err)

    fmt.Println(presignedURL)
    fmt.Println(hdr)

    signature := req.URL.Query().Get(AmzSignature)

    r, err := http.NewRequest("GET", presignedURL, nil)
    require.NoError(t, err)
    query := r.URL.Query()
    query.Del(AmzSignature)
    r.URL.RawQuery = query.Encode()

    err = signer.VerifyPresigned(creds, r, "", service, regionSet, signingTime, signature)
    require.NoError(t, err)
}
@ -1,26 +0,0 @@
package auth

import "regexp"

type RegexpSubmatcher struct {
    re *regexp.Regexp
}

// NewRegexpMatcher creates a new regexp submatcher.
func NewRegexpMatcher(re *regexp.Regexp) *RegexpSubmatcher {
    return &RegexpSubmatcher{re: re}
}

// GetSubmatches returns named submatches from the provided string. Zero length indicates no match.
func (r *RegexpSubmatcher) GetSubmatches(target string) map[string]string {
    matches := r.re.FindStringSubmatch(target)
    l := len(matches)

    sub := make(map[string]string, l)
    for i, name := range r.re.SubexpNames() {
        if i > 0 && i <= l {
            sub[name] = matches[i]
        }
    }
    return sub
}
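A sketch of how the submatcher resolves named groups (the pattern below is illustrative, not one of the gateway's authorization regexps):

    re := regexp.MustCompile(`(?P<bucket>[^/]+)/(?P<object>.+)`)
    sub := NewRegexpMatcher(re).GetSubmatches("my-bucket/path/to/obj")
    // sub["bucket"] == "my-bucket", sub["object"] == "path/to/obj";
    // a non-matching target yields an empty map (len(sub) == 0).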
@ -1,37 +0,0 @@
// This file is part of https://github.com/aws/smithy-go/blob/f0c6adfdec6e40bb8bb2920a40d016943b4ad762/encoding/httpbinding/path_replace.go

package httpbinding

import (
    "bytes"
    "fmt"
)

// EscapePath escapes part of a URL path in Amazon style.
func EscapePath(path string, encodeSep bool) string {
    var buf bytes.Buffer
    for i := 0; i < len(path); i++ {
        c := path[i]
        if noEscape[c] || (c == '/' && !encodeSep) {
            buf.WriteByte(c)
        } else {
            fmt.Fprintf(&buf, "%%%02X", c)
        }
    }
    return buf.String()
}

var noEscape [256]bool

func init() {
    for i := 0; i < len(noEscape); i++ {
        // AWS expects every character except these to be escaped
        noEscape[i] = (i >= 'A' && i <= 'Z') ||
            (i >= 'a' && i <= 'z') ||
            (i >= '0' && i <= '9') ||
            i == '-' ||
            i == '.' ||
            i == '_' ||
            i == '~'
    }
}
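A few illustrative inputs for the escaper above (values chosen for the example):

    EscapePath("@obj/name", false) // "%40obj/name"   – '/' kept as a separator
    EscapePath("@obj/name", true)  // "%40obj%2Fname" – '/' escaped as well
    EscapePath("a b.txt", false)   // "a%20b.txt"     – space escaped, '.' left alone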
@ -1,144 +0,0 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/credentials.go
// with changes:
// * use `time.Now()` instead of `sdk.NowTime()`

package v4a

import (
    "context"
    "crypto/ecdsa"
    "fmt"
    "sync"
    "sync/atomic"
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
)

// Credentials is Context, ECDSA, and Optional Session Token that can be used
// to sign requests using SigV4a
type Credentials struct {
    Context      string
    PrivateKey   *ecdsa.PrivateKey
    SessionToken string

    // Time the credentials will expire.
    CanExpire bool
    Expires   time.Time
}

// Expired returns whether the credentials have expired.
func (v Credentials) Expired() bool {
    if v.CanExpire {
        return !v.Expires.After(time.Now())
    }

    return false
}

// HasKeys returns whether the credentials keys are set.
func (v Credentials) HasKeys() bool {
    return len(v.Context) > 0 && v.PrivateKey != nil
}

// SymmetricCredentialAdaptor wraps a SigV4 AccessKey/SecretKey provider and adapts the credentials
// to an ECDSA PrivateKey for signing with SigV4a
type SymmetricCredentialAdaptor struct {
    SymmetricProvider aws.CredentialsProvider

    asymmetric atomic.Value
    m          sync.Mutex
}

// Retrieve retrieves symmetric credentials from the underlying provider.
func (s *SymmetricCredentialAdaptor) Retrieve(ctx context.Context) (aws.Credentials, error) {
    symCreds, err := s.retrieveFromSymmetricProvider(ctx)
    if err != nil {
        return aws.Credentials{}, err
    }

    if asymCreds := s.getCreds(); asymCreds == nil {
        return symCreds, nil
    }

    s.m.Lock()
    defer s.m.Unlock()

    asymCreds := s.getCreds()
    if asymCreds == nil {
        return symCreds, nil
    }

    // if the context does not match the access key id, clear it
    if asymCreds.Context != symCreds.AccessKeyID {
        s.asymmetric.Store((*Credentials)(nil))
    }

    return symCreds, nil
}

// RetrievePrivateKey returns credentials suitable for SigV4a signing
func (s *SymmetricCredentialAdaptor) RetrievePrivateKey(ctx context.Context) (Credentials, error) {
    if asymCreds := s.getCreds(); asymCreds != nil {
        return *asymCreds, nil
    }

    s.m.Lock()
    defer s.m.Unlock()

    if asymCreds := s.getCreds(); asymCreds != nil {
        return *asymCreds, nil
    }

    symmetricCreds, err := s.retrieveFromSymmetricProvider(ctx)
    if err != nil {
        return Credentials{}, fmt.Errorf("failed to retrieve symmetric credentials: %v", err)
    }

    privateKey, err := deriveKeyFromAccessKeyPair(symmetricCreds.AccessKeyID, symmetricCreds.SecretAccessKey)
    if err != nil {
        return Credentials{}, fmt.Errorf("failed to derive asymmetric key from credentials")
    }

    creds := Credentials{
        Context:      symmetricCreds.AccessKeyID,
        PrivateKey:   privateKey,
        SessionToken: symmetricCreds.SessionToken,
        CanExpire:    symmetricCreds.CanExpire,
        Expires:      symmetricCreds.Expires,
    }

    s.asymmetric.Store(&creds)

    return creds, nil
}

func (s *SymmetricCredentialAdaptor) getCreds() *Credentials {
    v := s.asymmetric.Load()

    if v == nil {
        return nil
    }

    c := v.(*Credentials)
    if c != nil && c.HasKeys() && !c.Expired() {
        return c
    }

    return nil
}

func (s *SymmetricCredentialAdaptor) retrieveFromSymmetricProvider(ctx context.Context) (aws.Credentials, error) {
    credentials, err := s.SymmetricProvider.Retrieve(ctx)
    if err != nil {
        return aws.Credentials{}, err
    }

    return credentials, nil
}

// CredentialsProvider is the interface for a provider to retrieve credentials
// to sign requests with.
type CredentialsProvider interface {
    RetrievePrivateKey(context.Context) (Credentials, error)
}
@ -1,79 +0,0 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/credentials_test.go

package v4a

import (
    "context"
    "fmt"
    "testing"

    "github.com/aws/aws-sdk-go-v2/aws"
)

type rotatingCredsProvider struct {
    count int
    fail  chan struct{}
}

func (r *rotatingCredsProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
    select {
    case <-r.fail:
        return aws.Credentials{}, fmt.Errorf("rotatingCredsProvider error")
    default:
    }
    credentials := aws.Credentials{
        AccessKeyID:     fmt.Sprintf("ACCESS_KEY_ID_%d", r.count),
        SecretAccessKey: fmt.Sprintf("SECRET_ACCESS_KEY_%d", r.count),
        SessionToken:    fmt.Sprintf("SESSION_TOKEN_%d", r.count),
    }
    return credentials, nil
}

func TestSymmetricCredentialAdaptor(t *testing.T) {
    provider := &rotatingCredsProvider{
        count: 0,
        fail:  make(chan struct{}),
    }

    adaptor := &SymmetricCredentialAdaptor{SymmetricProvider: provider}

    if symCreds, err := adaptor.Retrieve(context.Background()); err != nil {
        t.Fatalf("expect no error, got %v", err)
    } else if !symCreds.HasKeys() {
        t.Fatalf("expect symmetric credentials to have keys")
    }

    if load := adaptor.asymmetric.Load(); load != nil {
        t.Errorf("expect asymmetric credentials to be nil")
    }

    if asymCreds, err := adaptor.RetrievePrivateKey(context.Background()); err != nil {
        t.Fatalf("expect no error, got %v", err)
    } else if !asymCreds.HasKeys() {
        t.Fatalf("expect asymmetric credentials to have keys")
    }

    if _, err := adaptor.Retrieve(context.Background()); err != nil {
        t.Fatalf("expect no error, got %v", err)
    }

    if load := adaptor.asymmetric.Load(); load.(*Credentials) == nil {
        t.Errorf("expect asymmetric credentials to be not nil")
    }

    provider.count++

    if _, err := adaptor.Retrieve(context.Background()); err != nil {
        t.Fatalf("expect no error, got %v", err)
    }

    if load := adaptor.asymmetric.Load(); load.(*Credentials) != nil {
        t.Errorf("expect asymmetric credentials to be nil")
    }

    close(provider.fail) // All requests to the original provider will fail from this point on.
    _, err := adaptor.Retrieve(context.Background())
    if err == nil {
        t.Error("expect error, got nil")
    }
}
@ -1,32 +0,0 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/compare.go

package crypto

import "fmt"

// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison
// of the two byte slices, assuming they represent a big-endian number.
//
// error if len(x) != len(y)
// -1 if x < y
// 0 if x == y
// +1 if x > y
func ConstantTimeByteCompare(x, y []byte) (int, error) {
    if len(x) != len(y) {
        return 0, fmt.Errorf("slice lengths do not match")
    }

    xLarger, yLarger := 0, 0

    for i := 0; i < len(x); i++ {
        xByte, yByte := int(x[i]), int(y[i])

        x := ((yByte - xByte) >> 8) & 1
        y := ((xByte - yByte) >> 8) & 1

        xLarger |= x &^ yLarger
        yLarger |= y &^ xLarger
    }

    return xLarger - yLarger, nil
}
@ -1,62 +0,0 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/compare_test.go

package crypto

import (
    "bytes"
    "math/big"
    "testing"
)

func TestConstantTimeByteCompare(t *testing.T) {
    cases := []struct {
        x, y      []byte
        r         int
        expectErr bool
    }{
        {x: []byte{}, y: []byte{}, r: 0},
        {x: []byte{40}, y: []byte{30}, r: 1},
        {x: []byte{30}, y: []byte{40}, r: -1},
        {x: []byte{60, 40, 30, 10, 20}, y: []byte{50, 30, 20, 0, 10}, r: 1},
        {x: []byte{50, 30, 20, 0, 10}, y: []byte{60, 40, 30, 10, 20}, r: -1},
        {x: nil, y: []byte{}, r: 0},
        {x: []byte{}, y: nil, r: 0},
        {x: []byte{}, y: []byte{10}, expectErr: true},
        {x: []byte{10}, y: []byte{}, expectErr: true},
        {x: []byte{10, 20}, y: []byte{10}, expectErr: true},
    }

    for _, tt := range cases {
        compare, err := ConstantTimeByteCompare(tt.x, tt.y)
        if (err != nil) != tt.expectErr {
            t.Fatalf("expectErr=%v, got %v", tt.expectErr, err)
        }
        if e, a := tt.r, compare; e != a {
            t.Errorf("expect %v, got %v", e, a)
        }
    }
}

func BenchmarkConstantTimeCompare(b *testing.B) {
    x, y := big.NewInt(1023), big.NewInt(1024)
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        ConstantTimeByteCompare(x.Bytes(), y.Bytes())
    }
}

func BenchmarkCompare(b *testing.B) {
    x, y := big.NewInt(1023).Bytes(), big.NewInt(1024).Bytes()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        bytes.Compare(x, y)
    }
}

func mustBigInt(s string) *big.Int {
    b, ok := (&big.Int{}).SetString(s, 16)
    if !ok {
        panic("can't parse as big.Int")
    }
    return b
}
@ -1,115 +0,0 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/ecc.go

package crypto

import (
    "bytes"
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/hmac"
    "encoding/asn1"
    "encoding/binary"
    "fmt"
    "hash"
    "math"
    "math/big"
)

type ecdsaSignature struct {
    R, S *big.Int
}

// ECDSAKey takes the given elliptic curve and private key (d) byte slice
// and returns the private ECDSA key.
func ECDSAKey(curve elliptic.Curve, d []byte) *ecdsa.PrivateKey {
    return ECDSAKeyFromPoint(curve, (&big.Int{}).SetBytes(d))
}

// ECDSAKeyFromPoint takes the given elliptic curve and point and returns the
// private and public keypair
func ECDSAKeyFromPoint(curve elliptic.Curve, d *big.Int) *ecdsa.PrivateKey {
    pX, pY := curve.ScalarBaseMult(d.Bytes())

    privKey := &ecdsa.PrivateKey{
        PublicKey: ecdsa.PublicKey{
            Curve: curve,
            X:     pX,
            Y:     pY,
        },
        D: d,
    }

    return privKey
}

// ECDSAPublicKey takes the provided curve and (x, y) coordinates and returns
// *ecdsa.PublicKey. Returns an error if the given points are not on the curve.
func ECDSAPublicKey(curve elliptic.Curve, x, y []byte) (*ecdsa.PublicKey, error) {
    xPoint := (&big.Int{}).SetBytes(x)
    yPoint := (&big.Int{}).SetBytes(y)

    if !curve.IsOnCurve(xPoint, yPoint) {
        return nil, fmt.Errorf("point(%v, %v) is not on the given curve", xPoint.String(), yPoint.String())
    }

    return &ecdsa.PublicKey{
        Curve: curve,
        X:     xPoint,
        Y:     yPoint,
    }, nil
}

// VerifySignature takes the provided public key, hash, and asn1 encoded signature and returns
// whether the given signature is valid.
func VerifySignature(key *ecdsa.PublicKey, hash []byte, signature []byte) (bool, error) {
    var ecdsaSignature ecdsaSignature

    _, err := asn1.Unmarshal(signature, &ecdsaSignature)
    if err != nil {
        return false, err
    }

    return ecdsa.Verify(key, hash, ecdsaSignature.R, ecdsaSignature.S), nil
}

// HMACKeyDerivation provides an implementation of a NIST-800-108 KDF (Key Derivation Function) in Counter Mode.
// For the purposes of this implementation HMAC is used as the PRF (Pseudorandom function), where the value of
// `r` is defined as a 4 byte counter.
func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, context []byte) ([]byte, error) {
    // verify that we won't overflow the counter
    n := int64(math.Ceil((float64(bitLen) / 8) / float64(hash().Size())))
    if n > 0x7FFFFFFF {
        return nil, fmt.Errorf("unable to derive key of size %d using 32-bit counter", bitLen)
    }

    // verify the requested bit length is not larger than the length encoding size
    if int64(bitLen) > 0x7FFFFFFF {
        return nil, fmt.Errorf("bitLen is greater than 32-bits")
    }

    fixedInput := bytes.NewBuffer(nil)
    fixedInput.Write(label)
    fixedInput.WriteByte(0x00)
    fixedInput.Write(context)
    if err := binary.Write(fixedInput, binary.BigEndian, int32(bitLen)); err != nil {
        return nil, fmt.Errorf("failed to write bit length to fixed input string: %v", err)
    }

    var output []byte

    h := hmac.New(hash, key)

    for i := int64(1); i <= n; i++ {
        h.Reset()
        if err := binary.Write(h, binary.BigEndian, int32(i)); err != nil {
            return nil, err
        }
        _, err := h.Write(fixedInput.Bytes())
        if err != nil {
            return nil, err
        }
        output = append(output, h.Sum(nil)...)
    }

    return output[:bitLen/8], nil
}
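A small sketch of driving the counter-mode KDF above (the label and context bytes are placeholders; the SigV4a signer itself feeds it values derived from the access key pair):

    key, err := HMACKeyDerivation(sha256.New, 256, []byte("secret"), []byte("LABEL"), []byte("CONTEXT"))
    if err != nil {
        // handle error
    }
    // key is bitLen/8 == 32 bytes: each block is
    // HMAC-SHA256(secret, counter || label || 0x00 || context || bitLen),
    // and the output is truncated to the requested length.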
@ -1,279 +0,0 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/ecc_test.go

package crypto

import (
    "bytes"
    "crypto"
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/sha256"
    "io"
    "testing"
)

func TestECDSAPublicKeyDerivation_P256(t *testing.T) {
    d := []byte{
        0xc9, 0x80, 0x68, 0x98, 0xa0, 0x33, 0x49, 0x16, 0xc8, 0x60, 0x74, 0x88, 0x80, 0xa5, 0x41, 0xf0,
        0x93, 0xb5, 0x79, 0xa9, 0xb1, 0xf3, 0x29, 0x34, 0xd8, 0x6c, 0x36, 0x3c, 0x39, 0x80, 0x03, 0x57,
    }

    x := []byte{
        0xd0, 0x72, 0x0d, 0xc6, 0x91, 0xaa, 0x80, 0x09, 0x6b, 0xa3, 0x2f, 0xed, 0x1c, 0xb9, 0x7c, 0x2b,
        0x62, 0x06, 0x90, 0xd0, 0x6d, 0xe0, 0x31, 0x7b, 0x86, 0x18, 0xd5, 0xce, 0x65, 0xeb, 0x72, 0x8f,
    }

    y := []byte{
        0x96, 0x81, 0xb5, 0x17, 0xb1, 0xcd, 0xa1, 0x7d, 0x0d, 0x83, 0xd3, 0x35, 0xd9, 0xc4, 0xa8, 0xa9,
        0xa9, 0xb0, 0xb1, 0xb3, 0xc7, 0x10, 0x6d, 0x8f, 0x3c, 0x72, 0xbc, 0x50, 0x93, 0xdc, 0x27, 0x5f,
    }

    testKeyDerivation(t, elliptic.P256(), d, x, y)
}

func TestECDSAPublicKeyDerivation_P384(t *testing.T) {
    d := []byte{
        0x53, 0x94, 0xf7, 0x97, 0x3e, 0xa8, 0x68, 0xc5, 0x2b, 0xf3, 0xff, 0x8d, 0x8c, 0xee, 0xb4, 0xdb,
        0x90, 0xa6, 0x83, 0x65, 0x3b, 0x12, 0x48, 0x5d, 0x5f, 0x62, 0x7c, 0x3c, 0xe5, 0xab, 0xd8, 0x97,
        0x8f, 0xc9, 0x67, 0x3d, 0x14, 0xa7, 0x1d, 0x92, 0x57, 0x47, 0x93, 0x16, 0x62, 0x49, 0x3c, 0x37,
    }

    x := []byte{
        0xfd, 0x3c, 0x84, 0xe5, 0x68, 0x9b, 0xed, 0x27, 0x0e, 0x60, 0x1b, 0x3d, 0x80, 0xf9, 0x0d, 0x67,
        0xa9, 0xae, 0x45, 0x1c, 0xce, 0x89, 0x0f, 0x53, 0xe5, 0x83, 0x22, 0x9a, 0xd0, 0xe2, 0xee, 0x64,
        0x56, 0x11, 0xfa, 0x99, 0x36, 0xdf, 0xa4, 0x53, 0x06, 0xec, 0x18, 0x06, 0x67, 0x74, 0xaa, 0x24,
    }

    y := []byte{
        0xb8, 0x3c, 0xa4, 0x12, 0x6c, 0xfc, 0x4c, 0x4d, 0x1d, 0x18, 0xa4, 0xb6, 0xc2, 0x1c, 0x7f, 0x69,
        0x9d, 0x51, 0x23, 0xdd, 0x9c, 0x24, 0xf6, 0x6f, 0x83, 0x38, 0x46, 0xee, 0xb5, 0x82, 0x96, 0x19,
        0x6b, 0x42, 0xec, 0x06, 0x42, 0x5d, 0xb5, 0xb7, 0x0a, 0x4b, 0x81, 0xb7, 0xfc, 0xf7, 0x05, 0xa0,
    }

    testKeyDerivation(t, elliptic.P384(), d, x, y)
}

func TestECDSAKnownSigningValue_P256(t *testing.T) {
    d := []byte{
        0x51, 0x9b, 0x42, 0x3d, 0x71, 0x5f, 0x8b, 0x58, 0x1f, 0x4f, 0xa8, 0xee, 0x59, 0xf4, 0x77, 0x1a,
        0x5b, 0x44, 0xc8, 0x13, 0x0b, 0x4e, 0x3e, 0xac, 0xca, 0x54, 0xa5, 0x6d, 0xda, 0x72, 0xb4, 0x64,
    }

    testKnownSigningValue(t, elliptic.P256(), d)
}

func TestECDSAKnownSigningValue_P384(t *testing.T) {
    d := []byte{
        0x53, 0x94, 0xf7, 0x97, 0x3e, 0xa8, 0x68, 0xc5, 0x2b, 0xf3, 0xff, 0x8d, 0x8c, 0xee, 0xb4, 0xdb,
        0x90, 0xa6, 0x83, 0x65, 0x3b, 0x12, 0x48, 0x5d, 0x5f, 0x62, 0x7c, 0x3c, 0xe5, 0xab, 0xd8, 0x97,
        0x8f, 0xc9, 0x67, 0x3d, 0x14, 0xa7, 0x1d, 0x92, 0x57, 0x47, 0x93, 0x16, 0x62, 0x49, 0x3c, 0x37,
    }

    testKnownSigningValue(t, elliptic.P384(), d)
}

func testKeyDerivation(t *testing.T, curve elliptic.Curve, d, expectedX, expectedY []byte) {
    privKey := ECDSAKey(curve, d)

    if e, a := d, privKey.D.Bytes(); !bytes.Equal(e, a) {
        t.Errorf("expected % x, got % x", e, a)
    }

    if e, a := expectedX, privKey.X.Bytes(); !bytes.Equal(e, a) {
        t.Errorf("expected % x, got % x", e, a)
    }

    if e, a := expectedY, privKey.Y.Bytes(); !bytes.Equal(e, a) {
        t.Errorf("expected % x, got % x", e, a)
    }
}

func testKnownSigningValue(t *testing.T, curve elliptic.Curve, d []byte) {
    signingKey := ECDSAKey(curve, d)

    message := []byte{
        0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
        0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
        0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
        0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
        0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
        0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
        0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
        0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
    }

    sha256Hash := sha256.New()
    _, err := io.Copy(sha256Hash, bytes.NewReader(message))
    if err != nil {
        t.Fatalf("expected no error, got %v", err)
    }

    msgHash := sha256Hash.Sum(nil)
    msgSignature, err := signingKey.Sign(rand.Reader, msgHash, crypto.SHA256)
    if err != nil {
        t.Fatalf("expected no error, got %v", err)
    }

    verified, err := VerifySignature(&signingKey.PublicKey, msgHash, msgSignature)
    if err != nil {
        t.Fatalf("expected no error, got %v", err)
    }

    if !verified {
        t.Fatalf("failed to verify message msgSignature")
    }
}

func TestECDSAInvalidSignature_P256(t *testing.T) {
    testInvalidSignature(t, elliptic.P256())
}

func TestECDSAInvalidSignature_P384(t *testing.T) {
    testInvalidSignature(t, elliptic.P384())
}

func TestECDSAGenKeySignature_P256(t *testing.T) {
    testGenKeySignature(t, elliptic.P256())
}

func TestECDSAGenKeySignature_P384(t *testing.T) {
    testGenKeySignature(t, elliptic.P384())
}

func testInvalidSignature(t *testing.T, curve elliptic.Curve) {
    privateKey, err := ecdsa.GenerateKey(curve, rand.Reader)
    if err != nil {
        t.Fatalf("failed to generate key: %v", err)
    }

    message := []byte{
        0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
        0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
        0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
        0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
        0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
        0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
        0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
        0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
    }

    sha256Hash := sha256.New()
    _, err = io.Copy(sha256Hash, bytes.NewReader(message))
    if err != nil {
        t.Fatalf("expected no error, got %v", err)
    }

    msgHash := sha256Hash.Sum(nil)
    msgSignature, err := privateKey.Sign(rand.Reader, msgHash, crypto.SHA256)
    if err != nil {
        t.Fatalf("expected no error, got %v", err)
    }

    byteToFlip := 15
    switch msgSignature[byteToFlip] {
    case 0:
        msgSignature[byteToFlip] = 0x0a
    default:
        msgSignature[byteToFlip] &^= msgSignature[byteToFlip]
    }

    verified, err := VerifySignature(&privateKey.PublicKey, msgHash, msgSignature)
    if err != nil {
        t.Fatalf("expected no error, got %v", err)
    }

    if verified {
        t.Fatalf("expected message verification to fail")
    }
}

func testGenKeySignature(t *testing.T, curve elliptic.Curve) {
    privateKey, err := ecdsa.GenerateKey(curve, rand.Reader)
    if err != nil {
        t.Fatalf("failed to generate key: %v", err)
    }

    message := []byte{
        0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
        0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
        0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
        0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
        0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
        0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
        0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
        0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
    }

    sha256Hash := sha256.New()
    _, err = io.Copy(sha256Hash, bytes.NewReader(message))
    if err != nil {
        t.Fatalf("expected no error, got %v", err)
    }

    msgHash := sha256Hash.Sum(nil)
    msgSignature, err := privateKey.Sign(rand.Reader, msgHash, crypto.SHA256)
    if err != nil {
        t.Fatalf("expected no error, got %v", err)
    }

    verified, err := VerifySignature(&privateKey.PublicKey, msgHash, msgSignature)
    if err != nil {
        t.Fatalf("expected no error, got %v", err)
    }

    if !verified {
        t.Fatalf("failed to verify message signature")
    }
}

func TestECDSASignatureFormat(t *testing.T) {
    asn1Signature := []byte{
        0x30, 0x45, 0x02, 0x21, 0x00, 0xd7, 0xc5, 0xb9, 0x9e, 0x0b, 0xb1, 0x1a, 0x1f, 0x32, 0xda, 0x66, 0xe0, 0xff,
        0x59, 0xb7, 0x8a, 0x5e, 0xb3, 0x94, 0x9c, 0x23, 0xb3, 0xfc, 0x1f, 0x18, 0xcc, 0xf6, 0x61, 0x67, 0x8b, 0xf1,
        0xc1, 0x02, 0x20, 0x26, 0x4d, 0x8b, 0x7c, 0xaa, 0x52, 0x4c, 0xc0, 0x2e, 0x5f, 0xf6, 0x7e, 0x24, 0x82, 0xe5,
        0xfb, 0xcb, 0xc7, 0x9b, 0x83, 0x0d, 0x19, 0x7e, 0x7a, 0x40, 0x37, 0x87, 0xdd, 0x1c, 0x93, 0x13, 0xc4,
    }

    x := []byte{
        0x1c, 0xcb, 0xe9, 0x1c, 0x07, 0x5f, 0xc7, 0xf4, 0xf0, 0x33, 0xbf, 0xa2, 0x48, 0xdb, 0x8f, 0xcc,
        0xd3, 0x56, 0x5d, 0xe9, 0x4b, 0xbf, 0xb1, 0x2f, 0x3c, 0x59, 0xff, 0x46, 0xc2, 0x71, 0xbf, 0x83,
    }

    y := []byte{
        0xce, 0x40, 0x14, 0xc6, 0x88, 0x11, 0xf9, 0xa2, 0x1a, 0x1f, 0xdb, 0x2c, 0x0e, 0x61, 0x13, 0xe0,
        0x6d, 0xb7, 0xca, 0x93, 0xb7, 0x40, 0x4e, 0x78, 0xdc, 0x7c, 0xcd, 0x5c, 0xa8, 0x9a, 0x4c, 0xa9,
    }

    publicKey, err := ECDSAPublicKey(elliptic.P256(), x, y)
    if err != nil {
        t.Fatalf("expected no error, got %v", err)
    }

    message := []byte{
        0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
        0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
        0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
        0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
        0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
        0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
        0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
        0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
    }

    hash := sha256.New()
    _, err = io.Copy(hash, bytes.NewReader(message))
    if err != nil {
        t.Fatalf("expected no error, got %v", err)
    }
    msgHash := hash.Sum(nil)

    verifySignature, err := VerifySignature(publicKey, msgHash, asn1Signature)
    if err != nil {
        t.Fatalf("expected no error, got %v", err)
    }

    if !verifySignature {
        t.Fatalf("failed to verify signature")
    }
}
@ -1,38 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/const.go

package v4

const (
    // EmptyStringSHA256 is the hex encoded sha256 value of an empty string
    EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`

    // UnsignedPayload indicates that the request payload body is unsigned
    UnsignedPayload = "UNSIGNED-PAYLOAD"

    // AmzAlgorithmKey indicates the signing algorithm
    AmzAlgorithmKey = "X-Amz-Algorithm"

    // AmzSecurityTokenKey indicates the security token to be used with temporary credentials
    AmzSecurityTokenKey = "X-Amz-Security-Token"

    // AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'
    AmzDateKey = "X-Amz-Date"

    // AmzCredentialKey is the access key ID and credential scope
    AmzCredentialKey = "X-Amz-Credential"

    // AmzSignedHeadersKey is the set of headers signed for the request
    AmzSignedHeadersKey = "X-Amz-SignedHeaders"

    // AmzSignatureKey is the query parameter to store the SigV4 signature
    AmzSignatureKey = "X-Amz-Signature"

    // TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
    TimeFormat = "20060102T150405Z"

    // ShortTimeFormat is the shortened time format used in the credential scope
    ShortTimeFormat = "20060102"

    // ContentSHAKey is the SHA256 of the request body
    ContentSHAKey = "X-Amz-Content-Sha256"
)
@ -1,90 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/header_rules.go

package v4

import (
    "strings"
)

// Rules houses a set of Rule needed for validation of a
// string value
type Rules []Rule

// Rule interface allows for more flexible rules and simply
// checks whether or not a value adheres to that Rule
type Rule interface {
    IsValid(value string) bool
}

// IsValid will iterate through all rules and see if any rules
// apply to the value; supports nested rules
func (r Rules) IsValid(value string) bool {
    for _, rule := range r {
        if rule.IsValid(value) {
            return true
        }
    }
    return false
}

// MapRule generic Rule for maps
type MapRule map[string]struct{}

// IsValid for the map Rule is satisfied when the value exists in the map
func (m MapRule) IsValid(value string) bool {
    _, ok := m[value]
    return ok
}

// AllowList is a generic Rule for allow-listing
type AllowList struct {
    Rule
}

// IsValid for AllowList checks if the value is within the AllowList
func (w AllowList) IsValid(value string) bool {
    return w.Rule.IsValid(value)
}

// DenyList is a generic Rule for deny-listing
type DenyList struct {
    Rule
}

// IsValid for DenyList checks that the value is not within the DenyList
func (b DenyList) IsValid(value string) bool {
    return !b.Rule.IsValid(value)
}

// Patterns is a list of strings to match against
type Patterns []string

// IsValid for Patterns checks each pattern and returns whether a match has
// been found
func (p Patterns) IsValid(value string) bool {
    for _, pattern := range p {
        if HasPrefixFold(value, pattern) {
            return true
        }
    }
    return false
}

// InclusiveRules allows rules to depend on one another
type InclusiveRules []Rule

// IsValid will return true if all rules are true
func (r InclusiveRules) IsValid(value string) bool {
    for _, rule := range r {
        if !rule.IsValid(value) {
            return false
        }
    }
    return true
}

// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
// under Unicode case-folding.
func HasPrefixFold(s, prefix string) bool {
    return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
}
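The rule types compose; a quick sketch of the semantics (the header names are arbitrary):

    rules := Rules{DenyList{MapRule{"Authorization": struct{}{}}}}
    rules.IsValid("Authorization") // false – denied by the DenyList
    rules.IsValid("Content-Type")  // true  – not in the deny map

    meta := Patterns{"X-Amz-Meta-"}
    meta.IsValid("x-amz-meta-color") // true – prefix matching is case-folded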
@ -1,83 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/header.go
// with changes:
// * drop User-Agent header from ignored

package v4

// IgnoredPresignedHeaders is a list of headers that are ignored during signing
var IgnoredPresignedHeaders = Rules{
    DenyList{
        MapRule{
            "Authorization":   struct{}{},
            "User-Agent":      struct{}{},
            "X-Amzn-Trace-Id": struct{}{},
        },
    },
}

// IgnoredHeaders is a list of headers that are ignored during signing;
// the User-Agent header is dropped to be compatible with aws sdk java v1.
var IgnoredHeaders = Rules{
    DenyList{
        MapRule{
            "Authorization": struct{}{},
            //"User-Agent":    struct{}{},
            "X-Amzn-Trace-Id": struct{}{},
        },
    },
}

// RequiredSignedHeaders is a whitelist for Build canonical headers.
var RequiredSignedHeaders = Rules{
    AllowList{
        MapRule{
            "Cache-Control":                         struct{}{},
            "Content-Disposition":                   struct{}{},
            "Content-Encoding":                      struct{}{},
            "Content-Language":                      struct{}{},
            "Content-Md5":                           struct{}{},
            "Content-Type":                          struct{}{},
            "Expires":                               struct{}{},
            "If-Match":                              struct{}{},
            "If-Modified-Since":                     struct{}{},
            "If-None-Match":                         struct{}{},
            "If-Unmodified-Since":                   struct{}{},
            "Range":                                 struct{}{},
            "X-Amz-Acl":                             struct{}{},
            "X-Amz-Copy-Source":                     struct{}{},
            "X-Amz-Copy-Source-If-Match":            struct{}{},
            "X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
            "X-Amz-Copy-Source-If-None-Match":       struct{}{},
            "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
            "X-Amz-Copy-Source-Range":               struct{}{},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
            "X-Amz-Grant-Full-control":                     struct{}{},
            "X-Amz-Grant-Read":                             struct{}{},
            "X-Amz-Grant-Read-Acp":                         struct{}{},
            "X-Amz-Grant-Write":                            struct{}{},
            "X-Amz-Grant-Write-Acp":                        struct{}{},
            "X-Amz-Metadata-Directive":                     struct{}{},
            "X-Amz-Mfa":                                    struct{}{},
            "X-Amz-Request-Payer":                          struct{}{},
            "X-Amz-Server-Side-Encryption":                 struct{}{},
            "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":  struct{}{},
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
            "X-Amz-Server-Side-Encryption-Customer-Key":       struct{}{},
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
            "X-Amz-Storage-Class":             struct{}{},
            "X-Amz-Website-Redirect-Location": struct{}{},
            "X-Amz-Content-Sha256":            struct{}{},
            "X-Amz-Tagging":                   struct{}{},
        },
    },
    Patterns{"X-Amz-Meta-"},
}

// AllowedQueryHoisting is a whitelist for Build query headers. The boolean value
// represents whether or not it is a pattern.
var AllowedQueryHoisting = InclusiveRules{
    DenyList{RequiredSignedHeaders},
    Patterns{"X-Amz-"},
}
@ -1,15 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/hmac.go

package v4

import (
    "crypto/hmac"
    "crypto/sha256"
)

// HMACSHA256 computes an HMAC-SHA256 of data given the provided key.
func HMACSHA256(key []byte, data []byte) []byte {
    hash := hmac.New(sha256.New, key)
    hash.Write(data)
    return hash.Sum(nil)
}
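For context, HMACSHA256 is the building block of the standard SigV4 signing-key chain, sketched below with example date, region, and service values (the v4a signer derives an ECDSA key instead, but the symmetric path looks like this):

    secretKey := "<secret-access-key>"
    kDate := HMACSHA256([]byte("AWS4"+secretKey), []byte("20060102"))
    kRegion := HMACSHA256(kDate, []byte("us-east-1"))
    kService := HMACSHA256(kRegion, []byte("s3"))
    kSigning := HMACSHA256(kService, []byte("aws4_request"))
    // kSigning then signs the string-to-sign for the request.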
@ -1,77 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/host.go

package v4

import (
    "net/http"
    "strings"
)

// SanitizeHostForHeader removes the default port from host and updates request.Host
func SanitizeHostForHeader(r *http.Request) {
    host := getHost(r)
    port := portOnly(host)
    if port != "" && isDefaultPort(r.URL.Scheme, port) {
        r.Host = stripPort(host)
    }
}

// Returns host from request
func getHost(r *http.Request) string {
    if r.Host != "" {
        return r.Host
    }

    return r.URL.Host
}

// Hostname returns u.Host, without any port number.
//
// If Host is an IPv6 literal with a port number, Hostname returns the
// IPv6 literal without the square brackets. IPv6 literals may include
// a zone identifier.
//
// Copied from the Go 1.8 standard library (net/url)
func stripPort(hostport string) string {
    colon := strings.IndexByte(hostport, ':')
    if colon == -1 {
        return hostport
    }
    if i := strings.IndexByte(hostport, ']'); i != -1 {
        return strings.TrimPrefix(hostport[:i], "[")
    }
    return hostport[:colon]
}

// Port returns the port part of u.Host, without the leading colon.
// If u.Host doesn't contain a port, Port returns an empty string.
//
// Copied from the Go 1.8 standard library (net/url)
func portOnly(hostport string) string {
    colon := strings.IndexByte(hostport, ':')
    if colon == -1 {
        return ""
    }
    if i := strings.Index(hostport, "]:"); i != -1 {
        return hostport[i+len("]:"):]
    }
    if strings.Contains(hostport, "]") {
        return ""
    }
    return hostport[colon+len(":"):]
}

// Returns true if the specified URI is using the standard port
// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
func isDefaultPort(scheme, port string) bool {
    if port == "" {
        return true
    }

    lowerCaseScheme := strings.ToLower(scheme)
    if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
        return true
    }

    return false
}
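Behavior sketch for the host helpers (the URL is an example):

    req, _ := http.NewRequest("GET", "https://example.com:443/bucket", nil)
    SanitizeHostForHeader(req)
    // req.Host == "example.com" – the default HTTPS port is dropped;
    // a non-default port such as example.com:8443 would be left untouched.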
@ -1,38 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/time.go

package v4

import "time"

// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing.
type SigningTime struct {
    time.Time
    timeFormat      string
    shortTimeFormat string
}

// NewSigningTime creates a new SigningTime given a time.Time
func NewSigningTime(t time.Time) SigningTime {
    return SigningTime{
        Time: t,
    }
}

// TimeFormat provides a time formatted in the X-Amz-Date format.
func (m *SigningTime) TimeFormat() string {
    return m.format(&m.timeFormat, TimeFormat)
}

// ShortTimeFormat provides a time formatted as 20060102.
func (m *SigningTime) ShortTimeFormat() string {
    return m.format(&m.shortTimeFormat, ShortTimeFormat)
}

func (m *SigningTime) format(target *string, format string) string {
    if len(*target) > 0 {
        return *target
    }
    v := m.Time.Format(format)
    *target = v
    return v
}
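Usage sketch: the two accessors memoize their results, so repeated calls during signing format the timestamp only once:

    st := NewSigningTime(time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC))
    st.TimeFormat()      // "20060102T150405Z"
    st.ShortTimeFormat() // "20060102"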
@ -1,66 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/util.go

package v4

import (
    "net/url"
    "strings"
)

const doubleSpace = "  "

// StripExcessSpaces rewrites the passed-in string so it does not
// contain multiple side-by-side spaces.
func StripExcessSpaces(str string) string {
    var j, k, l, m, spaces int
    // Trim trailing spaces
    for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
    }

    // Trim leading spaces
    for k = 0; k < j && str[k] == ' '; k++ {
    }
    str = str[k : j+1]

    // Strip multiple spaces.
    j = strings.Index(str, doubleSpace)
    if j < 0 {
        return str
    }

    buf := []byte(str)
    for k, m, l = j, j, len(buf); k < l; k++ {
        if buf[k] == ' ' {
            if spaces == 0 {
                // First space.
                buf[m] = buf[k]
                m++
            }
            spaces++
        } else {
            // End of multiple spaces.
            spaces = 0
            buf[m] = buf[k]
            m++
        }
    }

    return string(buf[:m])
}

// GetURIPath returns the escaped URI component from the provided URL
func GetURIPath(u *url.URL) string {
    var uri string

    if len(u.Opaque) > 0 {
        uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
    } else {
        uri = u.EscapedPath()
    }

    if len(uri) == 0 {
        uri = "/"
    }

    return uri
}
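GetURIPath is not exercised by the test file that follows; for reference (the URLs are examples):

    u, _ := url.Parse("https://example.com/bucket/obj%20name?x=1")
    GetURIPath(u) // "/bucket/obj%20name" – escaped path, query dropped
    u2, _ := url.Parse("https://example.com")
    GetURIPath(u2) // "/" – an empty path is normalized to "/"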
@ -1,77 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/util_test.go

package v4

import (
    "testing"
)

func TestStripExcessHeaders(t *testing.T) {
    vals := []string{
        "",
        "123",
        "1 2 3",
        "1 2 3 ",
        " 1 2 3",
        "1 2 3",
        "1 23",
        "1 2 3",
        "1 2 ",
        " 1 2 ",
        "12 3",
        "12 3 1",
        "12 3 1",
        "12 3 1abc123",
    }

    expected := []string{
        "",
        "123",
        "1 2 3",
        "1 2 3",
        "1 2 3",
        "1 2 3",
        "1 23",
        "1 2 3",
        "1 2",
        "1 2",
        "12 3",
        "12 3 1",
        "12 3 1",
        "12 3 1abc123",
    }

    for i := 0; i < len(vals); i++ {
        r := StripExcessSpaces(vals[i])
        if e, a := expected[i], r; e != a {
            t.Errorf("%d, expect %v, got %v", i, e, a)
        }
    }
}

var stripExcessSpaceCases = []string{
    `AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`,
    `123 321 123 321`,
    ` 123 321 123 321 `,
    ` 123 321 123 321 `,
    "123",
    "1 2 3",
    " 1 2 3",
    "1 2 3",
    "1 23",
    "1 2 3",
    "1 2 ",
    " 1 2 ",
    "12 3",
    "12 3 1",
    "12 3 1",
    "12 3 1abc123",
}

func BenchmarkStripExcessSpaces(b *testing.B) {
    for i := 0; i < b.N; i++ {
        for _, v := range stripExcessSpaceCases {
            StripExcessSpaces(v)
        }
    }
}
@@ -1,133 +0,0 @@
// This file adapts https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go for sigv4a,
// with changes:
// * add VerifyTrailerSignature

package v4a

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
	"time"

	signerCrypto "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/crypto"
	v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/v4"
)

// EventStreamSigner is an AWS EventStream protocol signer.
type EventStreamSigner interface {
	GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error)
}

// StreamSignerOptions is the configuration options for StreamSigner.
type StreamSignerOptions struct{}

// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads.
type StreamSigner struct {
	options StreamSignerOptions

	credentials Credentials
	service     string

	prevSignature []byte
}

// NewStreamSigner returns a new AWS EventStream protocol signer.
func NewStreamSigner(credentials Credentials, service string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner {
	o := StreamSignerOptions{}

	for _, fn := range optFns {
		fn(&o)
	}

	return &StreamSigner{
		options:       o,
		credentials:   credentials,
		service:       service,
		prevSignature: seedSignature,
	}
}

// VerifySignature checks an event stream frame signature against the signer's
// rolling previous signature and, on success, advances it.
func (s *StreamSigner) VerifySignature(headers, payload []byte, signingTime time.Time, signature []byte, optFns ...func(*StreamSignerOptions)) error {
	options := s.options

	for _, fn := range optFns {
		fn(&options)
	}

	prevSignature := s.prevSignature

	st := v4Internal.NewSigningTime(signingTime)

	scope := buildCredentialScope(st, s.service)

	stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st)

	ok, err := signerCrypto.VerifySignature(&s.credentials.PrivateKey.PublicKey, makeHash(sha256.New(), []byte(stringToSign)), signature)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("v4a: invalid signature")
	}

	s.prevSignature = signature

	return nil
}

func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
	hash := sha256.New()
	return strings.Join([]string{
		"AWS4-ECDSA-P256-SHA256-PAYLOAD",
		signingTime.TimeFormat(),
		credentialScope,
		hex.EncodeToString(previousSignature),
		hex.EncodeToString(makeHash(hash, headers)),
		hex.EncodeToString(makeHash(hash, payload)),
	}, "\n")
}

// VerifyTrailerSignature checks the signature of the trailing frame of an event stream.
func (s *StreamSigner) VerifyTrailerSignature(payload []byte, signingTime time.Time, signature []byte) error {
	prevSignature := s.prevSignature

	st := v4Internal.NewSigningTime(signingTime)

	scope := buildCredentialScope(st, s.service)

	stringToSign := s.buildEventStreamStringToSignTrailer(payload, prevSignature, scope, &st)

	ok, err := signerCrypto.VerifySignature(&s.credentials.PrivateKey.PublicKey, makeHash(sha256.New(), []byte(stringToSign)), signature)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("v4a: invalid signature")
	}

	s.prevSignature = signature

	return nil
}

func (s *StreamSigner) buildEventStreamStringToSignTrailer(payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
	hash := sha256.New()
	return strings.Join([]string{
		"AWS4-ECDSA-P256-SHA256-TRAILER",
		signingTime.TimeFormat(),
		credentialScope,
		hex.EncodeToString(previousSignature),
		hex.EncodeToString(makeHash(hash, payload)),
	}, "\n")
}

func buildCredentialScope(st v4Internal.SigningTime, service string) string {
	return strings.Join([]string{
		st.Format(shortTimeFormat),
		service,
		"aws4_request",
	}, "/")
}

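As a hedged sketch of the intended call pattern: a caller verifying a chunk-signed upload feeds frames through the StreamSigner in order, since each frame's string to sign embeds the previous signature. The Chunk type here is hypothetical, and the v4asdk2 import path is inferred from the internal imports above:

package example

import (
	"time"

	v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
)

// Chunk is a hypothetical caller-side frame: its signature covers the
// previous frame's signature, so frames must be verified in order.
type Chunk struct {
	Headers, Payload, Signature []byte
	Time                        time.Time
}

func verifyStream(creds v4a.Credentials, seed []byte, chunks []Chunk, trailer *Chunk) error {
	s := v4a.NewStreamSigner(creds, "s3", seed)
	for _, c := range chunks {
		// Each successful check advances the signer's rolling prevSignature.
		if err := s.VerifySignature(c.Headers, c.Payload, c.Time, c.Signature); err != nil {
			return err
		}
	}
	if trailer != nil {
		return s.VerifyTrailerSignature(trailer.Payload, trailer.Time, trailer.Signature)
	}
	return nil
}
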
@@ -1,591 +0,0 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/v4a.go
// with changes:
// * adding exported VerifySignature methods
// * using different ignore headers for sign/presign requests
// * don't duplicate content-length as signed header
// * use copy of smithy-go encoding/httpbinding package
// * use zap.Logger instead of smithy-go/logging

package v4a

import (
	"bytes"
	"context"
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"math/big"
	"net/http"
	"net/textproto"
	"net/url"
	"sort"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/smithy/encoding/httpbinding"
	signerCrypto "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/crypto"
	v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/v4"
	"go.uber.org/zap"
)

const (
	// AmzRegionSetKey represents the region set header used for sigv4a
	AmzRegionSetKey     = "X-Amz-Region-Set"
	amzAlgorithmKey     = v4Internal.AmzAlgorithmKey
	amzSecurityTokenKey = v4Internal.AmzSecurityTokenKey
	amzDateKey          = v4Internal.AmzDateKey
	amzCredentialKey    = v4Internal.AmzCredentialKey
	amzSignedHeadersKey = v4Internal.AmzSignedHeadersKey
	authorizationHeader = "Authorization"

	signingAlgorithm = "AWS4-ECDSA-P256-SHA256"

	timeFormat      = "20060102T150405Z"
	shortTimeFormat = "20060102"

	// EmptyStringSHA256 is a hex encoded SHA-256 hash of an empty string
	EmptyStringSHA256 = v4Internal.EmptyStringSHA256

	// Version of signing v4a
	Version = "SigV4A"
)

var (
	p256          elliptic.Curve
	nMinusTwoP256 *big.Int

	one = new(big.Int).SetInt64(1)
)

func init() {
	// Ensure the elliptic curve parameters are initialized on package import rather than on first usage
	p256 = elliptic.P256()

	nMinusTwoP256 = new(big.Int).SetBytes(p256.Params().N.Bytes())
	nMinusTwoP256 = nMinusTwoP256.Sub(nMinusTwoP256, new(big.Int).SetInt64(2))
}

// SignerOptions is the SigV4a signing options for constructing a Signer.
type SignerOptions struct {
	Logger     *zap.Logger
	LogSigning bool

	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
	// request header to the request's query string. This is most commonly used
	// with pre-signed requests preventing headers from being added to the
	// request's query string.
	DisableHeaderHoisting bool

	// Disables the automatic escaping of the URI path of the request for the
	// signature's canonical string's path. For services that do not need additional
	// escaping then use this to disable the signer escaping the path.
	//
	// S3 is an example of a service that does not need additional escaping.
	//
	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
	DisableURIPathEscaping bool
}

// Signer is a SigV4a HTTP signing implementation
type Signer struct {
	options SignerOptions
}

// NewSigner constructs a SigV4a Signer.
func NewSigner(optFns ...func(*SignerOptions)) *Signer {
	options := SignerOptions{}

	for _, fn := range optFns {
		fn(&options)
	}

	return &Signer{options: options}
}

// deriveKeyFromAccessKeyPair derives a NIST P-256 PrivateKey from the given
// IAM AccessKey and SecretKey pair.
//
// Based on FIPS.186-4 Appendix B.4.2
func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, error) {
	params := p256.Params()
	bitLen := params.BitSize // Testing random candidates does not require an additional 64 bits
	counter := 0x01

	buffer := make([]byte, 1+len(accessKey)) // 1 byte counter + len(accessKey)
	kdfContext := bytes.NewBuffer(buffer)

	inputKey := append([]byte("AWS4A"), []byte(secretKey)...)

	d := new(big.Int)
	for {
		kdfContext.Reset()
		kdfContext.WriteString(accessKey)
		kdfContext.WriteByte(byte(counter))

		key, err := signerCrypto.HMACKeyDerivation(sha256.New, bitLen, inputKey, []byte(signingAlgorithm), kdfContext.Bytes())
		if err != nil {
			return nil, err
		}

		// Check the key before calling SetBytes to see if it is in fact a valid candidate.
		// This ensures the byte slice is the correct length (32-bytes) to compare in constant-time
		cmp, err := signerCrypto.ConstantTimeByteCompare(key, nMinusTwoP256.Bytes())
		if err != nil {
			return nil, err
		}
		if cmp == -1 {
			d.SetBytes(key)
			break
		}

		counter++
		if counter > 0xFF {
			return nil, fmt.Errorf("exhausted single byte external counter")
		}
	}
	d = d.Add(d, one)

	priv := new(ecdsa.PrivateKey)
	priv.PublicKey.Curve = p256
	priv.D = d
	priv.PublicKey.X, priv.PublicKey.Y = p256.ScalarBaseMult(d.Bytes())

	return priv, nil
}

type httpSigner struct {
	Request     *http.Request
	ServiceName string
	RegionSet   []string
	Time        time.Time
	Credentials Credentials
	IsPreSign   bool

	Logger *zap.Logger
	Debug  bool

	// PayloadHash is the hex encoded SHA-256 hash of the request payload
	// If len(PayloadHash) == 0 the signer will attempt to send the request
	// as an unsigned payload. Note: Unsigned payloads only work for a subset of services.
	PayloadHash string

	DisableHeaderHoisting  bool
	DisableURIPathEscaping bool
}

// SignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and signs using SigV4a.
// The passed in request will be modified in place.
func (s *Signer) SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) error {
	options := s.options
	for _, fn := range optFns {
		fn(&options)
	}

	signer := &httpSigner{
		Request:                r,
		PayloadHash:            payloadHash,
		ServiceName:            service,
		RegionSet:              regionSet,
		Credentials:            credentials,
		Time:                   signingTime.UTC(),
		DisableHeaderHoisting:  options.DisableHeaderHoisting,
		DisableURIPathEscaping: options.DisableURIPathEscaping,
		Logger:                 options.Logger,
	}

	signedRequest, err := signer.Build()
	if err != nil {
		return err
	}

	logHTTPSigningInfo(ctx, options, signedRequest)

	return nil
}

// VerifySignature checks the sigv4a signature of a regular (non-presigned) request.
func (s *Signer) VerifySignature(credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, signature string, optFns ...func(*SignerOptions)) error {
	return s.verifySignature(credentials, r, payloadHash, service, regionSet, signingTime, signature, false, optFns...)
}

// VerifyPresigned checks the sigv4a signature of a presigned request.
func (s *Signer) VerifyPresigned(credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, signature string, optFns ...func(*SignerOptions)) error {
	return s.verifySignature(credentials, r, payloadHash, service, regionSet, signingTime, signature, true, optFns...)
}

func (s *Signer) verifySignature(credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, signature string, isPresigned bool, optFns ...func(*SignerOptions)) error {
	options := s.options
	for _, fn := range optFns {
		fn(&options)
	}

	signer := &httpSigner{
		Request:                r,
		PayloadHash:            payloadHash,
		ServiceName:            service,
		RegionSet:              regionSet,
		Credentials:            credentials,
		Time:                   signingTime.UTC(),
		IsPreSign:              isPresigned,
		DisableHeaderHoisting:  options.DisableHeaderHoisting,
		DisableURIPathEscaping: options.DisableURIPathEscaping,
	}

	signedReq, err := signer.Build()
	if err != nil {
		return err
	}

	logHTTPSigningInfo(context.TODO(), options, signedReq)

	signatureRaw, err := hex.DecodeString(signature)
	if err != nil {
		return fmt.Errorf("decode hex signature: %w", err)
	}

	ok, err := signerCrypto.VerifySignature(&credentials.PrivateKey.PublicKey, makeHash(sha256.New(), []byte(signedReq.StringToSign)), signatureRaw)
	if err != nil {
		return err
	}

	if !ok {
		return fmt.Errorf("v4a: invalid signature")
	}

	return nil
}

// PresignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and presigns using SigV4a.
// Returns the presigned URL along with the headers that were signed with the request.
//
// PresignHTTP will not set the expires time of the presigned request
// automatically. To specify the expire duration for a request add the
// "X-Amz-Expires" query parameter on the request with the value as the
// duration in seconds the presigned URL should be considered valid for. This
// parameter is not used by all AWS services, and is most notably used by
// Amazon S3 APIs.
func (s *Signer) PresignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) (signedURI string, signedHeaders http.Header, err error) {
	options := s.options
	for _, fn := range optFns {
		fn(&options)
	}

	signer := &httpSigner{
		Request:                r,
		PayloadHash:            payloadHash,
		ServiceName:            service,
		RegionSet:              regionSet,
		Credentials:            credentials,
		Time:                   signingTime.UTC(),
		IsPreSign:              true,
		DisableHeaderHoisting:  options.DisableHeaderHoisting,
		DisableURIPathEscaping: options.DisableURIPathEscaping,
	}

	signedRequest, err := signer.Build()
	if err != nil {
		return "", nil, err
	}

	logHTTPSigningInfo(ctx, options, signedRequest)

	signedHeaders = make(http.Header)

	// For the signed headers we canonicalize the header keys in the returned map.
	// This avoids situations where the standard library can double headers like the
	// Host header. For example, the standard library will set the Host header, even
	// if it is present in lower-case form.
	for k, v := range signedRequest.SignedHeaders {
		key := textproto.CanonicalMIMEHeaderKey(k)
		signedHeaders[key] = append(signedHeaders[key], v...)
	}

	return signedRequest.Request.URL.String(), signedHeaders, nil
}

func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) {
	amzDate := s.Time.Format(timeFormat)

	if s.IsPreSign {
		query.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ","))
		query.Set(amzDateKey, amzDate)
		query.Set(amzAlgorithmKey, signingAlgorithm)
		if len(s.Credentials.SessionToken) > 0 {
			query.Set(amzSecurityTokenKey, s.Credentials.SessionToken)
		}
		return
	}

	headers.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ","))
	headers.Set(amzDateKey, amzDate)
	if len(s.Credentials.SessionToken) > 0 {
		headers.Set(amzSecurityTokenKey, s.Credentials.SessionToken)
	}
}

func (s *httpSigner) Build() (signedRequest, error) {
	req := s.Request

	query := req.URL.Query()
	headers := req.Header

	s.setRequiredSigningFields(headers, query)

	// Sort Each Query Key's Values
	for key := range query {
		sort.Strings(query[key])
	}

	v4Internal.SanitizeHostForHeader(req)

	credentialScope := s.buildCredentialScope()
	credentialStr := s.Credentials.Context + "/" + credentialScope
	if s.IsPreSign {
		query.Set(amzCredentialKey, credentialStr)
	}

	unsignedHeaders := headers
	if s.IsPreSign && !s.DisableHeaderHoisting {
		urlValues := url.Values{}
		urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, unsignedHeaders)
		for k := range urlValues {
			query[k] = urlValues[k]
		}
	}

	host := req.URL.Host
	if len(req.Host) > 0 {
		host = req.Host
	}

	var (
		signedHeaders      http.Header
		signedHeadersStr   string
		canonicalHeaderStr string
	)

	if s.IsPreSign {
		signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredPresignedHeaders, unsignedHeaders, s.Request.ContentLength)
	} else {
		signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
	}

	if s.IsPreSign {
		query.Set(amzSignedHeadersKey, signedHeadersStr)
	}

	rawQuery := strings.Replace(query.Encode(), "+", "%20", -1)

	canonicalURI := v4Internal.GetURIPath(req.URL)
	if !s.DisableURIPathEscaping {
		canonicalURI = httpbinding.EscapePath(canonicalURI, false)
	}

	canonicalString := s.buildCanonicalString(
		req.Method,
		canonicalURI,
		rawQuery,
		signedHeadersStr,
		canonicalHeaderStr,
	)

	strToSign := s.buildStringToSign(credentialScope, canonicalString)
	signingSignature, err := s.buildSignature(strToSign)
	if err != nil {
		return signedRequest{}, err
	}

	if s.IsPreSign {
		rawQuery += "&X-Amz-Signature=" + signingSignature
	} else {
		headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature))
	}

	req.URL.RawQuery = rawQuery

	return signedRequest{
		Request:         req,
		SignedHeaders:   signedHeaders,
		CanonicalString: canonicalString,
		StringToSign:    strToSign,
		PreSigned:       s.IsPreSign,
	}, nil
}

func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string {
	const credential = "Credential="
	const signedHeaders = "SignedHeaders="
	const signature = "Signature="
	const commaSpace = ", "

	var parts strings.Builder
	parts.Grow(len(signingAlgorithm) + 1 +
		len(credential) + len(credentialStr) + len(commaSpace) +
		len(signedHeaders) + len(signedHeadersStr) + len(commaSpace) +
		len(signature) + len(signingSignature),
	)
	parts.WriteString(signingAlgorithm)
	parts.WriteRune(' ')
	parts.WriteString(credential)
	parts.WriteString(credentialStr)
	parts.WriteString(commaSpace)
	parts.WriteString(signedHeaders)
	parts.WriteString(signedHeadersStr)
	parts.WriteString(commaSpace)
	parts.WriteString(signature)
	parts.WriteString(signingSignature)
	return parts.String()
}

func (s *httpSigner) buildCredentialScope() string {
	return strings.Join([]string{
		s.Time.Format(shortTimeFormat),
		s.ServiceName,
		"aws4_request",
	}, "/")
}

func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
	query := url.Values{}
	unsignedHeaders := http.Header{}
	for k, h := range header {
		if r.IsValid(k) {
			query[k] = h
		} else {
			unsignedHeaders[k] = h
		}
	}

	return query, unsignedHeaders
}

func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
	signed = make(http.Header)

	var headers []string
	const hostHeader = "host"
	headers = append(headers, hostHeader)
	signed[hostHeader] = append(signed[hostHeader], host)

	//const contentLengthHeader = "content-length"
	//if length > 0 {
	//	headers = append(headers, contentLengthHeader)
	//	signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
	//}

	for k, v := range header {
		if !rule.IsValid(k) {
			continue // ignored header
		}

		lowerCaseKey := strings.ToLower(k)
		if _, ok := signed[lowerCaseKey]; ok {
			// include additional values
			signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
			continue
		}

		headers = append(headers, lowerCaseKey)
		signed[lowerCaseKey] = v
	}
	sort.Strings(headers)

	signedHeaders = strings.Join(headers, ";")

	var canonicalHeaders strings.Builder
	n := len(headers)
	const colon = ':'
	for i := 0; i < n; i++ {
		if headers[i] == hostHeader {
			canonicalHeaders.WriteString(hostHeader)
			canonicalHeaders.WriteRune(colon)
			canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host))
		} else {
			canonicalHeaders.WriteString(headers[i])
			canonicalHeaders.WriteRune(colon)
			// Trim out leading, trailing, and dedup inner spaces from signed header values.
			values := signed[headers[i]]
			for j, v := range values {
				cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
				canonicalHeaders.WriteString(cleanedValue)
				if j < len(values)-1 {
					canonicalHeaders.WriteRune(',')
				}
			}
		}
		canonicalHeaders.WriteRune('\n')
	}
	canonicalHeadersStr = canonicalHeaders.String()

	return signed, signedHeaders, canonicalHeadersStr
}

func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string {
	return strings.Join([]string{
		method,
		uri,
		query,
		canonicalHeaders,
		signedHeaders,
		s.PayloadHash,
	}, "\n")
}

func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string {
	return strings.Join([]string{
		signingAlgorithm,
		s.Time.Format(timeFormat),
		credentialScope,
		hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))),
	}, "\n")
}

func makeHash(hash hash.Hash, b []byte) []byte {
	hash.Reset()
	hash.Write(b)
	return hash.Sum(nil)
}

func (s *httpSigner) buildSignature(strToSign string) (string, error) {
	sig, err := s.Credentials.PrivateKey.Sign(rand.Reader, makeHash(sha256.New(), []byte(strToSign)), crypto.SHA256)
	if err != nil {
		return "", err
	}
	return hex.EncodeToString(sig), nil
}

const logSignInfoMsg = `Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`

func logHTTPSigningInfo(_ context.Context, options SignerOptions, r signedRequest) {
	if !options.LogSigning {
		return
	}
	signedURLMsg := ""
	if r.PreSigned {
		signedURLMsg = fmt.Sprintf(logSignedURLMsg, r.Request.URL.String())
	}
	if options.Logger != nil {
		options.Logger.Debug(fmt.Sprintf(logSignInfoMsg, r.CanonicalString, r.StringToSign, signedURLMsg))
	}
}

type signedRequest struct {
	Request         *http.Request
	SignedHeaders   http.Header
	CanonicalString string
	StringToSign    string
	PreSigned       bool
}

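A hedged sketch of how a gateway handler might use the verify path above; extraction of the hex signature and signing time from the Authorization header is not shown, and the v4asdk2 import path is inferred from the internal imports in this file:

package example

import (
	"fmt"
	"net/http"
	"time"

	v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
)

// checkRequest re-signs r with the key derived from the caller's credentials
// and compares the result against the hex signature taken from the request.
func checkRequest(creds v4a.Credentials, r *http.Request, payloadHash, signature string, signedAt time.Time) error {
	signer := v4a.NewSigner()
	if err := signer.VerifySignature(creds, r, payloadHash, "s3", []string{"us-east-1"}, signedAt, signature); err != nil {
		return fmt.Errorf("sigv4a check failed: %w", err)
	}
	return nil
}
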
@@ -1,425 +0,0 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/v4a_test.go
// with changes:
// * use zap.Logger instead of smithy-go/logging

package v4a

import (
	"context"
	"encoding/hex"
	"fmt"
	"math/big"
	"net/http"
	"net/url"
	"reflect"
	"strconv"
	"strings"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/crypto"
	"github.com/aws/aws-sdk-go-v2/aws"
	"go.uber.org/zap/zaptest"
)

const (
	accessKey = "AKISORANDOMAASORANDOM"
	secretKey = "q+jcrXGc+0zWN6uzclKVhvMmUsIfRPa4rlRandom"
)

func TestDeriveECDSAKeyPairFromSecret(t *testing.T) {
	privateKey, err := deriveKeyFromAccessKeyPair(accessKey, secretKey)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectedX := func() *big.Int {
		t.Helper()
		b, ok := new(big.Int).SetString("15D242CEEBF8D8169FD6A8B5A746C41140414C3B07579038DA06AF89190FFFCB", 16)
		if !ok {
			t.Fatalf("failed to parse big integer")
		}
		return b
	}()
	expectedY := func() *big.Int {
		t.Helper()
		b, ok := new(big.Int).SetString("515242CEDD82E94799482E4C0514B505AFCCF2C0C98D6A553BF539F424C5EC0", 16)
		if !ok {
			t.Fatalf("failed to parse big integer")
		}
		return b
	}()

	if privateKey.X.Cmp(expectedX) != 0 {
		t.Errorf("expected % X, got % X", expectedX, privateKey.X)
	}
	if privateKey.Y.Cmp(expectedY) != 0 {
		t.Errorf("expected % X, got % X", expectedY, privateKey.Y)
	}
}

func TestSignHTTP(t *testing.T) {
	req := buildRequest("dynamodb", "us-east-1")

	signer, credProvider := buildSigner(t, true)

	key, err := credProvider.RetrievePrivateKey(context.Background())
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	err = signer.SignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectedDate := "19700101T000000Z"
	expectedAlg := "AWS4-ECDSA-P256-SHA256"
	expectedCredential := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
	expectedSignedHeaders := "content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-region-set;x-amz-security-token;x-amz-target"
	expectedStrToSignHash := "4ba7d0482cf4d5450cefdc067a00de1a4a715e444856fa3e1d85c35fb34d9730"

	q := req.Header

	validateAuthorization(t, q.Get("Authorization"), expectedAlg, expectedCredential, expectedSignedHeaders, expectedStrToSignHash)

	if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
}

func TestSignHTTP_NoSessionToken(t *testing.T) {
	req := buildRequest("dynamodb", "us-east-1")

	signer, credProvider := buildSigner(t, false)

	key, err := credProvider.RetrievePrivateKey(context.Background())
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	err = signer.SignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectedAlg := "AWS4-ECDSA-P256-SHA256"
	expectedCredential := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
	expectedSignedHeaders := "content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-region-set;x-amz-target"
	expectedStrToSignHash := "1aeefb422ae6aa0de7aec829da813e55cff35553cac212dffd5f9474c71e47ee"

	q := req.Header

	validateAuthorization(t, q.Get("Authorization"), expectedAlg, expectedCredential, expectedSignedHeaders, expectedStrToSignHash)
}

func TestPresignHTTP(t *testing.T) {
	req := buildRequest("dynamodb", "us-east-1")

	signer, credProvider := buildSigner(t, false)

	key, err := credProvider.RetrievePrivateKey(context.Background())
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	query := req.URL.Query()
	query.Set("X-Amz-Expires", "18000")
	req.URL.RawQuery = query.Encode()

	signedURL, _, err := signer.PresignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectedDate := "19700101T000000Z"
	expectedAlg := "AWS4-ECDSA-P256-SHA256"
	expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
	expectedCredential := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
	expectedStrToSignHash := "d7ffbd2fab644384c056957e6ac38de4ae68246764b5f5df171b3824153b6397"
	expectedTarget := "prefix.Operation"

	signedReq, err := url.Parse(signedURL)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	q := signedReq.Query()

	validateSignature(t, expectedStrToSignHash, q.Get("X-Amz-Signature"))

	if e, a := expectedAlg, q.Get("X-Amz-Algorithm"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedCredential, q.Get("X-Amz-Credential"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
		t.Errorf("expect %v to be empty", a)
	}
	if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := "us-east-1", q.Get("X-Amz-Region-Set"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
}

func TestPresignHTTP_BodyWithArrayRequest(t *testing.T) {
	req := buildRequest("dynamodb", "us-east-1")
	req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"

	signer, credProvider := buildSigner(t, true)

	key, err := credProvider.RetrievePrivateKey(context.Background())
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	query := req.URL.Query()
	query.Set("X-Amz-Expires", "300")
	req.URL.RawQuery = query.Encode()

	signedURI, _, err := signer.PresignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	signedReq, err := url.Parse(signedURI)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectedAlg := "AWS4-ECDSA-P256-SHA256"
	expectedDate := "19700101T000000Z"
	expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
	expectedStrToSignHash := "acff64fd3689be96259d4112c3742ff79f4da0d813bc58a285dc1c4449760bec"
	expectedCred := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
	expectedTarget := "prefix.Operation"

	q := signedReq.Query()

	validateSignature(t, expectedStrToSignHash, q.Get("X-Amz-Signature"))

	if e, a := expectedAlg, q.Get("X-Amz-Algorithm"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedCred, q.Get("X-Amz-Credential"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
		t.Errorf("expect %v to be empty, was not", a)
	}
	if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := "us-east-1", q.Get("X-Amz-Region-Set"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
}

func TestSign_buildCanonicalHeaders(t *testing.T) {
	serviceName := "mockAPI"
	region := "mock-region"
	endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"

	req, err := http.NewRequest("POST", endpoint, nil)
	if err != nil {
		t.Fatalf("failed to create request, %v", err)
	}

	req.Header.Set("FooInnerSpace", "   inner      space ")
	req.Header.Set("FooLeadingSpace", "    leading-space")
	req.Header.Add("FooMultipleSpace", "no-space")
	req.Header.Add("FooMultipleSpace", "\ttab-space")
	req.Header.Add("FooMultipleSpace", "trailing-space    ")
	req.Header.Set("FooNoSpace", "no-space")
	req.Header.Set("FooTabSpace", "\ttab-space\t")
	req.Header.Set("FooTrailingSpace", "trailing-space    ")
	req.Header.Set("FooWrappedSpace", "   wrapped-space    ")

	credProvider := &SymmetricCredentialAdaptor{
		SymmetricProvider: staticCredentialsProvider{
			Value: aws.Credentials{
				AccessKeyID:     accessKey,
				SecretAccessKey: secretKey,
			},
		},
	}
	key, err := credProvider.RetrievePrivateKey(context.Background())
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	ctx := &httpSigner{
		Request:     req,
		ServiceName: serviceName,
		RegionSet:   []string{region},
		Credentials: key,
		Time:        time.Date(2021, 10, 20, 12, 42, 0, 0, time.UTC),
	}

	build, err := ctx.Build()
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectCanonicalString := strings.Join([]string{
		`POST`,
		`/`,
		``,
		`fooinnerspace:inner space`,
		`fooleadingspace:leading-space`,
		`foomultiplespace:no-space,tab-space,trailing-space`,
		`foonospace:no-space`,
		`footabspace:tab-space`,
		`footrailingspace:trailing-space`,
		`foowrappedspace:wrapped-space`,
		`host:mockAPI.mock-region.amazonaws.com`,
		`x-amz-date:20211020T124200Z`,
		`x-amz-region-set:mock-region`,
		``,
		`fooinnerspace;fooleadingspace;foomultiplespace;foonospace;footabspace;footrailingspace;foowrappedspace;host;x-amz-date;x-amz-region-set`,
		``,
	}, "\n")
	if diff := cmpDiff(expectCanonicalString, build.CanonicalString); diff != "" {
		t.Errorf("expect match, got\n%s", diff)
	}
}

func validateAuthorization(t *testing.T, authorization, expectedAlg, expectedCredential, expectedSignedHeaders, expectedStrToSignHash string) {
	t.Helper()
	split := strings.SplitN(authorization, " ", 2)

	if len(split) != 2 {
		t.Fatal("unexpected authorization header format")
	}

	if e, a := split[0], expectedAlg; e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	keyValues := strings.Split(split[1], ", ")
	seen := make(map[string]string)

	for _, kv := range keyValues {
		idx := strings.Index(kv, "=")
		if idx == -1 {
			continue
		}
		key, value := kv[:idx], kv[idx+1:]
		seen[key] = value
	}

	if a, ok := seen["Credential"]; ok {
		if expectedCredential != a {
			t.Errorf("expected credential %v, got %v", expectedCredential, a)
		}
	} else {
		t.Errorf("Credential not found in authorization string")
	}

	if a, ok := seen["SignedHeaders"]; ok {
		if expectedSignedHeaders != a {
			t.Errorf("expected signed headers %v, got %v", expectedSignedHeaders, a)
		}
	} else {
		t.Errorf("SignedHeaders not found in authorization string")
	}

	if a, ok := seen["Signature"]; ok {
		validateSignature(t, expectedStrToSignHash, a)
	} else {
		t.Errorf("signature not found in authorization string")
	}
}

func validateSignature(t *testing.T, expectedHash, signature string) {
	t.Helper()
	pair, err := deriveKeyFromAccessKeyPair(accessKey, secretKey)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	hash, _ := hex.DecodeString(expectedHash)
	sig, _ := hex.DecodeString(signature)

	ok, err := crypto.VerifySignature(&pair.PublicKey, hash, sig)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	if !ok {
		t.Errorf("failed to verify signing signature")
	}
}

func buildRequest(serviceName, region string) *http.Request {
	endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
	req, _ := http.NewRequest("POST", endpoint, nil)
	req.URL.Opaque = "//example.org/bucket/key-._~,!@%23$%25^&*()"
	req.Header.Set("X-Amz-Target", "prefix.Operation")
	req.Header.Set("Content-Type", "application/x-amz-json-1.0")

	req.Header.Set("Content-Length", strconv.Itoa(1024))

	req.Header.Set("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
	req.Header.Add("X-Amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
	req.Header.Add("X-amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
	return req
}

func buildSigner(t *testing.T, withToken bool) (*Signer, CredentialsProvider) {
	creds := aws.Credentials{
		AccessKeyID:     accessKey,
		SecretAccessKey: secretKey,
	}

	if withToken {
		creds.SessionToken = "TOKEN"
	}

	return NewSigner(func(options *SignerOptions) {
		options.Logger = zaptest.NewLogger(t)
	}), &SymmetricCredentialAdaptor{
		SymmetricProvider: staticCredentialsProvider{
			Value: creds,
		},
	}
}

type staticCredentialsProvider struct {
	Value aws.Credentials
}

func (s staticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) {
	v := s.Value
	if v.AccessKeyID == "" || v.SecretAccessKey == "" {
		return aws.Credentials{
			Source: "Source Name",
		}, fmt.Errorf("static credentials are empty")
	}

	if len(v.Source) == 0 {
		v.Source = "Source Name"
	}

	return v, nil
}

func cmpDiff(e, a interface{}) string {
	if !reflect.DeepEqual(e, a) {
		return fmt.Sprintf("%v != %v", e, a)
	}
	return ""
}

@@ -1,117 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/cache.go

package v4

import (
	"strings"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func lookupKey(service, region string) string {
	var s strings.Builder
	s.Grow(len(region) + len(service) + 3)
	s.WriteString(region)
	s.WriteRune('/')
	s.WriteString(service)
	return s.String()
}

type derivedKey struct {
	AccessKey  string
	Date       time.Time
	Credential []byte
}

type derivedKeyCache struct {
	values map[string]derivedKey
	mutex  sync.RWMutex
}

func newDerivedKeyCache() derivedKeyCache {
	return derivedKeyCache{
		values: make(map[string]derivedKey),
	}
}

func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte {
	key := lookupKey(service, region)
	s.mutex.RLock()
	if cred, ok := s.get(key, credentials, signingTime.Time); ok {
		s.mutex.RUnlock()
		return cred
	}
	s.mutex.RUnlock()

	s.mutex.Lock()
	if cred, ok := s.get(key, credentials, signingTime.Time); ok {
		s.mutex.Unlock()
		return cred
	}
	cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime)
	entry := derivedKey{
		AccessKey:  credentials.AccessKeyID,
		Date:       signingTime.Time,
		Credential: cred,
	}
	s.values[key] = entry
	s.mutex.Unlock()

	return cred
}

func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) {
	cacheEntry, ok := s.retrieveFromCache(key)
	if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) {
		return cacheEntry.Credential, true
	}
	return nil, false
}

func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) {
	if v, ok := s.values[key]; ok {
		return v, true
	}
	return derivedKey{}, false
}

// SigningKeyDeriver derives a signing key from a set of credentials
type SigningKeyDeriver struct {
	cache derivedKeyCache
}

// NewSigningKeyDeriver returns a new SigningKeyDeriver
func NewSigningKeyDeriver() *SigningKeyDeriver {
	return &SigningKeyDeriver{
		cache: newDerivedKeyCache(),
	}
}

// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing.
func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte {
	return k.cache.Get(credential, service, region, signingTime)
}

func deriveKey(secret, service, region string, t SigningTime) []byte {
	hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat()))
	hmacRegion := HMACSHA256(hmacDate, []byte(region))
	hmacService := HMACSHA256(hmacRegion, []byte(service))
	return HMACSHA256(hmacService, []byte("aws4_request"))
}

func isSameDay(x, y time.Time) bool {
	xYear, xMonth, xDay := x.Date()
	yYear, yMonth, yDay := y.Date()

	if xYear != yYear {
		return false
	}

	if xMonth != yMonth {
		return false
	}

	return xDay == yDay
}

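deriveKey above is the standard SigV4 key-derivation chain; the cache merely memoizes it per access key, service, region, and day. The chain is easy to reproduce standalone with the standard library (inputs are placeholders, for illustration only):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// kSigning = HMAC(HMAC(HMAC(HMAC("AWS4"+secret, date), region), service), "aws4_request")
	secret, date, region, service := "EXAMPLESECRET", "20240101", "us-east-1", "s3"
	k := hmacSHA256([]byte("AWS4"+secret), []byte(date))
	k = hmacSHA256(k, []byte(region))
	k = hmacSHA256(k, []byte(service))
	k = hmacSHA256(k, []byte("aws4_request"))
	fmt.Println(hex.EncodeToString(k))
}
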
@@ -1,42 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/const.go

package v4

// Signature Version 4 (SigV4) Constants
const (
	// EmptyStringSHA256 is the hex encoded sha256 value of an empty string
	EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`

	// UnsignedPayload indicates that the request payload body is unsigned
	UnsignedPayload = "UNSIGNED-PAYLOAD"

	// AmzAlgorithmKey indicates the signing algorithm
	AmzAlgorithmKey = "X-Amz-Algorithm"

	// AmzSecurityTokenKey indicates the security token to be used with temporary credentials
	AmzSecurityTokenKey = "X-Amz-Security-Token"

	// AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'
	AmzDateKey = "X-Amz-Date"

	// AmzCredentialKey is the access key ID and credential scope
	AmzCredentialKey = "X-Amz-Credential"

	// AmzSignedHeadersKey is the set of headers signed for the request
	AmzSignedHeadersKey = "X-Amz-SignedHeaders"

	// AmzSignatureKey is the query parameter to store the SigV4 signature
	AmzSignatureKey = "X-Amz-Signature"

	// TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
	TimeFormat = "20060102T150405Z"

	// ShortTimeFormat is the shortened time format used in the credential scope
	ShortTimeFormat = "20060102"

	// ContentSHAKey is the SHA-256 of the request body
	ContentSHAKey = "X-Amz-Content-Sha256"

	// StreamingEventsPayload indicates that the request payload body is a signed event stream.
	StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS"
)

@@ -1,90 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/header_rules.go

package v4

import (
	"strings"
)

// Rules houses a set of Rule needed for validation of a
// string value
type Rules []Rule

// Rule interface allows for more flexible rules and simply
// checks whether or not a value adheres to that Rule
type Rule interface {
	IsValid(value string) bool
}

// IsValid will iterate through all rules and see if any rules
// apply to the value and supports nested rules
func (r Rules) IsValid(value string) bool {
	for _, rule := range r {
		if rule.IsValid(value) {
			return true
		}
	}
	return false
}

// MapRule generic Rule for maps
type MapRule map[string]struct{}

// IsValid for MapRule checks whether the value exists in the map
func (m MapRule) IsValid(value string) bool {
	_, ok := m[value]
	return ok
}

// AllowList is a generic Rule for include listing
type AllowList struct {
	Rule
}

// IsValid for AllowList checks if the value is within the AllowList
func (w AllowList) IsValid(value string) bool {
	return w.Rule.IsValid(value)
}

// ExcludeList is a generic Rule for exclude listing
type ExcludeList struct {
	Rule
}

// IsValid for ExcludeList checks that the value is not matched by the wrapped Rule
func (b ExcludeList) IsValid(value string) bool {
	return !b.Rule.IsValid(value)
}

// Patterns is a list of strings to match against
type Patterns []string

// IsValid for Patterns checks each pattern and returns if a match has
// been found
func (p Patterns) IsValid(value string) bool {
	for _, pattern := range p {
		if HasPrefixFold(value, pattern) {
			return true
		}
	}
	return false
}

// InclusiveRules rules allow for rules to depend on one another
type InclusiveRules []Rule

// IsValid will return true if all rules are true
func (r InclusiveRules) IsValid(value string) bool {
	for _, rule := range r {
		if !rule.IsValid(value) {
			return false
		}
	}
	return true
}

// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
// under Unicode case-folding.
func HasPrefixFold(s, prefix string) bool {
	return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
}

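A minimal sketch of how these combinators compose, mirroring the IgnoredHeaders usage in the signer: Rules.IsValid passes if any member passes, and ExcludeList inverts its wrapped rule. As with the earlier sketch, the internal package is only importable from inside this module:

package main

import (
	"fmt"

	v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/v4"
)

func main() {
	// Every header is "valid" (participates in signing) except the listed keys.
	r := v4Internal.Rules{
		v4Internal.ExcludeList{Rule: v4Internal.MapRule{"Authorization": struct{}{}}},
	}
	fmt.Println(r.IsValid("Authorization")) // false: excluded from signing
	fmt.Println(r.IsValid("Content-Type"))  // true: gets signed
}
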
@ -1,88 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/header.go
// with changes:
// * drop User-Agent header from ignored

package v4

// IgnoredPresignedHeaders is a list of headers that are ignored when presigning
var IgnoredPresignedHeaders = Rules{
	ExcludeList{
		MapRule{
			"Authorization":   struct{}{},
			"User-Agent":      struct{}{},
			"X-Amzn-Trace-Id": struct{}{},
			"Expect":          struct{}{},
		},
	},
}

// IgnoredHeaders is a list of headers that are ignored during signing.
// The User-Agent header is dropped from the ignore list to be compatible with aws sdk java v1.
var IgnoredHeaders = Rules{
	ExcludeList{
		MapRule{
			"Authorization": struct{}{},
			//"User-Agent":  struct{}{},
			"X-Amzn-Trace-Id": struct{}{},
			"Expect":          struct{}{},
		},
	},
}

// RequiredSignedHeaders is an allow list used when building the canonical headers.
var RequiredSignedHeaders = Rules{
	AllowList{
		MapRule{
			"Cache-Control":                         struct{}{},
			"Content-Disposition":                   struct{}{},
			"Content-Encoding":                      struct{}{},
			"Content-Language":                      struct{}{},
			"Content-Md5":                           struct{}{},
			"Content-Type":                          struct{}{},
			"Expires":                               struct{}{},
			"If-Match":                              struct{}{},
			"If-Modified-Since":                     struct{}{},
			"If-None-Match":                         struct{}{},
			"If-Unmodified-Since":                   struct{}{},
			"Range":                                 struct{}{},
			"X-Amz-Acl":                             struct{}{},
			"X-Amz-Copy-Source":                     struct{}{},
			"X-Amz-Copy-Source-If-Match":            struct{}{},
			"X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
			"X-Amz-Copy-Source-If-None-Match":       struct{}{},
			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
			"X-Amz-Copy-Source-Range":               struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
			"X-Amz-Expected-Bucket-Owner":                     struct{}{},
			"X-Amz-Grant-Full-control":                        struct{}{},
			"X-Amz-Grant-Read":                                struct{}{},
			"X-Amz-Grant-Read-Acp":                            struct{}{},
			"X-Amz-Grant-Write":                               struct{}{},
			"X-Amz-Grant-Write-Acp":                           struct{}{},
			"X-Amz-Metadata-Directive":                        struct{}{},
			"X-Amz-Mfa":                                       struct{}{},
			"X-Amz-Request-Payer":                             struct{}{},
			"X-Amz-Server-Side-Encryption":                    struct{}{},
			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":     struct{}{},
			"X-Amz-Server-Side-Encryption-Context":            struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key":       struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
			"X-Amz-Storage-Class":                             struct{}{},
			"X-Amz-Website-Redirect-Location":                 struct{}{},
			"X-Amz-Content-Sha256":                            struct{}{},
			"X-Amz-Tagging":                                   struct{}{},
		},
	},
	Patterns{"X-Amz-Object-Lock-"},
	Patterns{"X-Amz-Meta-"},
}

// AllowedQueryHoisting is an allow list of headers that may be hoisted
// into a presigned request's query string.
var AllowedQueryHoisting = InclusiveRules{
	ExcludeList{RequiredSignedHeaders},
	Patterns{"X-Amz-"},
}
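Taken together, these rule sets drive the signer defined later in this diff: IgnoredHeaders decides what never enters the signature, RequiredSignedHeaders decides what must stay in the canonical headers, and AllowedQueryHoisting marks the X-Amz-* headers that may be moved into a presigned URL's query string. A quick illustration, consistent with the tests that follow:

	IgnoredHeaders.IsValid("Authorization")         // false: excluded from signing
	RequiredSignedHeaders.IsValid("X-Amz-Meta-Foo") // true: matches the X-Amz-Meta- pattern
	AllowedQueryHoisting.IsValid("X-Amz-Expires")   // true: X-Amz-* and not required-signed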
@ -1,65 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/header_test.go

package v4

import "testing"

func TestAllowedQueryHoisting(t *testing.T) {
	cases := map[string]struct {
		Header      string
		ExpectHoist bool
	}{
		"object-lock": {
			Header:      "X-Amz-Object-Lock-Mode",
			ExpectHoist: false,
		},
		"s3 metadata": {
			Header:      "X-Amz-Meta-SomeName",
			ExpectHoist: false,
		},
		"another header": {
			Header:      "X-Amz-SomeOtherHeader",
			ExpectHoist: true,
		},
		"non X-AMZ header": {
			Header:      "X-SomeOtherHeader",
			ExpectHoist: false,
		},
	}

	for name, c := range cases {
		t.Run(name, func(t *testing.T) {
			if e, a := c.ExpectHoist, AllowedQueryHoisting.IsValid(c.Header); e != a {
				t.Errorf("expect hoist %v, was %v", e, a)
			}
		})
	}
}

func TestIgnoredHeaders(t *testing.T) {
	cases := map[string]struct {
		Header        string
		ExpectIgnored bool
	}{
		"expect": {
			Header:        "Expect",
			ExpectIgnored: true,
		},
		"authorization": {
			Header:        "Authorization",
			ExpectIgnored: true,
		},
		"X-AMZ header": {
			Header:        "X-Amz-Content-Sha256",
			ExpectIgnored: false,
		},
	}

	for name, c := range cases {
		t.Run(name, func(t *testing.T) {
			if e, a := c.ExpectIgnored, IgnoredHeaders.IsValid(c.Header); e == a {
				t.Errorf("expect ignored %v, was %v", e, a)
			}
		})
	}
}
@ -1,15 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/hmac.go

package v4

import (
	"crypto/hmac"
	"crypto/sha256"
)

// HMACSHA256 computes an HMAC-SHA256 of data given the provided key.
func HMACSHA256(key []byte, data []byte) []byte {
	hash := hmac.New(sha256.New, key)
	hash.Write(data)
	return hash.Sum(nil)
}
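HMACSHA256 is the primitive behind SigV4 key derivation: the signing key is four chained HMACs over the short date, region, service, and a fixed terminator. As a sketch of that standard derivation, which the package's SigningKeyDeriver (used by the signers below) computes; secretAccessKey, region, and service stand in for real inputs:

	date := signingTime.ShortTimeFormat() // e.g. "20060102"
	kDate := HMACSHA256([]byte("AWS4"+secretAccessKey), []byte(date))
	kRegion := HMACSHA256(kDate, []byte(region))
	kService := HMACSHA256(kRegion, []byte(service))
	signingKey := HMACSHA256(kService, []byte("aws4_request"))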
@ -1,77 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/host.go

package v4

import (
	"net/http"
	"strings"
)

// SanitizeHostForHeader removes the default port from the host and updates request.Host
func SanitizeHostForHeader(r *http.Request) {
	host := getHost(r)
	port := portOnly(host)
	if port != "" && isDefaultPort(r.URL.Scheme, port) {
		r.Host = stripPort(host)
	}
}

// getHost returns the host from the request
func getHost(r *http.Request) string {
	if r.Host != "" {
		return r.Host
	}

	return r.URL.Host
}

// stripPort returns hostport without any port number.
//
// If hostport is an IPv6 literal with a port number, stripPort returns the
// IPv6 literal without the square brackets. IPv6 literals may include
// a zone identifier.
//
// Copied from the Go 1.8 standard library (net/url)
func stripPort(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	if colon == -1 {
		return hostport
	}
	if i := strings.IndexByte(hostport, ']'); i != -1 {
		return strings.TrimPrefix(hostport[:i], "[")
	}
	return hostport[:colon]
}

// portOnly returns the port part of hostport, without the leading colon.
// If hostport doesn't contain a port, portOnly returns an empty string.
//
// Copied from the Go 1.8 standard library (net/url)
func portOnly(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	if colon == -1 {
		return ""
	}
	if i := strings.Index(hostport, "]:"); i != -1 {
		return hostport[i+len("]:"):]
	}
	if strings.Contains(hostport, "]") {
		return ""
	}
	return hostport[colon+len(":"):]
}

// isDefaultPort returns true if the specified URI is using the standard port
// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
func isDefaultPort(scheme, port string) bool {
	if port == "" {
		return true
	}

	lowerCaseScheme := strings.ToLower(scheme)
	if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
		return true
	}

	return false
}
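This matters for signing because the Host header participates in the canonical request: "example.com:443" and "example.com" would produce different signatures even though they address the same endpoint. A small illustration:

	req, _ := http.NewRequest("GET", "https://example.com:443/key", nil)
	SanitizeHostForHeader(req)
	// req.Host is now "example.com"; a non-default port such as :9000 would be left intact.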
@ -1,15 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/scope.go

package v4

import "strings"

// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope
func BuildCredentialScope(signingTime SigningTime, region, service string) string {
	return strings.Join([]string{
		signingTime.ShortTimeFormat(),
		region,
		service,
		"aws4_request",
	}, "/")
}
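The scope pins a signature to a date, region, and service. For a request signed on 2006-01-02 against S3 in us-east-1:

	BuildCredentialScope(st, "us-east-1", "s3") // "20060102/us-east-1/s3/aws4_request"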
@ -1,38 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/time.go

package v4

import "time"

// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing.
type SigningTime struct {
	time.Time
	timeFormat      string
	shortTimeFormat string
}

// NewSigningTime creates a new SigningTime given a time.Time
func NewSigningTime(t time.Time) SigningTime {
	return SigningTime{
		Time: t,
	}
}

// TimeFormat provides a time formatted in the X-Amz-Date format.
func (m *SigningTime) TimeFormat() string {
	return m.format(&m.timeFormat, TimeFormat)
}

// ShortTimeFormat provides a time formatted as 20060102.
func (m *SigningTime) ShortTimeFormat() string {
	return m.format(&m.shortTimeFormat, ShortTimeFormat)
}

func (m *SigningTime) format(target *string, format string) string {
	if len(*target) > 0 {
		return *target
	}
	v := m.Time.Format(format)
	*target = v
	return v
}
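Both accessors memoize their result, so one signing pass formats the time at most twice no matter how often the values are reused. Assuming the package's TimeFormat and ShortTimeFormat constants (defined outside this excerpt) carry the usual SigV4 layouts "20060102T150405Z" and "20060102":

	st := NewSigningTime(time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC))
	st.TimeFormat()      // "20060102T150405Z"
	st.ShortTimeFormat() // "20060102"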
@ -1,82 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/util.go

package v4

import (
	"net/url"
	"strings"
)

const doubleSpace = "  "

// StripExcessSpaces trims leading and trailing spaces from str and
// collapses any run of multiple side-by-side spaces into a single space.
func StripExcessSpaces(str string) string {
	var j, k, l, m, spaces int
	// Trim trailing spaces
	for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
	}

	// Trim leading spaces
	for k = 0; k < j && str[k] == ' '; k++ {
	}
	str = str[k : j+1]

	// Strip multiple spaces.
	j = strings.Index(str, doubleSpace)
	if j < 0 {
		return str
	}

	buf := []byte(str)
	for k, m, l = j, j, len(buf); k < l; k++ {
		if buf[k] == ' ' {
			if spaces == 0 {
				// First space.
				buf[m] = buf[k]
				m++
			}
			spaces++
		} else {
			// End of multiple spaces.
			spaces = 0
			buf[m] = buf[k]
			m++
		}
	}

	return string(buf[:m])
}

// GetURIPath returns the escaped URI component from the provided URL.
func GetURIPath(u *url.URL) string {
	var uriPath string

	if len(u.Opaque) > 0 {
		const schemeSep, pathSep, queryStart = "//", "/", "?"

		opaque := u.Opaque
		// Cut off the query string if present.
		if idx := strings.Index(opaque, queryStart); idx >= 0 {
			opaque = opaque[:idx]
		}

		// Cut out the scheme separator if present.
		if strings.Index(opaque, schemeSep) == 0 {
			opaque = opaque[len(schemeSep):]
		}

		// Capture the URI path starting at the first path separator.
		if idx := strings.Index(opaque, pathSep); idx >= 0 {
			uriPath = opaque[idx:]
		}
	} else {
		uriPath = u.EscapedPath()
	}

	if len(uriPath) == 0 {
		uriPath = "/"
	}

	return uriPath
}
@ -1,160 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/util_test.go

package v4

import (
	"net/http"
	"net/url"
	"testing"
)

func lazyURLParse(v string) func() (*url.URL, error) {
	return func() (*url.URL, error) {
		return url.Parse(v)
	}
}

func TestGetURIPath(t *testing.T) {
	cases := map[string]struct {
		getURL func() (*url.URL, error)
		expect string
	}{
		// Cases
		"with scheme": {
			getURL: lazyURLParse("https://localhost:9000"),
			expect: "/",
		},
		"no port, with scheme": {
			getURL: lazyURLParse("https://localhost"),
			expect: "/",
		},
		"without scheme": {
			getURL: lazyURLParse("localhost:9000"),
			expect: "/",
		},
		"without scheme, with path": {
			getURL: lazyURLParse("localhost:9000/abc123"),
			expect: "/abc123",
		},
		"without scheme, with separator": {
			getURL: lazyURLParse("//localhost:9000"),
			expect: "/",
		},
		"no port, without scheme, with separator": {
			getURL: lazyURLParse("//localhost"),
			expect: "/",
		},
		"without scheme, with separator, with path": {
			getURL: lazyURLParse("//localhost:9000/abc123"),
			expect: "/abc123",
		},
		"no port, without scheme, with separator, with path": {
			getURL: lazyURLParse("//localhost/abc123"),
			expect: "/abc123",
		},
		"opaque with query string": {
			getURL: lazyURLParse("localhost:9000/abc123?efg=456"),
			expect: "/abc123",
		},
		"failing test": {
			getURL: func() (*url.URL, error) {
				endpoint := "https://service.region.amazonaws.com"
				req, _ := http.NewRequest("POST", endpoint, nil)
				u := req.URL

				u.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"

				query := u.Query()
				query.Set("some-query-key", "value")
				u.RawQuery = query.Encode()

				return u, nil
			},
			expect: "/bucket/key-._~,!@#$%^&*()",
		},
	}

	for name, c := range cases {
		t.Run(name, func(t *testing.T) {
			u, err := c.getURL()
			if err != nil {
				t.Fatalf("failed to get URL, %v", err)
			}

			actual := GetURIPath(u)
			if e, a := c.expect, actual; e != a {
				t.Errorf("expect %v path, got %v", e, a)
			}
		})
	}
}

func TestStripExcessHeaders(t *testing.T) {
	vals := []string{
		"",
		"123",
		"1 2 3",
		"1 2 3 ",
		" 1 2 3",
		"1  2 3",
		"1  23",
		"1  2  3",
		"1  2  ",
		" 1  2  ",
		"12   3",
		"12   3   1",
		"12           3     1",
		"12     3       1abc123",
	}

	expected := []string{
		"",
		"123",
		"1 2 3",
		"1 2 3",
		"1 2 3",
		"1 2 3",
		"1 23",
		"1 2 3",
		"1 2",
		"1 2",
		"12 3",
		"12 3 1",
		"12 3 1",
		"12 3 1abc123",
	}

	for i := 0; i < len(vals); i++ {
		r := StripExcessSpaces(vals[i])
		if e, a := expected[i], r; e != a {
			t.Errorf("%d, expect %v, got %v", i, e, a)
		}
	}
}

var stripExcessSpaceCases = []string{
	`AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`,
	`123   321   123   321`,
	`   123   321   123   321   `,
	` 123   321   123   321 `,
	"123",
	"1 2 3",
	"  1 2 3",
	"1  2 3",
	"1  23",
	"1  2  3",
	"1  2  ",
	" 1  2  ",
	"12   3",
	"12   3   1",
	"12           3     1",
	"12     3       1abc123",
}

func BenchmarkStripExcessSpaces(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, v := range stripExcessSpaceCases {
			StripExcessSpaces(v)
		}
	}
}
@ -1,120 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go
// with changes:
// * add GetTrailerSignature

package v4

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"strings"
	"time"

	v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/internal/v4"
	"github.com/aws/aws-sdk-go-v2/aws"
)

// EventStreamSigner is an AWS EventStream protocol signer.
type EventStreamSigner interface {
	GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error)
}

// StreamSignerOptions is the configuration options for StreamSigner.
type StreamSignerOptions struct{}

// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads.
type StreamSigner struct {
	options StreamSignerOptions

	credentials aws.Credentials
	service     string
	region      string

	prevSignature []byte

	signingKeyDeriver *v4Internal.SigningKeyDeriver
}

// NewStreamSigner returns a new AWS EventStream protocol signer.
func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner {
	o := StreamSignerOptions{}

	for _, fn := range optFns {
		fn(&o)
	}

	return &StreamSigner{
		options:           o,
		credentials:       credentials,
		service:           service,
		region:            region,
		signingKeyDeriver: v4Internal.NewSigningKeyDeriver(),
		prevSignature:     seedSignature,
	}
}

// GetSignature signs the provided header and payload bytes.
func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) {
	options := s.options

	for _, fn := range optFns {
		fn(&options)
	}

	prevSignature := s.prevSignature

	st := v4Internal.NewSigningTime(signingTime)

	sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)

	scope := v4Internal.BuildCredentialScope(st, s.region, s.service)

	stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st)

	signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign))
	s.prevSignature = signature

	return signature, nil
}

func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
	hash := sha256.New()
	return strings.Join([]string{
		"AWS4-HMAC-SHA256-PAYLOAD",
		signingTime.TimeFormat(),
		credentialScope,
		hex.EncodeToString(previousSignature),
		hex.EncodeToString(makeHash(hash, headers)),
		hex.EncodeToString(makeHash(hash, payload)),
	}, "\n")
}

// GetTrailerSignature signs the provided trailing payload bytes.
func (s *StreamSigner) GetTrailerSignature(payload []byte, signingTime time.Time) ([]byte, error) {
	prevSignature := s.prevSignature

	st := v4Internal.NewSigningTime(signingTime)

	sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)

	scope := v4Internal.BuildCredentialScope(st, s.region, s.service)

	stringToSign := s.buildEventStreamStringToSignTrailer(payload, prevSignature, scope, &st)

	signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign))
	s.prevSignature = signature

	return signature, nil
}

func (s *StreamSigner) buildEventStreamStringToSignTrailer(payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
	hash := sha256.New()
	return strings.Join([]string{
		"AWS4-HMAC-SHA256-TRAILER",
		signingTime.TimeFormat(),
		credentialScope,
		hex.EncodeToString(previousSignature),
		hex.EncodeToString(makeHash(hash, payload)),
	}, "\n")
}
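Each signature feeds the next: prevSignature starts at the seed signature taken from the initial request and is overwritten after every call, so stream frames can only be verified in order. A usage sketch (frames and trailerBytes are placeholders):

	signer := NewStreamSigner(creds, "s3", "us-east-1", seedSignature)
	for _, frame := range frames {
		sig, err := signer.GetSignature(ctx, frame.Headers, frame.Payload, time.Now().UTC())
		if err != nil {
			return err
		}
		_ = sig // attach sig to the frame before sending
	}
	trailerSig, err := signer.GetTrailerSignature(trailerBytes, time.Now().UTC())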
@ -1,582 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/v4.go
// with changes:
// * using different headers for sign/presign
// * don't duplicate content-length as signed header
// * use copy of smithy-go encoding/httpbinding package
// * use zap.Logger instead of smithy-go/logging

// Package v4 implements the AWS signature version 4 algorithm (commonly known
// as SigV4).
//
// For more information about SigV4, see [Signing AWS API requests] in the IAM
// user guide.
//
// While this implementation CAN work in an external context, it is developed
// primarily for SDK use and you may encounter fringe behaviors around header
// canonicalization.
//
// # Pre-escaping a request URI
//
// AWS v4 signature validation requires that the canonical string's URI path
// component must be the escaped form of the HTTP request's path.
//
// The Go HTTP client will perform escaping automatically on the HTTP request.
// This may cause signature validation errors because the request differs from
// the URI path or query from which the signature was generated.
//
// Because of this, we recommend that you explicitly escape the request when
// using this signer outside of the SDK to prevent possible signature mismatch.
// This can be done by setting URL.Opaque on the request. The signer will
// prefer that value, falling back to the return of URL.EscapedPath if unset.
//
// When setting URL.Opaque you must do so in the form of:
//
//	"//<hostname>/<path>"
//
//	// e.g.
//	"//example.com/some/path"
//
// The leading "//" and hostname are required or the escaping will not work
// correctly.
//
// The TestStandaloneSign unit test provides a complete example of using the
// signer outside of the SDK and pre-escaping the URI path.
//
// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html
package v4

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"net/http"
	"net/textproto"
	"net/url"
	"sort"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/smithy/encoding/httpbinding"
	v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/internal/v4"
	"github.com/aws/aws-sdk-go-v2/aws"
	"go.uber.org/zap"
)

const (
	signingAlgorithm    = "AWS4-HMAC-SHA256"
	authorizationHeader = "Authorization"

	// Version of signing v4
	Version = "SigV4"
)

// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests
type HTTPSigner interface {
	SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error
}

type keyDerivator interface {
	DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte
}

// SignerOptions is the SigV4 Signer options.
type SignerOptions struct {
	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
	// request header to the request's query string. This is most commonly used
	// with pre-signed requests preventing headers from being added to the
	// request's query string.
	DisableHeaderHoisting bool

	// Disables the automatic escaping of the URI path of the request for the
	// signature's canonical string's path. For services that do not need
	// additional escaping, use this to disable the signer escaping the path.
	//
	// S3 is an example of a service that does not need additional escaping.
	//
	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
	DisableURIPathEscaping bool

	// The logger to send log messages to.
	Logger *zap.Logger

	// Enable logging of signed requests.
	// This will enable logging of the canonical request, the string to sign, and for presigning the subsequent
	// presigned URL.
	LogSigning bool

	// Disables setting the session token on the request as part of signing
	// through X-Amz-Security-Token. This is needed for variations of v4 that
	// present the token elsewhere.
	DisableSessionToken bool
}

// Signer applies AWS v4 signing to given request. Use this to sign requests
// that need to be signed with AWS V4 Signatures.
type Signer struct {
	options      SignerOptions
	keyDerivator keyDerivator
}

// NewSigner returns a new SigV4 Signer
func NewSigner(optFns ...func(signer *SignerOptions)) *Signer {
	options := SignerOptions{}

	for _, fn := range optFns {
		fn(&options)
	}

	return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()}
}

type httpSigner struct {
	Request      *http.Request
	ServiceName  string
	Region       string
	Time         v4Internal.SigningTime
	Credentials  aws.Credentials
	KeyDerivator keyDerivator
	IsPreSign    bool

	PayloadHash string

	DisableHeaderHoisting  bool
	DisableURIPathEscaping bool
	DisableSessionToken    bool
}

func (s *httpSigner) Build() (signedRequest, error) {
	req := s.Request

	query := req.URL.Query()
	headers := req.Header

	s.setRequiredSigningFields(headers, query)

	// Sort Each Query Key's Values
	for key := range query {
		sort.Strings(query[key])
	}

	v4Internal.SanitizeHostForHeader(req)

	credentialScope := s.buildCredentialScope()
	credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope
	if s.IsPreSign {
		query.Set(v4Internal.AmzCredentialKey, credentialStr)
	}

	unsignedHeaders := headers
	if s.IsPreSign && !s.DisableHeaderHoisting {
		var urlValues url.Values
		urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers)
		for k := range urlValues {
			query[k] = urlValues[k]
		}
	}

	host := req.URL.Host
	if len(req.Host) > 0 {
		host = req.Host
	}

	var (
		signedHeaders      http.Header
		signedHeadersStr   string
		canonicalHeaderStr string
	)

	if s.IsPreSign {
		signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredPresignedHeaders, unsignedHeaders, s.Request.ContentLength)
	} else {
		signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
	}

	if s.IsPreSign {
		query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr)
	}

	var rawQuery strings.Builder
	rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1))

	canonicalURI := v4Internal.GetURIPath(req.URL)
	if !s.DisableURIPathEscaping {
		canonicalURI = httpbinding.EscapePath(canonicalURI, false)
	}

	canonicalString := s.buildCanonicalString(
		req.Method,
		canonicalURI,
		rawQuery.String(),
		signedHeadersStr,
		canonicalHeaderStr,
	)

	strToSign := s.buildStringToSign(credentialScope, canonicalString)
	signingSignature, err := s.buildSignature(strToSign)
	if err != nil {
		return signedRequest{}, err
	}

	if s.IsPreSign {
		rawQuery.WriteString("&X-Amz-Signature=")
		rawQuery.WriteString(signingSignature)
	} else {
		headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature))
	}

	req.URL.RawQuery = rawQuery.String()

	return signedRequest{
		Request:         req,
		SignedHeaders:   signedHeaders,
		CanonicalString: canonicalString,
		StringToSign:    strToSign,
		PreSigned:       s.IsPreSign,
	}, nil
}

func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string {
	const credential = "Credential="
	const signedHeaders = "SignedHeaders="
	const signature = "Signature="
	const commaSpace = ", "

	var parts strings.Builder
	parts.Grow(len(signingAlgorithm) + 1 +
		len(credential) + len(credentialStr) + 2 +
		len(signedHeaders) + len(signedHeadersStr) + 2 +
		len(signature) + len(signingSignature),
	)
	parts.WriteString(signingAlgorithm)
	parts.WriteRune(' ')
	parts.WriteString(credential)
	parts.WriteString(credentialStr)
	parts.WriteString(commaSpace)
	parts.WriteString(signedHeaders)
	parts.WriteString(signedHeadersStr)
	parts.WriteString(commaSpace)
	parts.WriteString(signature)
	parts.WriteString(signingSignature)
	return parts.String()
}
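For reference, the assembled header follows the standard SigV4 shape (the test file at the end of this diff asserts the same layout); with placeholder values it reads:

	Authorization: AWS4-HMAC-SHA256 Credential=AKID/20060102/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=<64 hex chars>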
// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the
// request is made to, and time the request is signed at. The signTime allows
// you to specify that a request is signed for the future, and cannot be
// used until then.
//
// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
// must be provided, even if the request has no payload (aka body). If the
// request has no payload you should use the hex encoded SHA-256 of an empty
// string as the payloadHash value.
//
//	"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
//
// Some services such as Amazon S3 accept alternative values for the payload
// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
// included in the request signature.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
//
// Sign differs from Presign in that it will sign the request using HTTP
// header values. This type of signing is intended for http.Request values that
// will not be shared, or are shared in a way the header values on the request
// will not be lost.
//
// The passed in request will be modified in place.
func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error {
	options := s.options

	for _, fn := range optFns {
		fn(&options)
	}

	signer := &httpSigner{
		Request:                r,
		PayloadHash:            payloadHash,
		ServiceName:            service,
		Region:                 region,
		Credentials:            credentials,
		Time:                   v4Internal.NewSigningTime(signingTime.UTC()),
		DisableHeaderHoisting:  options.DisableHeaderHoisting,
		DisableURIPathEscaping: options.DisableURIPathEscaping,
		DisableSessionToken:    options.DisableSessionToken,
		KeyDerivator:           s.keyDerivator,
	}

	signedRequest, err := signer.Build()
	if err != nil {
		return err
	}

	logSigningInfo(ctx, options, &signedRequest, false)

	return nil
}

// PresignHTTP signs AWS v4 requests with the payload hash, service name, region
// the request is made to, and time the request is signed at. The signTime
// allows you to specify that a request is signed for the future, and cannot
// be used until then.
//
// Returns the signed URL and the map of HTTP headers that were included in the
// signature or an error if signing the request failed. For presigned requests
// these headers and their values must be included on the HTTP request when it
// is made. This is helpful to know what header values need to be shared with
// the party the presigned request will be distributed to.
//
// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
// must be provided, even if the request has no payload (aka body). If the
// request has no payload you should use the hex encoded SHA-256 of an empty
// string as the payloadHash value.
//
//	"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
//
// Some services such as Amazon S3 accept alternative values for the payload
// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
// included in the request signature.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
//
// PresignHTTP differs from SignHTTP in that it will sign the request using
// query string instead of header values. This allows you to share the
// Presigned Request's URL with third parties, or distribute it throughout your
// system with minimal dependencies.
//
// PresignHTTP will not set the expires time of the presigned request
// automatically. To specify the expire duration for a request add the
// "X-Amz-Expires" query parameter on the request with the value as the
// duration in seconds the presigned URL should be considered valid for. This
// parameter is not used by all AWS services, and is most notably used by
// Amazon S3 APIs.
//
//	expires := 20 * time.Minute
//	query := req.URL.Query()
//	query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10))
//	req.URL.RawQuery = query.Encode()
//
// This method does not modify the provided request.
func (s *Signer) PresignHTTP(
	ctx context.Context, credentials aws.Credentials, r *http.Request,
	payloadHash string, service string, region string, signingTime time.Time,
	optFns ...func(*SignerOptions),
) (signedURI string, signedHeaders http.Header, err error) {
	options := s.options

	for _, fn := range optFns {
		fn(&options)
	}

	signer := &httpSigner{
		Request:                r.Clone(r.Context()),
		PayloadHash:            payloadHash,
		ServiceName:            service,
		Region:                 region,
		Credentials:            credentials,
		Time:                   v4Internal.NewSigningTime(signingTime.UTC()),
		IsPreSign:              true,
		DisableHeaderHoisting:  options.DisableHeaderHoisting,
		DisableURIPathEscaping: options.DisableURIPathEscaping,
		DisableSessionToken:    options.DisableSessionToken,
		KeyDerivator:           s.keyDerivator,
	}

	signedRequest, err := signer.Build()
	if err != nil {
		return "", nil, err
	}

	logSigningInfo(ctx, options, &signedRequest, true)

	signedHeaders = make(http.Header)

	// For the signed headers we canonicalize the header keys in the returned map.
	// This avoids situations where the standard library can double headers such as
	// the Host header: the standard library will set the Host header even if it is
	// present in lower-case form.
	for k, v := range signedRequest.SignedHeaders {
		key := textproto.CanonicalMIMEHeaderKey(k)
		signedHeaders[key] = append(signedHeaders[key], v...)
	}

	return signedRequest.Request.URL.String(), signedHeaders, nil
}

func (s *httpSigner) buildCredentialScope() string {
	return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName)
}

func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
	query := url.Values{}
	unsignedHeaders := http.Header{}

	// A list of headers to be converted to lower case to mitigate a limitation from S3
	lowerCaseHeaders := map[string]string{
		"X-Amz-Expected-Bucket-Owner": "x-amz-expected-bucket-owner", // see #2508
		"X-Amz-Request-Payer":         "x-amz-request-payer",         // see #2764
	}

	for k, h := range header {
		if newKey, ok := lowerCaseHeaders[k]; ok {
			k = newKey
		}

		if r.IsValid(k) {
			query[k] = h
		} else {
			unsignedHeaders[k] = h
		}
	}

	return query, unsignedHeaders
}

func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
	signed = make(http.Header)

	var headers []string
	const hostHeader = "host"
	headers = append(headers, hostHeader)
	signed[hostHeader] = append(signed[hostHeader], host)

	//const contentLengthHeader = "content-length"
	//if length > 0 {
	//	headers = append(headers, contentLengthHeader)
	//	signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
	//}

	for k, v := range header {
		if !rule.IsValid(k) {
			continue // ignored header
		}
		//if strings.EqualFold(k, contentLengthHeader) {
		//	// prevent signing already handled content-length header.
		//	continue
		//}

		lowerCaseKey := strings.ToLower(k)
		if _, ok := signed[lowerCaseKey]; ok {
			// include additional values
			signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
			continue
		}

		headers = append(headers, lowerCaseKey)
		signed[lowerCaseKey] = v
	}
	sort.Strings(headers)

	signedHeaders = strings.Join(headers, ";")

	var canonicalHeaders strings.Builder
	n := len(headers)
	const colon = ':'
	for i := 0; i < n; i++ {
		if headers[i] == hostHeader {
			canonicalHeaders.WriteString(hostHeader)
			canonicalHeaders.WriteRune(colon)
			canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host))
		} else {
			canonicalHeaders.WriteString(headers[i])
			canonicalHeaders.WriteRune(colon)
			// Trim out leading, trailing, and dedup inner spaces from signed header values.
			values := signed[headers[i]]
			for j, v := range values {
				cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
				canonicalHeaders.WriteString(cleanedValue)
				if j < len(values)-1 {
					canonicalHeaders.WriteRune(',')
				}
			}
		}
		canonicalHeaders.WriteRune('\n')
	}
	canonicalHeadersStr = canonicalHeaders.String()

	return signed, signedHeaders, canonicalHeadersStr
}

func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string {
	return strings.Join([]string{
		method,
		uri,
		query,
		canonicalHeaders,
		signedHeaders,
		s.PayloadHash,
	}, "\n")
}

func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string {
	return strings.Join([]string{
		signingAlgorithm,
		s.Time.TimeFormat(),
		credentialScope,
		hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))),
	}, "\n")
}

func makeHash(hash hash.Hash, b []byte) []byte {
	hash.Reset()
	hash.Write(b)
	return hash.Sum(nil)
}

func (s *httpSigner) buildSignature(strToSign string) (string, error) {
	key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time)
	return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil
}

func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) {
	amzDate := s.Time.TimeFormat()

	if s.IsPreSign {
		query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm)
		sessionToken := s.Credentials.SessionToken
		if !s.DisableSessionToken && len(sessionToken) > 0 {
			query.Set("X-Amz-Security-Token", sessionToken)
		}

		query.Set(v4Internal.AmzDateKey, amzDate)
		return
	}

	headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate)

	if !s.DisableSessionToken && len(s.Credentials.SessionToken) > 0 {
		headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken)
	}
}

func logSigningInfo(_ context.Context, options SignerOptions, request *signedRequest, isPresign bool) {
	if !options.LogSigning {
		return
	}
	signedURLMsg := ""
	if isPresign {
		signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String())
	}

	if options.Logger != nil {
		options.Logger.Debug(fmt.Sprintf(logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg))
	}
}

type signedRequest struct {
	Request         *http.Request
	SignedHeaders   http.Header
	CanonicalString string
	StringToSign    string
	PreSigned       bool
}

const logSignInfoMsg = `Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
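Pulling the pieces together, a caller hashes the payload, optionally pre-escapes the path through URL.Opaque as the package comment advises, and calls SignHTTP. A minimal sketch (the endpoint, body, ctx, and creds values are placeholders):

	sum := sha256.Sum256(body)
	payloadHash := hex.EncodeToString(sum[:])
	req, _ := http.NewRequest("PUT", "https://s3.example.com/bucket/object", bytes.NewReader(body))
	req.URL.Opaque = "//s3.example.com/bucket/object" // pre-escaped path form
	err := NewSigner().SignHTTP(ctx, creds, req, payloadHash, "s3", "us-east-1", time.Now().UTC())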
@ -1,370 +0,0 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/v4_test.go
// with changes:
// * don't duplicate content-length as signed header

package v4

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"reflect"
	"strings"
	"testing"
	"time"

	v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/internal/v4"
	"github.com/aws/aws-sdk-go-v2/aws"
)

var testCredentials = aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET", SessionToken: "SESSION"}

func buildRequest(serviceName, region, body string) (*http.Request, string) {
	reader := strings.NewReader(body)
	return buildRequestWithBodyReader(serviceName, region, reader)
}

func buildRequestWithBodyReader(serviceName, region string, body io.Reader) (*http.Request, string) {
	var bodyLen int

	type lenner interface {
		Len() int
	}
	if lr, ok := body.(lenner); ok {
		bodyLen = lr.Len()
	}

	endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
	req, _ := http.NewRequest("POST", endpoint, body)
	req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"
	req.Header.Set("X-Amz-Target", "prefix.Operation")
	req.Header.Set("Content-Type", "application/x-amz-json-1.0")

	if bodyLen > 0 {
		req.ContentLength = int64(bodyLen)
	}

	req.Header.Set("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
	req.Header.Add("X-Amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
	req.Header.Add("X-amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")

	h := sha256.New()
	_, _ = io.Copy(h, body)
	payloadHash := hex.EncodeToString(h.Sum(nil))

	return req, payloadHash
}

func TestPresignRequest(t *testing.T) {
	req, body := buildRequest("dynamodb", "us-east-1", "{}")
	req.Header.Set("Content-Length", "2")

	query := req.URL.Query()
	query.Set("X-Amz-Expires", "300")
	req.URL.RawQuery = query.Encode()

	signer := NewSigner()
	signed, headers, err := signer.PresignHTTP(context.Background(), testCredentials, req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectedDate := "19700101T000000Z"
	expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
	expectedSig := "122f0b9e091e4ba84286097e2b3404a1f1f4c4aad479adda95b7dff0ccbe5581"
	expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
	expectedTarget := "prefix.Operation"

	q, err := url.ParseQuery(signed[strings.Index(signed, "?"):])
	if err != nil {
		t.Errorf("expect no error, got %v", err)
	}

	if e, a := expectedSig, q.Get("X-Amz-Signature"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedCred, q.Get("X-Amz-Credential"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
		t.Errorf("expect %v to be empty", a)
	}
	if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}

	for _, h := range strings.Split(expectedHeaders, ";") {
		v := headers.Get(h)
		if len(v) == 0 {
			t.Errorf("expect %v, to be present in header map", h)
		}
	}
}

func TestPresignBodyWithArrayRequest(t *testing.T) {
	req, body := buildRequest("dynamodb", "us-east-1", "{}")
	req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
	req.Header.Set("Content-Length", "2")

	query := req.URL.Query()
	query.Set("X-Amz-Expires", "300")
	req.URL.RawQuery = query.Encode()

	signer := NewSigner()
	signed, headers, err := signer.PresignHTTP(context.Background(), testCredentials, req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	q, err := url.ParseQuery(signed[strings.Index(signed, "?"):])
	if err != nil {
		t.Errorf("expect no error, got %v", err)
	}

	expectedDate := "19700101T000000Z"
	expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
	expectedSig := "e3ac55addee8711b76c6d608d762cff285fe8b627a057f8b5ec9268cf82c08b1"
	expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
	expectedTarget := "prefix.Operation"

	if e, a := expectedSig, q.Get("X-Amz-Signature"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedCred, q.Get("X-Amz-Credential"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
		t.Errorf("expect %v to be empty, was not", a)
	}
	if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}

	for _, h := range strings.Split(expectedHeaders, ";") {
		v := headers.Get(h)
		if len(v) == 0 {
			t.Errorf("expect %v, to be present in header map", h)
		}
	}
}

func TestSignRequest(t *testing.T) {
	req, body := buildRequest("dynamodb", "us-east-1", "{}")
	req.Header.Set("Content-Length", "2")
	signer := NewSigner()
	err := signer.SignHTTP(context.Background(), testCredentials, req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	expectedDate := "19700101T000000Z"
	expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-security-token;x-amz-target, Signature=a518299330494908a70222cec6899f6f32f297f8595f6df1776d998936652ad9"

	q := req.Header
	if e, a := expectedSig, q.Get("Authorization"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
}

func TestBuildCanonicalRequest(t *testing.T) {
	req, _ := buildRequest("dynamodb", "us-east-1", "{}")
	req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"

	ctx := &httpSigner{
		ServiceName:  "dynamodb",
		Region:       "us-east-1",
		Request:      req,
		Time:         v4Internal.NewSigningTime(time.Now()),
		KeyDerivator: v4Internal.NewSigningKeyDeriver(),
	}

	build, err := ctx.Build()
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expected := "https://example.org/bucket/key-._~,!@#$%^&*()?Foo=a&Foo=m&Foo=o&Foo=z"
	if e, a := expected, build.Request.URL.String(); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
}

func TestSigner_SignHTTP_NoReplaceRequestBody(t *testing.T) {
	req, bodyHash := buildRequest("dynamodb", "us-east-1", "{}")
	req.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))

	s := NewSigner()

	origBody := req.Body

	err := s.SignHTTP(context.Background(), testCredentials, req, bodyHash, "dynamodb", "us-east-1", time.Now())
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	if req.Body != origBody {
		t.Errorf("expect request body to not be changed")
	}
}

func TestRequestHost(t *testing.T) {
	req, _ := buildRequest("dynamodb", "us-east-1", "{}")
	req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
	req.Host = "myhost"

	query := req.URL.Query()
	query.Set("X-Amz-Expires", "5")
	req.URL.RawQuery = query.Encode()

	ctx := &httpSigner{
		ServiceName:  "dynamodb",
		Region:       "us-east-1",
		Request:      req,
		Time:         v4Internal.NewSigningTime(time.Now()),
		KeyDerivator: v4Internal.NewSigningKeyDeriver(),
	}

	build, err := ctx.Build()
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	if !strings.Contains(build.CanonicalString, "host:"+req.Host) {
		t.Errorf("canonical host header invalid")
	}
}

func TestSign_buildCanonicalHeadersContentLengthPresent(t *testing.T) {
	body := `{"description": "this is a test"}`
	req, _ := buildRequest("dynamodb", "us-east-1", body)
	req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
	req.Host = "myhost"

	contentLength := fmt.Sprintf("%d", len([]byte(body)))
	req.Header.Add("Content-Length", contentLength)

	query := req.URL.Query()
	query.Set("X-Amz-Expires", "5")
	req.URL.RawQuery = query.Encode()

	ctx := &httpSigner{
		ServiceName:  "dynamodb",
		Region:       "us-east-1",
		Request:      req,
		Time:         v4Internal.NewSigningTime(time.Now()),
		KeyDerivator: v4Internal.NewSigningKeyDeriver(),
	}

	build, err := ctx.Build()
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	if !strings.Contains(build.CanonicalString, "content-length:"+contentLength+"\n") {
		t.Errorf("canonical header content-length invalid")
	}
}

func TestSign_buildCanonicalHeaders(t *testing.T) {
	serviceName := "mockAPI"
	region := "mock-region"
	endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"

	req, err := http.NewRequest("POST", endpoint, nil)
	if err != nil {
		t.Fatalf("failed to create request, %v", err)
	}

	req.Header.Set("FooInnerSpace", "   inner      space ")
	req.Header.Set("FooLeadingSpace", "    leading-space")
	req.Header.Add("FooMultipleSpace", "no-space")
	req.Header.Add("FooMultipleSpace", "\ttab-space")
	req.Header.Add("FooMultipleSpace", "trailing-space    ")
	req.Header.Set("FooNoSpace", "no-space")
	req.Header.Set("FooTabSpace", "\ttab-space\t")
	req.Header.Set("FooTrailingSpace", "trailing-space    ")
	req.Header.Set("FooWrappedSpace", "   wrapped-space    ")

	ctx := &httpSigner{
		ServiceName:  serviceName,
		Region:       region,
		Request:      req,
		Time:         v4Internal.NewSigningTime(time.Date(2021, 10, 20, 12, 42, 0, 0, time.UTC)),
		KeyDerivator: v4Internal.NewSigningKeyDeriver(),
	}

	build, err := ctx.Build()
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectCanonicalString := strings.Join([]string{
		`POST`,
		`/`,
		``,
		`fooinnerspace:inner space`,
		`fooleadingspace:leading-space`,
		`foomultiplespace:no-space,tab-space,trailing-space`,
		`foonospace:no-space`,
		`footabspace:tab-space`,
		`footrailingspace:trailing-space`,
		`foowrappedspace:wrapped-space`,
		`host:mockAPI.mock-region.amazonaws.com`,
		`x-amz-date:20211020T124200Z`,
		``,
		`fooinnerspace;fooleadingspace;foomultiplespace;foonospace;footabspace;footrailingspace;foowrappedspace;host;x-amz-date`,
		``,
	}, "\n")
	if diff := cmpDiff(expectCanonicalString, build.CanonicalString); diff != "" {
		t.Errorf("expect match, got\n%s", diff)
	}
}

func BenchmarkPresignRequest(b *testing.B) {
	signer := NewSigner()
	req, bodyHash := buildRequest("dynamodb", "us-east-1", "{}")

	query := req.URL.Query()
	query.Set("X-Amz-Expires", "5")
	req.URL.RawQuery = query.Encode()

	for i := 0; i < b.N; i++ {
		signer.PresignHTTP(context.Background(), testCredentials, req, bodyHash, "dynamodb", "us-east-1", time.Now())
	}
}

func BenchmarkSignRequest(b *testing.B) {
	signer := NewSigner()
	req, bodyHash := buildRequest("dynamodb", "us-east-1", "{}")
	for i := 0; i < b.N; i++ {
		signer.SignHTTP(context.Background(), testCredentials, req, bodyHash, "dynamodb", "us-east-1", time.Now())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func cmpDiff(e, a interface{}) string {
|
|
||||||
if !reflect.DeepEqual(e, a) {
|
|
||||||
return fmt.Sprintf("%v != %v", e, a)
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
70 api/cache/access_control.go vendored
@@ -1,70 +0,0 @@
package cache

import (
    "fmt"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
    "github.com/bluele/gcache"
    "go.uber.org/zap"
)

// AccessControlCache provides lru cache for objects.
type AccessControlCache struct {
    cache  gcache.Cache
    logger *zap.Logger
}

const (
    // DefaultAccessControlCacheLifetime is a default lifetime of entries in access' cache.
    DefaultAccessControlCacheLifetime = 1 * time.Minute
    // DefaultAccessControlCacheSize is a default maximum number of entries in access' cache.
    DefaultAccessControlCacheSize = 1e5
)

// DefaultAccessControlConfig returns new default cache expiration values.
func DefaultAccessControlConfig(logger *zap.Logger) *Config {
    return &Config{
        Size:     DefaultAccessControlCacheSize,
        Lifetime: DefaultAccessControlCacheLifetime,
        Logger:   logger,
    }
}

// NewAccessControlCache creates an object of AccessControlCache.
func NewAccessControlCache(config *Config) *AccessControlCache {
    gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
    return &AccessControlCache{cache: gc, logger: config.Logger}
}

// Get returns true if such key exists.
func (o *AccessControlCache) Get(owner user.ID, key string) bool {
    entry, err := o.cache.Get(cacheKey(owner, key))
    if err != nil {
        return false
    }

    result, ok := entry.(bool)
    if !ok {
        o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return false
    }

    return result
}

// Put puts an item to cache.
func (o *AccessControlCache) Put(owner user.ID, key string) error {
    return o.cache.Set(cacheKey(owner, key), true)
}

// Delete deletes an object from cache.
func (o *AccessControlCache) Delete(owner user.ID, key string) bool {
    return o.cache.Remove(cacheKey(owner, key))
}

func cacheKey(owner user.ID, key string) string {
    return owner.EncodeToString() + key
}
85 api/cache/accessbox.go vendored
@@ -1,85 +0,0 @@
package cache

import (
    "fmt"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "github.com/bluele/gcache"
    "go.uber.org/zap"
)

type (
    // AccessBoxCache stores an access box by its address.
    AccessBoxCache struct {
        logger *zap.Logger
        cache  gcache.Cache
    }

    // Config stores expiration params for cache.
    Config struct {
        Size     int
        Lifetime time.Duration
        Logger   *zap.Logger
    }

    AccessBoxCacheValue struct {
        Box        *accessbox.Box
        Attributes []object.Attribute
        PutTime    time.Time
        Address    *oid.Address
    }
)

const (
    // DefaultAccessBoxCacheSize is a default maximum number of entries in cache.
    DefaultAccessBoxCacheSize = 100
    // DefaultAccessBoxCacheLifetime is a default lifetime of entries in cache.
    DefaultAccessBoxCacheLifetime = 10 * time.Minute
)

// DefaultAccessBoxConfig returns new default cache expiration values.
func DefaultAccessBoxConfig(logger *zap.Logger) *Config {
    return &Config{
        Size:     DefaultAccessBoxCacheSize,
        Lifetime: DefaultAccessBoxCacheLifetime,
        Logger:   logger,
    }
}

// NewAccessBoxCache creates an object of AccessBoxCache.
func NewAccessBoxCache(config *Config) *AccessBoxCache {
    gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()

    return &AccessBoxCache{cache: gc, logger: config.Logger}
}

// Get returns a cached accessbox.
func (o *AccessBoxCache) Get(accessKeyID string) *AccessBoxCacheValue {
    entry, err := o.cache.Get(accessKeyID)
    if err != nil {
        return nil
    }

    result, ok := entry.(*AccessBoxCacheValue)
    if !ok {
        o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return result
}

// Put stores an accessbox to cache.
func (o *AccessBoxCache) Put(accessKeyID string, val *AccessBoxCacheValue) error {
    return o.cache.Set(accessKeyID, val)
}

// Delete removes an accessbox from cache.
func (o *AccessBoxCache) Delete(accessKeyID string) {
    o.cache.Remove(accessKeyID)
}
113 api/cache/buckets.go vendored
@@ -1,113 +0,0 @@
package cache

import (
    "fmt"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "github.com/bluele/gcache"
    "go.uber.org/zap"
)

// BucketCache contains cache with objects and the lifetime of cache entries.
type BucketCache struct {
    cache    gcache.Cache
    cidCache gcache.Cache
    logger   *zap.Logger
}

const (
    // DefaultBucketCacheSize is a default maximum number of entries in cache.
    DefaultBucketCacheSize = 1e3
    // DefaultBucketCacheLifetime is a default lifetime of entries in cache.
    DefaultBucketCacheLifetime = time.Minute
)

// DefaultBucketConfig returns new default cache expiration values.
func DefaultBucketConfig(logger *zap.Logger) *Config {
    return &Config{
        Size:     DefaultBucketCacheSize,
        Lifetime: DefaultBucketCacheLifetime,
        Logger:   logger,
    }
}

// NewBucketCache creates an object of BucketCache.
func NewBucketCache(config *Config, cidCache bool) *BucketCache {
    cache := &BucketCache{
        cache:  gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build(),
        logger: config.Logger,
    }

    if cidCache {
        cache.cidCache = gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
    }
    return cache
}

// Get returns a cached object.
func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
    return o.get(formKey(ns, bktName))
}

func (o *BucketCache) GetByCID(cnrID cid.ID) *data.BucketInfo {
    if o.cidCache == nil {
        return nil
    }

    entry, err := o.cidCache.Get(cnrID)
    if err != nil {
        return nil
    }

    key, ok := entry.(string)
    if !ok {
        o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", key)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return o.get(key)
}

func (o *BucketCache) get(key string) *data.BucketInfo {
    entry, err := o.cache.Get(key)
    if err != nil {
        return nil
    }

    result, ok := entry.(*data.BucketInfo)
    if !ok {
        o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return result
}

// Put puts an object to cache.
func (o *BucketCache) Put(bkt *data.BucketInfo) error {
    if o.cidCache != nil {
        if err := o.cidCache.Set(bkt.CID, formKey(bkt.Zone, bkt.Name)); err != nil {
            return err
        }
    }

    return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
}

// Delete deletes an object from cache.
func (o *BucketCache) Delete(bkt *data.BucketInfo) bool {
    if o.cidCache != nil {
        o.cidCache.Remove(bkt.CID)
    }

    return o.cache.Remove(formKey(bkt.Zone, bkt.Name))
}

func formKey(zone, name string) string {
    return name + "." + zone
}
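The bucket cache above keeps an optional secondary index from container ID to the same `name.zone` key used by the primary map, so a lookup by name and a lookup by CID resolve to one shared entry. A minimal usage sketch against the package as it exists before this deletion (the `main` wrapper and the `zap.NewNop` logger are illustrative assumptions; `cidtest` only fabricates an ID):

package main

import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
    "go.uber.org/zap"
)

func main() {
    // The second argument enables the secondary CID index.
    bktCache := cache.NewBucketCache(cache.DefaultBucketConfig(zap.NewNop()), true)

    bkt := &data.BucketInfo{Name: "photos", Zone: "ns", CID: cidtest.ID()}
    if err := bktCache.Put(bkt); err != nil {
        panic(err)
    }

    // Both lookups resolve through formKey("ns", "photos") to the same entry.
    fmt.Println(bktCache.Get("ns", "photos") == bktCache.GetByCID(bkt.CID)) // true
}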
240 api/cache/cache_test.go vendored
@@ -1,240 +0,0 @@
package cache

import (
    "strings"
    "testing"

    "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/nspcc-dev/neo-go/pkg/util"
    "github.com/stretchr/testify/require"
    "go.uber.org/zap"
    "go.uber.org/zap/zaptest/observer"
)

func TestAccessBoxCacheType(t *testing.T) {
    logger, observedLog := getObservedLogger()
    cache := NewAccessBoxCache(DefaultAccessBoxConfig(logger))

    addr := oidtest.Address()
    box := &accessbox.Box{}
    val := &AccessBoxCacheValue{
        Box: box,
    }

    accessKeyID := getAccessKeyID(addr)

    err := cache.Put(accessKeyID, val)
    require.NoError(t, err)
    resVal := cache.Get(accessKeyID)
    require.Equal(t, box, resVal.Box)
    require.Equal(t, 0, observedLog.Len())

    err = cache.cache.Set(accessKeyID, "tmp")
    require.NoError(t, err)
    assertInvalidCacheEntry(t, cache.Get(accessKeyID), observedLog)
}

func TestBucketsCacheType(t *testing.T) {
    logger, observedLog := getObservedLogger()
    cache := NewBucketCache(DefaultBucketConfig(logger), false)

    bktInfo := &data.BucketInfo{Name: "bucket"}

    err := cache.Put(bktInfo)
    require.NoError(t, err)
    val := cache.Get("", bktInfo.Name)
    require.Equal(t, bktInfo, val)
    require.Equal(t, 0, observedLog.Len())

    err = cache.cache.Set(bktInfo.Name+"."+bktInfo.Zone, "tmp")
    require.NoError(t, err)
    assertInvalidCacheEntry(t, cache.Get(bktInfo.Zone, bktInfo.Name), observedLog)
}

func TestObjectNamesCacheType(t *testing.T) {
    logger, observedLog := getObservedLogger()
    cache := NewObjectsNameCache(DefaultObjectsNameConfig(logger))

    key := "name"
    addr := oidtest.Address()

    err := cache.Put(key, addr)
    require.NoError(t, err)
    val := cache.Get(key)
    require.Equal(t, addr, *val)
    require.Equal(t, 0, observedLog.Len())

    err = cache.cache.Set(key, "tmp")
    require.NoError(t, err)
    assertInvalidCacheEntry(t, cache.Get(key), observedLog)
}

func TestObjectCacheType(t *testing.T) {
    logger, observedLog := getObservedLogger()
    cache := New(DefaultObjectsConfig(logger))

    addr := oidtest.Address()

    extObjInfo := &data.ExtendedObjectInfo{
        ObjectInfo: &data.ObjectInfo{
            ID:  addr.Object(),
            CID: addr.Container(),
        },
        NodeVersion: &data.NodeVersion{
            BaseNodeVersion: data.BaseNodeVersion{
                FilePath: "obj",
                Size:     50,
            },
            IsUnversioned: true,
        },
        IsLatest: true,
    }

    err := cache.PutObject(extObjInfo)
    require.NoError(t, err)
    val := cache.GetObject(addr)
    require.Equal(t, extObjInfo, val)
    require.Equal(t, 0, observedLog.Len())

    err = cache.cache.Set(addr, "tmp")
    require.NoError(t, err)
    assertInvalidCacheEntry(t, cache.GetObject(addr), observedLog)
}

func TestObjectsListCacheType(t *testing.T) {
    logger, observedLog := getObservedLogger()
    cache := NewObjectsListCache(DefaultObjectsListConfig(logger))

    cnrID := cidtest.ID()
    key := ObjectsListKey{cid: cnrID, prefix: "obj"}
    versions := []*data.NodeVersion{{BaseNodeVersion: data.BaseNodeVersion{OID: oidtest.ID()}}}

    err := cache.PutVersions(key, versions)
    require.NoError(t, err)
    val := cache.GetVersions(key)
    require.Equal(t, versions, val)
    require.Equal(t, 0, observedLog.Len())

    err = cache.cache.Set(key, "tmp")
    require.NoError(t, err)
    assertInvalidCacheEntry(t, cache.GetVersions(key), observedLog)

    err = cache.cache.Set("key", "tmp")
    require.NoError(t, err)
    cache.CleanCacheEntriesContainingObject(key.prefix, cnrID)
    require.Equal(t, 2, observedLog.Len())
    require.Equal(t, observedLog.All()[1].Message, "invalid cache key type")
}

func TestObjectInfoCacheType(t *testing.T) {
    logger, observedLog := getObservedLogger()
    cache := NewSystemCache(DefaultSystemConfig(logger))

    key := "key"
    objInfo := &data.ObjectInfo{Name: key}

    err := cache.PutObject(key, objInfo)
    require.NoError(t, err)
    val := cache.GetObject(key)
    require.Equal(t, objInfo, val)
    require.Equal(t, 0, observedLog.Len())

    err = cache.cache.Set(key, "tmp")
    require.NoError(t, err)
    assertInvalidCacheEntry(t, cache.GetObject(key), observedLog)
}

func TestCORsCacheType(t *testing.T) {
    logger, observedLog := getObservedLogger()
    cache := NewSystemCache(DefaultSystemConfig(logger))

    key := "key"
    cors := &data.CORSConfiguration{}

    err := cache.PutCORS(key, cors)
    require.NoError(t, err)
    val := cache.GetCORS(key)
    require.Equal(t, cors, val)
    require.Equal(t, 0, observedLog.Len())

    err = cache.cache.Set(key, "tmp")
    require.NoError(t, err)
    assertInvalidCacheEntry(t, cache.GetCORS(key), observedLog)
}

func TestSettingsCacheType(t *testing.T) {
    logger, observedLog := getObservedLogger()
    cache := NewSystemCache(DefaultSystemConfig(logger))

    key := "key"
    settings := &data.BucketSettings{Versioning: data.VersioningEnabled}

    err := cache.PutSettings(key, settings)
    require.NoError(t, err)
    val := cache.GetSettings(key)
    require.Equal(t, settings, val)
    require.Equal(t, 0, observedLog.Len())

    err = cache.cache.Set(key, "tmp")
    require.NoError(t, err)
    assertInvalidCacheEntry(t, cache.GetSettings(key), observedLog)
}

func TestFrostFSIDSubjectCacheType(t *testing.T) {
    logger, observedLog := getObservedLogger()
    cache := NewFrostfsIDCache(DefaultFrostfsIDConfig(logger))

    key, err := util.Uint160DecodeStringLE("4ea976429703418ef00fc4912a409b6a0b973034")
    require.NoError(t, err)
    value := &client.SubjectExtended{}

    err = cache.PutSubject(key, value)
    require.NoError(t, err)
    val := cache.GetSubject(key)
    require.Equal(t, value, val)
    require.Equal(t, 0, observedLog.Len())

    err = cache.cache.Set(key, "tmp")
    require.NoError(t, err)
    assertInvalidCacheEntry(t, cache.GetSubject(key), observedLog)
}

func TestFrostFSIDUserKeyCacheType(t *testing.T) {
    logger, observedLog := getObservedLogger()
    cache := NewFrostfsIDCache(DefaultFrostfsIDConfig(logger))

    ns, name := "ns", "name"
    value, err := keys.NewPrivateKey()
    require.NoError(t, err)

    err = cache.PutUserKey(ns, name, value.PublicKey())
    require.NoError(t, err)
    val := cache.GetUserKey(ns, name)
    require.Equal(t, value.PublicKey(), val)
    require.Equal(t, 0, observedLog.Len())

    err = cache.cache.Set(ns+"/"+name, "tmp")
    require.NoError(t, err)
    assertInvalidCacheEntry(t, cache.GetUserKey(ns, name), observedLog)
}

func assertInvalidCacheEntry(t *testing.T, val interface{}, observedLog *observer.ObservedLogs) {
    require.Nil(t, val)
    require.Equal(t, 1, observedLog.Len())
    require.Equal(t, observedLog.All()[0].Message, "invalid cache entry type")
}

func getObservedLogger() (*zap.Logger, *observer.ObservedLogs) {
    loggerCore, observedLog := observer.New(zap.WarnLevel)
    return zap.New(loggerCore), observedLog
}

func getAccessKeyID(addr oid.Address) string {
    return strings.ReplaceAll(addr.EncodeToString(), "/", "0")
}
77 api/cache/frostfsid.go vendored
@@ -1,77 +0,0 @@
package cache

import (
    "fmt"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "github.com/bluele/gcache"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/nspcc-dev/neo-go/pkg/util"
    "go.uber.org/zap"
)

// FrostfsIDCache provides lru cache for frostfsid contract.
type FrostfsIDCache struct {
    cache  gcache.Cache
    logger *zap.Logger
}

const (
    // DefaultFrostfsIDCacheSize is a default maximum number of entries in cache.
    DefaultFrostfsIDCacheSize = 1e4
    // DefaultFrostfsIDCacheLifetime is a default lifetime of entries in cache.
    DefaultFrostfsIDCacheLifetime = time.Minute
)

// DefaultFrostfsIDConfig returns new default cache expiration values.
func DefaultFrostfsIDConfig(logger *zap.Logger) *Config {
    return &Config{
        Size:     DefaultFrostfsIDCacheSize,
        Lifetime: DefaultFrostfsIDCacheLifetime,
        Logger:   logger,
    }
}

// NewFrostfsIDCache creates an object of FrostfsIDCache.
func NewFrostfsIDCache(config *Config) *FrostfsIDCache {
    gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
    return &FrostfsIDCache{cache: gc, logger: config.Logger}
}

// GetSubject returns a cached client.SubjectExtended. Returns nil if value is missing.
func (c *FrostfsIDCache) GetSubject(key util.Uint160) *client.SubjectExtended {
    return get[client.SubjectExtended](c, key)
}

// PutSubject puts a client.SubjectExtended to cache.
func (c *FrostfsIDCache) PutSubject(key util.Uint160, subject *client.SubjectExtended) error {
    return c.cache.Set(key, subject)
}

// GetUserKey returns a cached *keys.PublicKey. Returns nil if value is missing.
func (c *FrostfsIDCache) GetUserKey(ns, name string) *keys.PublicKey {
    return get[keys.PublicKey](c, ns+"/"+name)
}

// PutUserKey puts a *keys.PublicKey to cache.
func (c *FrostfsIDCache) PutUserKey(ns, name string, userKey *keys.PublicKey) error {
    return c.cache.Set(ns+"/"+name, userKey)
}

func get[T any](c *FrostfsIDCache, key any) *T {
    entry, err := c.cache.Get(key)
    if err != nil {
        return nil
    }

    result, ok := entry.(*T)
    if !ok {
        c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return result
}
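The generic get[T] helper above centralizes the fetch-then-narrow pattern that every cache in this package otherwise repeats by hand. A standalone sketch of the same pattern over a bare gcache instance (the names here are hypothetical, not part of the gateway):

package main

import (
    "fmt"

    "github.com/bluele/gcache"
)

type subject struct{ Name string }

// typedGet fetches an entry and narrows it to *T, returning nil on a
// cache miss or when the stored entry has an unexpected type.
func typedGet[T any](c gcache.Cache, key any) *T {
    entry, err := c.Get(key)
    if err != nil {
        return nil // miss or expired
    }
    result, ok := entry.(*T)
    if !ok {
        return nil // wrong type under this key
    }
    return result
}

func main() {
    c := gcache.New(10).LRU().Build()
    _ = c.Set("alice", &subject{Name: "alice"})

    fmt.Println(typedGet[subject](c, "alice").Name) // alice
    fmt.Println(typedGet[subject](c, "bob") == nil) // true
}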
107 api/cache/listsession.go vendored
@@ -1,107 +0,0 @@
package cache

import (
    "fmt"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "github.com/bluele/gcache"
    "go.uber.org/zap"
)

type (
    // ListSessionCache contains cache for list session (during pagination).
    ListSessionCache struct {
        cache  gcache.Cache
        logger *zap.Logger
    }

    // ListSessionKey is a key to find a ListSessionCache's entry.
    ListSessionKey struct {
        cid    cid.ID
        prefix string
        token  string
    }
)

const (
    // DefaultListSessionCacheLifetime is a default lifetime of entries in cache of ListObjects.
    DefaultListSessionCacheLifetime = time.Second * 60
    // DefaultListSessionCacheSize is a default size of cache of ListObjects.
    DefaultListSessionCacheSize = 100
)

// DefaultListSessionConfig returns new default cache expiration values.
func DefaultListSessionConfig(logger *zap.Logger) *Config {
    return &Config{
        Size:     DefaultListSessionCacheSize,
        Lifetime: DefaultListSessionCacheLifetime,
        Logger:   logger,
    }
}

func (k *ListSessionKey) String() string {
    return k.cid.EncodeToString() + k.prefix + k.token
}

// NewListSessionCache is a constructor which creates an object of ListSessionCache with the given lifetime of entries.
func NewListSessionCache(config *Config) *ListSessionCache {
    gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).EvictedFunc(func(_ interface{}, val interface{}) {
        session, ok := val.(*data.ListSession)
        if !ok {
            config.Logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", val)),
                zap.String("expected", fmt.Sprintf("%T", session)), logs.TagField(logs.TagDatapath))
            return
        }

        if !session.Acquired.Load() {
            session.Cancel()
        }
    }).Build()
    return &ListSessionCache{cache: gc, logger: config.Logger}
}

// GetListSession returns a cached list session.
func (l *ListSessionCache) GetListSession(key ListSessionKey) *data.ListSession {
    entry, err := l.cache.Get(key)
    if err != nil {
        return nil
    }

    result, ok := entry.(*data.ListSession)
    if !ok {
        l.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return result
}

// PutListSession puts a list session to cache.
func (l *ListSessionCache) PutListSession(key ListSessionKey, session *data.ListSession) error {
    s := l.GetListSession(key)
    if s != nil && s != session {
        if !s.Acquired.Load() {
            s.Cancel()
        }
    }
    return l.cache.Set(key, session)
}

// DeleteListSession removes key from cache.
func (l *ListSessionCache) DeleteListSession(key ListSessionKey) {
    l.cache.Remove(key)
}

// CreateListSessionCacheKey returns ListSessionKey with the given CID, prefix and token.
func CreateListSessionCacheKey(cnr cid.ID, prefix, token string) ListSessionKey {
    p := ListSessionKey{
        cid:    cnr,
        prefix: prefix,
        token:  token,
    }

    return p
}
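The constructor above wires an eviction hook that cancels a list session unless it was re-acquired, so abandoned pagination sessions release their resources when they age out of the cache. A minimal standalone sketch of that evict-then-cancel pattern (the key and value are hypothetical; gcache also fires the hook when an entry expires or is displaced, not only on an explicit Remove):

package main

import (
    "context"
    "fmt"

    "github.com/bluele/gcache"
)

func main() {
    // Cached values carry a cancel func; the eviction hook releases the
    // associated work, mirroring how abandoned list sessions are cancelled.
    c := gcache.New(10).LRU().
        EvictedFunc(func(_ interface{}, val interface{}) {
            if cancel, ok := val.(context.CancelFunc); ok {
                cancel()
            }
        }).
        Build()

    ctx, cancel := context.WithCancel(context.Background())
    _ = c.Set("session", context.CancelFunc(cancel))

    c.Remove("session") // eviction triggers the hook

    <-ctx.Done()
    fmt.Println("cancelled on eviction:", ctx.Err())
}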
68 api/cache/names.go vendored
@@ -1,68 +0,0 @@
package cache

import (
    "fmt"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "github.com/bluele/gcache"
    "go.uber.org/zap"
)

// ObjectsNameCache provides lru cache for objects.
// This cache contains a mapping of nice names to object addresses.
// Key is bucketName+objectName.
type ObjectsNameCache struct {
    cache  gcache.Cache
    logger *zap.Logger
}

const (
    // DefaultObjectsNameCacheSize is a default maximum number of entries in cache.
    DefaultObjectsNameCacheSize = 1e4
    // DefaultObjectsNameCacheLifetime is a default lifetime of entries in cache.
    DefaultObjectsNameCacheLifetime = time.Minute
)

// DefaultObjectsNameConfig returns new default cache expiration values.
func DefaultObjectsNameConfig(logger *zap.Logger) *Config {
    return &Config{
        Size:     DefaultObjectsNameCacheSize,
        Lifetime: DefaultObjectsNameCacheLifetime,
        Logger:   logger,
    }
}

// NewObjectsNameCache creates an object of ObjectsNameCache.
func NewObjectsNameCache(config *Config) *ObjectsNameCache {
    gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
    return &ObjectsNameCache{cache: gc, logger: config.Logger}
}

// Get returns a cached object. Returns nil if value is missing.
func (o *ObjectsNameCache) Get(key string) *oid.Address {
    entry, err := o.cache.Get(key)
    if err != nil {
        return nil
    }

    result, ok := entry.(oid.Address)
    if !ok {
        o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return &result
}

// Put puts an object to cache.
func (o *ObjectsNameCache) Put(key string, address oid.Address) error {
    return o.cache.Set(key, address)
}

// Delete deletes an object from cache.
func (o *ObjectsNameCache) Delete(key string) bool {
    return o.cache.Remove(key)
}
86 api/cache/network.go vendored
@@ -1,86 +0,0 @@
package cache

import (
    "fmt"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
    "github.com/bluele/gcache"
    "go.uber.org/zap"
)

type (
    // NetworkCache provides cache for network-related values.
    NetworkCache struct {
        cache  gcache.Cache
        logger *zap.Logger
    }

    // NetworkCacheConfig stores expiration params for cache.
    NetworkCacheConfig struct {
        Lifetime time.Duration
        Logger   *zap.Logger
    }
)

const (
    DefaultNetworkCacheLifetime = 1 * time.Minute
    networkCacheSize            = 2
    networkInfoKey              = "network_info"
    netmapKey                   = "netmap"
)

// DefaultNetworkConfig returns new default cache expiration values.
func DefaultNetworkConfig(logger *zap.Logger) *NetworkCacheConfig {
    return &NetworkCacheConfig{
        Lifetime: DefaultNetworkCacheLifetime,
        Logger:   logger,
    }
}

// NewNetworkCache creates an object of NetworkCache.
func NewNetworkCache(config *NetworkCacheConfig) *NetworkCache {
    gc := gcache.New(networkCacheSize).LRU().Expiration(config.Lifetime).Build()
    return &NetworkCache{cache: gc, logger: config.Logger}
}

func (c *NetworkCache) GetNetworkInfo() *netmap.NetworkInfo {
    entry, err := c.cache.Get(networkInfoKey)
    if err != nil {
        return nil
    }

    result, ok := entry.(netmap.NetworkInfo)
    if !ok {
        c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return &result
}

func (c *NetworkCache) PutNetworkInfo(info netmap.NetworkInfo) error {
    return c.cache.Set(networkInfoKey, info)
}

func (c *NetworkCache) GetNetmap() *netmap.NetMap {
    entry, err := c.cache.Get(netmapKey)
    if err != nil {
        return nil
    }

    result, ok := entry.(netmap.NetMap)
    if !ok {
        c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return &result
}

func (c *NetworkCache) PutNetmap(nm netmap.NetMap) error {
    return c.cache.Set(netmapKey, nm)
}
67 api/cache/objects.go vendored
@@ -1,67 +0,0 @@
package cache

import (
    "fmt"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "github.com/bluele/gcache"
    "go.uber.org/zap"
)

// ObjectsCache provides lru cache for objects.
type ObjectsCache struct {
    cache  gcache.Cache
    logger *zap.Logger
}

const (
    // DefaultObjectsCacheLifetime is a default lifetime of entries in objects' cache.
    DefaultObjectsCacheLifetime = time.Minute * 5
    // DefaultObjectsCacheSize is a default maximum number of entries in objects' cache.
    DefaultObjectsCacheSize = 1e6
)

// DefaultObjectsConfig returns new default cache expiration values.
func DefaultObjectsConfig(logger *zap.Logger) *Config {
    return &Config{
        Size:     DefaultObjectsCacheSize,
        Lifetime: DefaultObjectsCacheLifetime,
        Logger:   logger,
    }
}

// New creates an object of ObjectsCache.
func New(config *Config) *ObjectsCache {
    gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
    return &ObjectsCache{cache: gc, logger: config.Logger}
}

// GetObject returns a cached object info.
func (o *ObjectsCache) GetObject(address oid.Address) *data.ExtendedObjectInfo {
    entry, err := o.cache.Get(address)
    if err != nil {
        return nil
    }

    result, ok := entry.(*data.ExtendedObjectInfo)
    if !ok {
        o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return result
}

// PutObject puts an object info to cache.
func (o *ObjectsCache) PutObject(obj *data.ExtendedObjectInfo) error {
    return o.cache.Set(obj.ObjectInfo.Address(), obj)
}

// Delete deletes an object from cache.
func (o *ObjectsCache) Delete(address oid.Address) bool {
    return o.cache.Remove(address)
}
64 api/cache/objects_test.go vendored
@@ -1,64 +0,0 @@
package cache

import (
    "testing"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
    "github.com/stretchr/testify/require"
    "go.uber.org/zap/zaptest"
)

func getTestConfig(t *testing.T) *Config {
    return &Config{
        Size:     10,
        Lifetime: 5 * time.Second,
        Logger:   zaptest.NewLogger(t),
    }
}

func TestCache(t *testing.T) {
    obj := objecttest.Object()
    objID, _ := obj.ID()
    cnrID, _ := obj.ContainerID()

    var addr oid.Address
    addr.SetContainer(cnrID)
    addr.SetObject(objID)

    extObjInfo := &data.ExtendedObjectInfo{
        ObjectInfo: &data.ObjectInfo{
            ID:  addr.Object(),
            CID: addr.Container(),
        },
        NodeVersion: &data.NodeVersion{
            BaseNodeVersion: data.BaseNodeVersion{
                FilePath: "obj",
                Size:     50,
            },
            IsUnversioned: true,
        },
        IsLatest: true,
    }

    t.Run("check get", func(t *testing.T) {
        cache := New(getTestConfig(t))
        err := cache.PutObject(extObjInfo)
        require.NoError(t, err)

        actual := cache.GetObject(addr)
        require.Equal(t, extObjInfo, actual)
    })

    t.Run("check delete", func(t *testing.T) {
        cache := New(getTestConfig(t))
        err := cache.PutObject(extObjInfo)
        require.NoError(t, err)

        cache.Delete(addr)
        actual := cache.GetObject(addr)
        require.Nil(t, actual)
    })
}
117 api/cache/objectslist.go vendored
@@ -1,117 +0,0 @@
package cache

import (
    "fmt"
    "strconv"
    "strings"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "github.com/bluele/gcache"
    "go.uber.org/zap"
)

/*
   This is an implementation of cache which keeps unsorted lists of objects' IDs (all versions)
   for a specified bucket and a prefix.

   The cache is backed by gcache: each entry maps an ObjectsListKey to a list of IDs.
   After putting a record, it lives for a while (default value is 60 seconds).

   When we receive a request from a user, we try to find the suitable and non-expired cache entry, go through the list
   and get ObjectInfos from common object cache or with a request to FrostFS.

   When we put an object into a container, we invalidate entries with prefixes that are prefixes of the object's name.
*/

type (
    // ObjectsListCache contains cache for ListObjects and ListObjectVersions.
    ObjectsListCache struct {
        cache  gcache.Cache
        logger *zap.Logger
    }

    // ObjectsListKey is a key to find a ObjectsListCache's entry.
    ObjectsListKey struct {
        cid        cid.ID
        prefix     string
        latestOnly bool
    }
)

const (
    // DefaultObjectsListCacheLifetime is a default lifetime of entries in cache of ListObjects.
    DefaultObjectsListCacheLifetime = time.Second * 60
    // DefaultObjectsListCacheSize is a default size of cache of ListObjects.
    DefaultObjectsListCacheSize = 1e5
)

// DefaultObjectsListConfig returns new default cache expiration values.
func DefaultObjectsListConfig(logger *zap.Logger) *Config {
    return &Config{
        Size:     DefaultObjectsListCacheSize,
        Lifetime: DefaultObjectsListCacheLifetime,
        Logger:   logger,
    }
}

func (k *ObjectsListKey) String() string {
    return k.cid.EncodeToString() + k.prefix + strconv.FormatBool(k.latestOnly)
}

// NewObjectsListCache is a constructor which creates an object of ObjectsListCache with the given lifetime of entries.
func NewObjectsListCache(config *Config) *ObjectsListCache {
    gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
    return &ObjectsListCache{cache: gc, logger: config.Logger}
}

// GetVersions returns a cached list of node versions.
func (l *ObjectsListCache) GetVersions(key ObjectsListKey) []*data.NodeVersion {
    entry, err := l.cache.Get(key)
    if err != nil {
        return nil
    }

    result, ok := entry.([]*data.NodeVersion)
    if !ok {
        l.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return result
}

// PutVersions puts a list of object versions to cache.
func (l *ObjectsListCache) PutVersions(key ObjectsListKey, versions []*data.NodeVersion) error {
    return l.cache.Set(key, versions)
}

// CleanCacheEntriesContainingObject deletes entries containing specified object.
func (l *ObjectsListCache) CleanCacheEntriesContainingObject(objectName string, cnr cid.ID) {
    keys := l.cache.Keys(true)
    for _, key := range keys {
        k, ok := key.(ObjectsListKey)
        if !ok {
            l.logger.Warn(logs.InvalidCacheKeyType, zap.String("actual", fmt.Sprintf("%T", key)),
                zap.String("expected", fmt.Sprintf("%T", k)), logs.TagField(logs.TagDatapath))
            continue
        }
        if cnr.Equals(k.cid) && strings.HasPrefix(objectName, k.prefix) {
            l.cache.Remove(k)
        }
    }
}

// CreateObjectsListCacheKey returns ObjectsListKey with the given CID, prefix and latestOnly flag.
func CreateObjectsListCacheKey(cnr cid.ID, prefix string, latestOnly bool) ObjectsListKey {
    p := ObjectsListKey{
        cid:        cnr,
        prefix:     prefix,
        latestOnly: latestOnly,
    }

    return p
}
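The comment block at the top of this file describes the invalidation rule: writing an object drops every cached listing whose prefix is a prefix of the new object's name. A short sketch exercising that rule through this file's own API, against the package as it exists before this deletion (the standalone main, the no-op logger, and the test ID generators are the only assumptions):

package main

import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
    oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
    "go.uber.org/zap"
)

func main() {
    c := cache.NewObjectsListCache(cache.DefaultObjectsListConfig(zap.NewNop()))
    cnr := cidtest.ID()
    versions := []*data.NodeVersion{{BaseNodeVersion: data.BaseNodeVersion{OID: oidtest.ID()}}}

    rootKey := cache.CreateObjectsListCacheKey(cnr, "", false)
    dirKey := cache.CreateObjectsListCacheKey(cnr, "dir/", false)
    _ = c.PutVersions(rootKey, versions)
    _ = c.PutVersions(dirKey, versions)

    // "" and "dir/" are both prefixes of "dir/obj", so both listings drop.
    c.CleanCacheEntriesContainingObject("dir/obj", cnr)

    fmt.Println(c.GetVersions(rootKey) == nil) // true
    fmt.Println(c.GetVersions(dirKey) == nil)  // true
}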
191 api/cache/objectslist_test.go vendored
@@ -1,191 +0,0 @@
package cache

import (
    "testing"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
    oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
    "github.com/stretchr/testify/require"
    "go.uber.org/zap/zaptest"
)

const testingCacheLifetime = 5 * time.Second
const testingCacheSize = 10

func getTestObjectsListConfig(t *testing.T) *Config {
    return &Config{
        Size:     testingCacheSize,
        Lifetime: testingCacheLifetime,
        Logger:   zaptest.NewLogger(t),
    }
}

func TestObjectsListCache(t *testing.T) {
    var (
        listSize        = 10
        versions        []*data.NodeVersion
        cidKey, cidKey2 = cidtest.ID(), cidtest.ID()
    )

    for i := 0; i < listSize; i++ {
        versions = append(versions, &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{OID: oidtest.ID()}})
    }

    t.Run("lifetime", func(t *testing.T) {
        var (
            config  = getTestObjectsListConfig(t)
            cache   = NewObjectsListCache(config)
            listKey = ObjectsListKey{cid: cidKey}
        )

        err := cache.PutVersions(listKey, versions)
        require.NoError(t, err)

        condition := func() bool {
            return cache.GetVersions(listKey) == nil
        }

        require.Never(t, condition, config.Lifetime, time.Second)
        require.Eventually(t, condition, time.Second, 10*time.Millisecond)
    })

    t.Run("get cache with empty prefix", func(t *testing.T) {
        var (
            cache   = NewObjectsListCache(getTestObjectsListConfig(t))
            listKey = ObjectsListKey{cid: cidKey}
        )
        err := cache.PutVersions(listKey, versions)
        require.NoError(t, err)

        actual := cache.GetVersions(listKey)

        require.Equal(t, len(versions), len(actual))
        for i := range versions {
            require.Equal(t, versions[i], actual[i])
        }
    })

    t.Run("get cache with prefix", func(t *testing.T) {
        listKey := ObjectsListKey{
            cid:    cidKey,
            prefix: "dir",
        }

        cache := NewObjectsListCache(getTestObjectsListConfig(t))
        err := cache.PutVersions(listKey, versions)
        require.NoError(t, err)

        actual := cache.GetVersions(listKey)

        require.Equal(t, len(versions), len(actual))
        for i := range versions {
            require.Equal(t, versions[i], actual[i])
        }
    })

    t.Run("get cache with other prefix", func(t *testing.T) {
        var (
            listKey = ObjectsListKey{
                cid:    cidKey,
                prefix: "dir",
            }

            newKey = ObjectsListKey{
                cid:    cidKey,
                prefix: "obj",
            }
        )

        cache := NewObjectsListCache(getTestObjectsListConfig(t))
        err := cache.PutVersions(listKey, versions)
        require.NoError(t, err)

        actual := cache.GetVersions(newKey)
        require.Nil(t, actual)
    })

    t.Run("get cache with non-existing key", func(t *testing.T) {
        var (
            listKey = ObjectsListKey{
                cid: cidKey,
            }
            newKey = ObjectsListKey{
                cid: cidKey2,
            }
        )

        cache := NewObjectsListCache(getTestObjectsListConfig(t))
        err := cache.PutVersions(listKey, versions)
        require.NoError(t, err)

        actual := cache.GetVersions(newKey)
        require.Nil(t, actual)
    })
}

func TestCleanCacheEntriesChangedWithPutObject(t *testing.T) {
    var (
        id       = cidtest.ID()
        versions = []*data.NodeVersion{{BaseNodeVersion: data.BaseNodeVersion{OID: oidtest.ID()}}}
        keys     []ObjectsListKey
    )

    for _, p := range []string{"", "dir/", "dir/lol/"} {
        keys = append(keys, ObjectsListKey{cid: id, prefix: p})
    }

    t.Run("put object to the root of the bucket", func(t *testing.T) {
        config := getTestObjectsListConfig(t)
        config.Lifetime = time.Minute
        cache := NewObjectsListCache(config)
        for _, k := range keys {
            err := cache.PutVersions(k, versions)
            require.NoError(t, err)
        }
        cache.CleanCacheEntriesContainingObject("obj1", id)
        for _, k := range keys {
            list := cache.GetVersions(k)
            if k.prefix == "" {
                require.Nil(t, list)
            } else {
                require.NotNil(t, list)
            }
        }
    })

    t.Run("put object to dir/", func(t *testing.T) {
        config := getTestObjectsListConfig(t)
        config.Lifetime = time.Minute
        cache := NewObjectsListCache(config)
        for _, k := range keys {
            err := cache.PutVersions(k, versions)
            require.NoError(t, err)
        }
        cache.CleanCacheEntriesContainingObject("dir/obj", id)
        for _, k := range keys {
            list := cache.GetVersions(k)
            if k.prefix == "" || k.prefix == "dir/" {
                require.Nil(t, list)
            } else {
                require.NotNil(t, list)
            }
        }
    })

    t.Run("put object to dir/lol/", func(t *testing.T) {
        config := getTestObjectsListConfig(t)
        config.Lifetime = time.Minute
        cache := NewObjectsListCache(config)
        for _, k := range keys {
            err := cache.PutVersions(k, versions)
            require.NoError(t, err)
        }
        cache.CleanCacheEntriesContainingObject("dir/lol/obj", id)
        for _, k := range keys {
            list := cache.GetVersions(k)
            require.Nil(t, list)
        }
    })
}
72 api/cache/policy.go vendored
@@ -1,72 +0,0 @@
package cache

import (
    "fmt"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
    "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
    "github.com/bluele/gcache"
    "go.uber.org/zap"
)

// MorphPolicyCache provides lru cache for listing policies stored in policy contract.
type MorphPolicyCache struct {
    cache  gcache.Cache
    logger *zap.Logger
}

type MorphPolicyCacheKey struct {
    Target engine.Target
    Name   chain.Name
}

const (
    // DefaultMorphPolicyCacheSize is a default maximum number of entries in cache.
    DefaultMorphPolicyCacheSize = 1e4
    // DefaultMorphPolicyCacheLifetime is a default lifetime of entries in cache.
    DefaultMorphPolicyCacheLifetime = time.Minute
)

// DefaultMorphPolicyConfig returns new default cache expiration values.
func DefaultMorphPolicyConfig(logger *zap.Logger) *Config {
    return &Config{
        Size:     DefaultMorphPolicyCacheSize,
        Lifetime: DefaultMorphPolicyCacheLifetime,
        Logger:   logger,
    }
}

// NewMorphPolicyCache creates an object of MorphPolicyCache.
func NewMorphPolicyCache(config *Config) *MorphPolicyCache {
    gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
    return &MorphPolicyCache{cache: gc, logger: config.Logger}
}

// Get returns a cached object. Returns nil if value is missing.
func (o *MorphPolicyCache) Get(key MorphPolicyCacheKey) []*chain.Chain {
    entry, err := o.cache.Get(key)
    if err != nil {
        return nil
    }

    result, ok := entry.([]*chain.Chain)
    if !ok {
        o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return result
}

// Put puts an object to cache.
func (o *MorphPolicyCache) Put(key MorphPolicyCacheKey, list []*chain.Chain) error {
    return o.cache.Set(key, list)
}

// Delete deletes an object from cache.
func (o *MorphPolicyCache) Delete(key MorphPolicyCacheKey) bool {
    return o.cache.Remove(key)
}
168 api/cache/system.go vendored
@@ -1,168 +0,0 @@
package cache

import (
    "fmt"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "github.com/bluele/gcache"
    "go.uber.org/zap"
)

// SystemCache provides lru cache for objects.
// This cache contains "system" objects (bucket versioning settings, tagging object etc.).
// Key is bucketName+systemFilePath.
type SystemCache struct {
    cache  gcache.Cache
    logger *zap.Logger
}

const (
    // DefaultSystemCacheSize is a default maximum number of entries in cache.
    DefaultSystemCacheSize = 1e4
    // DefaultSystemCacheLifetime is a default lifetime of entries in cache.
    DefaultSystemCacheLifetime = 5 * time.Minute
)

// DefaultSystemConfig returns new default cache expiration values.
func DefaultSystemConfig(logger *zap.Logger) *Config {
    return &Config{
        Size:     DefaultSystemCacheSize,
        Lifetime: DefaultSystemCacheLifetime,
        Logger:   logger,
    }
}

// NewSystemCache creates an object of SystemCache.
func NewSystemCache(config *Config) *SystemCache {
    gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
    return &SystemCache{cache: gc, logger: config.Logger}
}

// GetObject returns a cached object.
func (o *SystemCache) GetObject(key string) *data.ObjectInfo {
    entry, err := o.cache.Get(key)
    if err != nil {
        return nil
    }

    result, ok := entry.(*data.ObjectInfo)
    if !ok {
        o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return result
}

// GetLockInfo returns a cached lock info.
func (o *SystemCache) GetLockInfo(key string) *data.LockInfo {
    entry, err := o.cache.Get(key)
    if err != nil {
        return nil
    }

    result, ok := entry.(*data.LockInfo)
    if !ok {
        return nil
    }

    return result
}

func (o *SystemCache) GetCORS(key string) *data.CORSConfiguration {
    entry, err := o.cache.Get(key)
    if err != nil {
        return nil
    }

    result, ok := entry.(*data.CORSConfiguration)
    if !ok {
        o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return result
}

func (o *SystemCache) GetLifecycleConfiguration(key string) *data.LifecycleConfiguration {
    entry, err := o.cache.Get(key)
    if err != nil {
        return nil
    }

    result, ok := entry.(*data.LifecycleConfiguration)
    if !ok {
        o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return result
}

func (o *SystemCache) GetSettings(key string) *data.BucketSettings {
    entry, err := o.cache.Get(key)
    if err != nil {
        return nil
    }

    result, ok := entry.(*data.BucketSettings)
    if !ok {
        o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
        return nil
    }

    return result
}

// GetTagging returns tags of a bucket or an object.
func (o *SystemCache) GetTagging(key string) map[string]string {
    entry, err := o.cache.Get(key)
    if err != nil {
        return nil
    }

    result, ok := entry.(map[string]string)
    if !ok {
        return nil
    }

    return result
}

// PutObject puts an object to cache.
func (o *SystemCache) PutObject(key string, obj *data.ObjectInfo) error {
    return o.cache.Set(key, obj)
}

// PutLockInfo puts a lock info to cache.
|
|
||||||
func (o *SystemCache) PutLockInfo(key string, lockInfo *data.LockInfo) error {
|
|
||||||
return o.cache.Set(key, lockInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *SystemCache) PutCORS(key string, obj *data.CORSConfiguration) error {
|
|
||||||
return o.cache.Set(key, obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *SystemCache) PutLifecycleConfiguration(key string, obj *data.LifecycleConfiguration) error {
|
|
||||||
return o.cache.Set(key, obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *SystemCache) PutSettings(key string, settings *data.BucketSettings) error {
|
|
||||||
return o.cache.Set(key, settings)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutTagging puts tags of a bucket or an object.
|
|
||||||
func (o *SystemCache) PutTagging(key string, tagSet map[string]string) error {
|
|
||||||
return o.cache.Set(key, tagSet)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete deletes an object from cache.
|
|
||||||
func (o *SystemCache) Delete(key string) bool {
|
|
||||||
return o.cache.Remove(key)
|
|
||||||
}
|
|
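A hedged usage sketch of SystemCache as removed above (not part of the diff). The key format follows the type's own doc comment; the ".s3-settings" suffix mirrors the unexported bktSettingsObject constant from api/data/info.go further down:

package main

import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "go.uber.org/zap"
)

func main() {
    sc := cache.NewSystemCache(cache.DefaultSystemConfig(zap.NewNop()))

    // Key is bucketName+systemFilePath, per the SystemCache doc comment.
    key := "my-bucket" + ".s3-settings"

    if err := sc.PutSettings(key, &data.BucketSettings{Versioning: data.VersioningEnabled}); err != nil {
        fmt.Println("put failed:", err)
    }
    if s := sc.GetSettings(key); s != nil {
        fmt.Println("versioning enabled:", s.VersioningEnabled())
    }
}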
api/data/info.go (150 deletions)
@ -1,150 +0,0 @@
package data

import (
    "encoding/xml"
    "strings"
    "time"

    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

const (
    bktSettingsObject               = ".s3-settings"
    bktCORSConfigurationObject      = ".s3-cors"
    bktLifecycleConfigurationObject = ".s3-lifecycle"

    VersioningUnversioned = "Unversioned"
    VersioningEnabled     = "Enabled"
    VersioningSuspended   = "Suspended"
)

type (
    // BucketInfo stores basic bucket data.
    BucketInfo struct {
        Name                    string // container name from system attribute
        Zone                    string // container zone from system attribute
        CID                     cid.ID
        Owner                   user.ID
        Created                 time.Time
        LocationConstraint      string
        ObjectLockEnabled       bool
        HomomorphicHashDisabled bool
        PlacementPolicy         netmap.PlacementPolicy
    }

    // ObjectInfo holds S3 object data.
    ObjectInfo struct {
        ID  oid.ID
        CID cid.ID

        Bucket        string
        Name          string
        Size          uint64
        ContentType   string
        Created       time.Time
        CreationEpoch uint64
        HashSum       string
        MD5Sum        string
        Owner         user.ID
        Headers       map[string]string
    }

    // BucketSettings stores settings such as versioning.
    BucketSettings struct {
        Versioning        string
        LockConfiguration *ObjectLockConfiguration
        CannedACL         string
        OwnerKey          *keys.PublicKey
    }

    // CORSConfiguration stores CORS configuration of a request.
    CORSConfiguration struct {
        XMLName   xml.Name   `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CORSConfiguration" json:"-"`
        CORSRules []CORSRule `xml:"CORSRule" json:"CORSRules"`
    }

    // CORSRule stores rules for CORS in a bucket.
    CORSRule struct {
        ID             string   `xml:"ID,omitempty" json:"ID,omitempty"`
        AllowedHeaders []string `xml:"AllowedHeader" json:"AllowedHeaders"`
        AllowedMethods []string `xml:"AllowedMethod" json:"AllowedMethods"`
        AllowedOrigins []string `xml:"AllowedOrigin" json:"AllowedOrigins"`
        ExposeHeaders  []string `xml:"ExposeHeader" json:"ExposeHeaders"`
        MaxAgeSeconds  int      `xml:"MaxAgeSeconds,omitempty" json:"MaxAgeSeconds,omitempty"`
    }

    // ObjectVersion stores object version info.
    ObjectVersion struct {
        BktInfo               *BucketInfo
        ObjectName            string
        VersionID             string
        NoErrorOnDeleteMarker bool
    }

    // CreatedObjectInfo stores created object info.
    CreatedObjectInfo struct {
        ID            oid.ID
        Size          uint64
        HashSum       []byte
        MD5Sum        []byte
        CreationEpoch uint64
    }
)

// SettingsObjectName is a system name for a bucket settings file.
func (b *BucketInfo) SettingsObjectName() string { return bktSettingsObject }

// CORSObjectName returns a system name for a bucket CORS configuration file.
func (b *BucketInfo) CORSObjectName() string {
    return b.CID.EncodeToString() + bktCORSConfigurationObject
}

func (b *BucketInfo) LifecycleConfigurationObjectName() string {
    return b.CID.EncodeToString() + bktLifecycleConfigurationObject
}

// VersionID returns object version from ObjectInfo.
func (o *ObjectInfo) VersionID() string { return o.ID.EncodeToString() }

// NiceName returns object name for cache.
func (o *ObjectInfo) NiceName() string { return o.Bucket + "/" + o.Name }

// Address returns object address.
func (o *ObjectInfo) Address() oid.Address {
    var addr oid.Address
    addr.SetContainer(o.CID)
    addr.SetObject(o.ID)

    return addr
}

func (o *ObjectInfo) ETag(md5Enabled bool) string {
    if md5Enabled && len(o.MD5Sum) > 0 {
        return o.MD5Sum
    }

    return o.HashSum
}

func (b BucketSettings) Unversioned() bool {
    return b.Versioning == VersioningUnversioned
}

func (b BucketSettings) VersioningEnabled() bool {
    return b.Versioning == VersioningEnabled
}

func (b BucketSettings) VersioningSuspended() bool {
    return b.Versioning == VersioningSuspended
}

func Quote(val string) string {
    return "\"" + val + "\""
}

func UnQuote(val string) string {
    return strings.Trim(val, "\"")
}
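A small sketch of the ETag selection and quoting helpers above (not part of the diff; values are illustrative):

package main

import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)

func main() {
    obj := &data.ObjectInfo{HashSum: "sha256-hex", MD5Sum: "md5-hex"}

    fmt.Println(obj.ETag(false)) // sha256-hex: MD5 disabled, the SHA-256 checksum wins
    fmt.Println(obj.ETag(true))  // md5-hex: MD5 enabled and present

    // Quote/UnQuote add and strip the double quotes S3 wraps around ETags.
    fmt.Println(data.Quote(obj.ETag(true)))             // "md5-hex"
    fmt.Println(data.UnQuote(`"md5-hex"`) == "md5-hex") // true
}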
@ -1,56 +0,0 @@
package data

import "encoding/xml"

const (
    LifecycleStatusEnabled  = "Enabled"
    LifecycleStatusDisabled = "Disabled"
)

type (
    LifecycleConfiguration struct {
        XMLName xml.Name        `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LifecycleConfiguration" json:"-"`
        Rules   []LifecycleRule `xml:"Rule"`
    }

    LifecycleRule struct {
        Status                         string                          `xml:"Status,omitempty"`
        AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"`
        Expiration                     *LifecycleExpiration            `xml:"Expiration,omitempty"`
        Filter                         *LifecycleRuleFilter            `xml:"Filter,omitempty"`
        ID                             string                          `xml:"ID,omitempty"`
        NonCurrentVersionExpiration    *NonCurrentVersionExpiration    `xml:"NoncurrentVersionExpiration,omitempty"`
        Prefix                         string                          `xml:"Prefix,omitempty"`
    }

    AbortIncompleteMultipartUpload struct {
        DaysAfterInitiation *int `xml:"DaysAfterInitiation,omitempty"`
    }

    LifecycleExpiration struct {
        Date                      string  `xml:"Date,omitempty"`
        Days                      *int    `xml:"Days,omitempty"`
        Epoch                     *uint64 `xml:"Epoch,omitempty"`
        ExpiredObjectDeleteMarker *bool   `xml:"ExpiredObjectDeleteMarker,omitempty"`
    }

    LifecycleRuleFilter struct {
        And                   *LifecycleRuleAndOperator `xml:"And,omitempty"`
        ObjectSizeGreaterThan *uint64                   `xml:"ObjectSizeGreaterThan,omitempty"`
        ObjectSizeLessThan    *uint64                   `xml:"ObjectSizeLessThan,omitempty"`
        Prefix                string                    `xml:"Prefix,omitempty"`
        Tag                   *Tag                      `xml:"Tag,omitempty"`
    }

    LifecycleRuleAndOperator struct {
        ObjectSizeGreaterThan *uint64 `xml:"ObjectSizeGreaterThan,omitempty"`
        ObjectSizeLessThan    *uint64 `xml:"ObjectSizeLessThan,omitempty"`
        Prefix                string  `xml:"Prefix,omitempty"`
        Tags                  []Tag   `xml:"Tag"`
    }

    NonCurrentVersionExpiration struct {
        NewerNonCurrentVersions *int `xml:"NewerNoncurrentVersions,omitempty"`
        NonCurrentDays          *int `xml:"NoncurrentDays,omitempty"`
    }
)
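To make the XML mapping above concrete, a sketch that unmarshals a minimal lifecycle document into these types (not part of the diff; the payload is an invented example):

package main

import (
    "encoding/xml"
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)

func main() {
    payload := []byte(`<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Rule>
    <ID>expire-tmp</ID>
    <Status>Enabled</Status>
    <Filter><Prefix>tmp/</Prefix></Filter>
    <Expiration><Days>7</Days></Expiration>
  </Rule>
</LifecycleConfiguration>`)

    var cfg data.LifecycleConfiguration
    if err := xml.Unmarshal(payload, &cfg); err != nil {
        panic(err)
    }
    rule := cfg.Rules[0]
    fmt.Println(rule.ID, rule.Status, rule.Filter.Prefix, *rule.Expiration.Days) // expire-tmp Enabled tmp/ 7
}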
@ -1,19 +0,0 @@
package data

import (
    "context"
    "sync/atomic"
)

type VersionsStream interface {
    Next(ctx context.Context) (*NodeVersion, error)
}

type ListSession struct {
    Next     []*ExtendedNodeVersion
    Stream   VersionsStream
    NamesMap map[string]struct{}
    Context  context.Context
    Cancel   context.CancelFunc
    Acquired atomic.Bool
}
@ -1,50 +0,0 @@
package data

import (
    "encoding/xml"
    "time"
)

type (
    ObjectLockConfiguration struct {
        XMLName           xml.Name        `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ObjectLockConfiguration" json:"-"`
        ObjectLockEnabled string          `xml:"ObjectLockEnabled" json:"ObjectLockEnabled"`
        Rule              *ObjectLockRule `xml:"Rule" json:"Rule"`
    }

    ObjectLockRule struct {
        DefaultRetention *DefaultRetention `xml:"DefaultRetention" json:"DefaultRetention"`
    }

    DefaultRetention struct {
        Days  int64  `xml:"Days" json:"Days"`
        Mode  string `xml:"Mode" json:"Mode"`
        Years int64  `xml:"Years" json:"Years"`
    }

    LegalHold struct {
        XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LegalHold" json:"-"`
        Status  string   `xml:"Status" json:"Status"`
    }

    Retention struct {
        XMLName         xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Retention" json:"-"`
        Mode            string   `xml:"Mode" json:"Mode"`
        RetainUntilDate string   `xml:"RetainUntilDate" json:"RetainUntilDate"`
    }

    ObjectLock struct {
        LegalHold *LegalHoldLock
        Retention *RetentionLock
    }

    LegalHoldLock struct {
        Enabled bool
    }

    RetentionLock struct {
        Until              time.Time
        IsCompliance       bool
        ByPassedGovernance bool
    }
)
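A short sketch constructing the in-memory ObjectLock above, pairing a legal hold with a compliance-mode retention (not part of the diff; values are illustrative):

package main

import (
    "fmt"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)

func main() {
    lock := &data.ObjectLock{
        LegalHold: &data.LegalHoldLock{Enabled: true},
        Retention: &data.RetentionLock{
            Until:        time.Now().Add(24 * time.Hour),
            IsCompliance: true, // compliance mode: governance bypass does not apply
        },
    }
    fmt.Println(lock.Retention.Until.After(time.Now())) // true
}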
@ -1,30 +0,0 @@
package data

import "encoding/xml"

// Tagging contains tag set.
type Tagging struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Tagging"`
    TagSet  []Tag    `xml:"TagSet>Tag"`
}

// Tag is an AWS key-value tag.
type Tag struct {
    Key   string
    Value string
}

type GetObjectTaggingParams struct {
    ObjectVersion *ObjectVersion

    // NodeVersion can be nil. If not nil we save one request to tree service.
    NodeVersion *NodeVersion // optional
}

type PutObjectTaggingParams struct {
    ObjectVersion *ObjectVersion
    TagSet        map[string]string

    // NodeVersion can be nil. If not nil we save one request to tree service.
    NodeVersion *NodeVersion // optional
}
api/data/tree.go (208 deletions)
@ -1,208 +0,0 @@
package data

import (
    "strconv"
    "time"

    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

const (
    UnversionedObjectVersionID = "null"
)

// NodeVersion represents a node from the tree service.
type NodeVersion struct {
    BaseNodeVersion
    IsUnversioned bool
    IsCombined    bool
}

// ExtendedNodeVersion contains additional node info to be able to sort versions by timestamp.
type ExtendedNodeVersion struct {
    NodeVersion *NodeVersion
    IsLatest    bool
    DirName     string
}

func (e ExtendedNodeVersion) Version() string {
    if e.NodeVersion.IsUnversioned {
        return UnversionedObjectVersionID
    }

    return e.NodeVersion.OID.EncodeToString()
}

func (e ExtendedNodeVersion) Name() string {
    if e.DirName != "" {
        return e.DirName
    }

    return e.NodeVersion.FilePath
}

// ExtendedObjectInfo contains additional node info to be able to sort versions by timestamp.
type ExtendedObjectInfo struct {
    ObjectInfo  *ObjectInfo
    NodeVersion *NodeVersion
    IsLatest    bool
}

func (e ExtendedObjectInfo) Version() string {
    if e.NodeVersion.IsUnversioned {
        return UnversionedObjectVersionID
    }

    return e.ObjectInfo.ID.EncodeToString()
}

// BaseNodeVersion is minimal node info from the tree service.
// Basically used for "system" objects.
type BaseNodeVersion struct {
    ID             uint64
    ParentID       uint64
    OID            oid.ID
    Timestamp      uint64
    Size           uint64
    ETag           string
    MD5            string
    FilePath       string
    Created        *time.Time
    Owner          *user.ID
    IsDeleteMarker bool
    CreationEpoch  uint64
}

func (v *BaseNodeVersion) GetETag(md5Enabled bool) string {
    if md5Enabled && len(v.MD5) > 0 {
        return v.MD5
    }
    return v.ETag
}

// IsFilledExtra returns true if the node was created by gate version v0.29.x or later.
func (v BaseNodeVersion) IsFilledExtra() bool {
    return v.Created != nil && v.Owner != nil
}

func (v *BaseNodeVersion) FillExtra(owner *user.ID, created *time.Time, realSize uint64) {
    v.Owner = owner
    v.Created = created
    v.Size = realSize
}

type ObjectTaggingInfo struct {
    CnrID     cid.ID
    ObjName   string
    VersionID string
}

// MultipartInfo is multipart upload information.
type MultipartInfo struct {
    // ID is node id in tree service.
    // It's ignored when creating a new multipart upload.
    ID            uint64
    Key           string
    UploadID      string
    Owner         user.ID
    Created       time.Time
    Meta          map[string]string
    Finished      bool
    CreationEpoch uint64
}

// PartInfo is upload information about part.
type PartInfo struct {
    Key      string    `json:"key"`
    UploadID string    `json:"uploadId"`
    Number   int       `json:"number"`
    OID      oid.ID    `json:"oid"`
    Size     uint64    `json:"size"`
    ETag     string    `json:"etag"`
    MD5      string    `json:"md5"`
    Created  time.Time `json:"created"`
}

type PartInfoExtended struct {
    PartInfo

    // Timestamp is used to find the latest version of part info in case of tree split
    // when there are multiple nodes for the same part.
    Timestamp uint64
}

// ToHeaderString forms a short part representation to use in the S3-Completed-Parts header.
func (p *PartInfo) ToHeaderString() string {
    // ETag value contains SHA256 checksum which is used while getting object parts attributes.
    return strconv.Itoa(p.Number) + "-" + strconv.FormatUint(p.Size, 10) + "-" + p.ETag
}

func (p *PartInfo) GetETag(md5Enabled bool) string {
    if md5Enabled && len(p.MD5) > 0 {
        return p.MD5
    }
    return p.ETag
}

// LockInfo is lock information to create appropriate tree node.
type LockInfo struct {
    id uint64

    legalHoldOID oid.ID
    setLegalHold bool

    retentionOID oid.ID
    setRetention bool
    untilDate    string
    isCompliance bool
}

func NewLockInfo(id uint64) *LockInfo {
    return &LockInfo{id: id}
}

func (l LockInfo) ID() uint64 {
    return l.id
}

func (l *LockInfo) SetLegalHold(objID oid.ID) {
    l.legalHoldOID = objID
    l.setLegalHold = true
}

func (l *LockInfo) ResetLegalHold() {
    l.setLegalHold = false
}

func (l LockInfo) LegalHold() oid.ID {
    return l.legalHoldOID
}

func (l LockInfo) IsLegalHoldSet() bool {
    return l.setLegalHold
}

func (l *LockInfo) SetRetention(objID oid.ID, until string, isCompliance bool) {
    l.retentionOID = objID
    l.setRetention = true
    l.untilDate = until
    l.isCompliance = isCompliance
}

func (l LockInfo) IsRetentionSet() bool {
    return l.setRetention
}

func (l LockInfo) Retention() oid.ID {
    return l.retentionOID
}

func (l LockInfo) UntilDate() string {
    return l.untilDate
}

func (l LockInfo) IsCompliance() bool {
    return l.isCompliance
}
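A sketch of the S3-Completed-Parts packing implemented by ToHeaderString above (not part of the diff; values are illustrative):

package main

import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)

func main() {
    p := &data.PartInfo{Number: 2, Size: 5242880, ETag: "abc123"}

    // "<number>-<size>-<etag>", the packing used for the S3-Completed-Parts header.
    fmt.Println(p.ToHeaderString()) // 2-5242880-abc123

    // GetETag prefers MD5 only when enabled and present; here it falls back to ETag.
    fmt.Println(p.GetETag(true)) // abc123
}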
api/errors/errors.go (1861 deletions)
File diff suppressed because it is too large.

@ -1,84 +0,0 @@
package errors

import (
    "errors"
    "fmt"
    "testing"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
    "github.com/stretchr/testify/require"
)

func BenchmarkErrCode(b *testing.B) {
    err := GetAPIError(ErrNoSuchKey)

    for i := 0; i < b.N; i++ {
        if IsS3Error(err, ErrNoSuchKey) {
            _ = err
        }
    }
}

func BenchmarkErrorsIs(b *testing.B) {
    err := GetAPIError(ErrNoSuchKey)

    for i := 0; i < b.N; i++ {
        if errors.Is(err, GetAPIError(ErrNoSuchKey)) {
            _ = err
        }
    }
}

func TestTransformS3Errors(t *testing.T) {
    for _, tc := range []struct {
        name     string
        err      error
        expected ErrorCode
    }{
        {
            name:     "simple std error to internal error",
            err:      errors.New("some error"),
            expected: ErrInternalError,
        },
        {
            name:     "layer access denied error to s3 access denied error",
            err:      frostfs.ErrAccessDenied,
            expected: ErrAccessDenied,
        },
        {
            name:     "wrapped layer access denied error to s3 access denied error",
            err:      fmt.Errorf("wrap: %w", frostfs.ErrAccessDenied),
            expected: ErrAccessDenied,
        },
        {
            name:     "layer node access denied error to s3 access denied error",
            err:      tree.ErrNodeAccessDenied,
            expected: ErrAccessDenied,
        },
        {
            name:     "layer gateway timeout error to s3 gateway timeout error",
            err:      frostfs.ErrGatewayTimeout,
            expected: ErrGatewayTimeout,
        },
        {
            name:     "s3 error to s3 error",
            err:      GetAPIError(ErrInvalidPart),
            expected: ErrInvalidPart,
        },
        {
            name:     "wrapped s3 error to s3 error",
            err:      fmt.Errorf("wrap: %w", GetAPIError(ErrInvalidPart)),
            expected: ErrInvalidPart,
        },
    } {
        t.Run(tc.name, func(t *testing.T) {
            err := TransformToS3Error(tc.err)
            s3err, ok := err.(Error)
            require.True(t, ok, "error must be s3 error")
            require.Equalf(t, tc.expected, s3err.ErrCode,
                "expected: '%s', got: '%s'",
                GetAPIError(tc.expected).Code, GetAPIError(s3err.ErrCode).Code)
        })
    }
}
@ -1,440 +0,0 @@
package handler

import (
    "context"
    "crypto/elliptic"
    "encoding/hex"
    "encoding/json"
    stderrors "errors"
    "fmt"
    "io"
    "net/http"
    "strings"

    "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    engineiam "git.frostfs.info/TrueCloudLab/policy-engine/iam"
    "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "go.uber.org/zap"
)

const (
    arnAwsPrefix  = "arn:aws:s3:::"
    allUsersGroup = "http://acs.amazonaws.com/groups/global/AllUsers"
)

// AWSACL represents AWS permission constants.
type AWSACL string

const (
    aclFullControl AWSACL = "FULL_CONTROL"
    aclWrite       AWSACL = "WRITE"
    aclRead        AWSACL = "READ"
)

// GranteeType represents an AWS grantee permission type.
type GranteeType string

const (
    acpCanonicalUser GranteeType = "CanonicalUser"
    acpGroup         GranteeType = "Group"
)

func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
    ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.GetBucketACL")
    defer span.End()

    reqInfo := middleware.GetReqInfo(ctx)

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
        h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
        return
    }

    settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
    if err != nil {
        h.logAndSendError(ctx, w, "couldn't get bucket settings", reqInfo, err)
        return
    }

    if err = middleware.EncodeToResponse(w, h.encodeBucketCannedACL(ctx, bktInfo, settings)); err != nil {
        h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
        return
    }
}

func (h *handler) encodeBucketCannedACL(ctx context.Context, bktInfo *data.BucketInfo, settings *data.BucketSettings) *AccessControlPolicy {
    res := h.encodePrivateCannedACL(ctx, bktInfo, settings)

    switch settings.CannedACL {
    case basicACLPublic:
        grantee := NewGrantee(acpGroup)
        grantee.URI = allUsersGroup

        res.AccessControlList = append(res.AccessControlList, &Grant{
            Grantee:    grantee,
            Permission: aclWrite,
        })
        fallthrough
    case basicACLReadOnly:
        grantee := NewGrantee(acpGroup)
        grantee.URI = allUsersGroup

        res.AccessControlList = append(res.AccessControlList, &Grant{
            Grantee:    grantee,
            Permission: aclRead,
        })
    }

    return res
}

func (h *handler) encodePrivateCannedACL(ctx context.Context, bktInfo *data.BucketInfo, settings *data.BucketSettings) *AccessControlPolicy {
    ownerDisplayName := bktInfo.Owner.EncodeToString()
    ownerEncodedID := ownerDisplayName

    if settings.OwnerKey == nil {
        h.reqLogger(ctx).Warn(logs.BucketOwnerKeyIsMissing, zap.String("owner", bktInfo.Owner.String()), logs.TagField(logs.TagDatapath))
    } else {
        ownerDisplayName = settings.OwnerKey.Address()
        ownerEncodedID = hex.EncodeToString(settings.OwnerKey.Bytes())
    }

    res := &AccessControlPolicy{Owner: Owner{
        ID:          ownerEncodedID,
        DisplayName: ownerDisplayName,
    }}

    return res
}

func getTokenIssuerKey(box *accessbox.Box) (*keys.PublicKey, error) {
    if box.Gate.BearerToken == nil {
        return nil, stderrors.New("bearer token is missing")
    }

    key, err := keys.NewPublicKeyFromBytes(box.Gate.BearerToken.SigningKeyBytes(), elliptic.P256())
    if err != nil {
        return nil, fmt.Errorf("public key from bytes: %w", err)
    }

    return key, nil
}

func (h *handler) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
    ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.PutBucketACL")
    defer span.End()

    reqInfo := middleware.GetReqInfo(ctx)

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
        h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
        return
    }

    settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
    if err != nil {
        h.logAndSendError(ctx, w, "couldn't get bucket settings", reqInfo, err)
        return
    }

    h.putBucketACLAPEHandler(w, r, reqInfo, bktInfo, settings)
}

func (h *handler) putBucketACLAPEHandler(w http.ResponseWriter, r *http.Request, reqInfo *middleware.ReqInfo, bktInfo *data.BucketInfo, settings *data.BucketSettings) {
    ctx := r.Context()

    defer func() {
        if errBody := r.Body.Close(); errBody != nil {
            h.reqLogger(ctx).Warn(logs.CouldNotCloseRequestBody, zap.Error(errBody), logs.TagField(logs.TagDatapath))
        }
    }()

    written, err := io.Copy(io.Discard, r.Body)
    if err != nil {
        h.logAndSendError(ctx, w, "couldn't read request body", reqInfo, err)
        return
    }

    if written != 0 || len(r.Header.Get(api.AmzACL)) == 0 {
        h.logAndSendError(ctx, w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
        return
    }

    cannedACL, err := parseCannedACL(r.Header)
    if err != nil {
        h.logAndSendError(ctx, w, "could not parse canned ACL", reqInfo, err)
        return
    }

    chainRules := bucketCannedACLToAPERules(cannedACL, reqInfo, bktInfo.CID)
    if err = h.ape.SaveACLChains(bktInfo.CID.EncodeToString(), chainRules); err != nil {
        h.logAndSendError(ctx, w, "failed to add morph rule chains", reqInfo, err)
        return
    }

    settings.CannedACL = cannedACL

    sp := &layer.PutSettingsParams{
        BktInfo:  bktInfo,
        Settings: settings,
    }

    if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
        h.logAndSendError(ctx, w, "couldn't save bucket settings", reqInfo, err,
            zap.String("container_id", bktInfo.CID.EncodeToString()))
        return
    }

    w.WriteHeader(http.StatusOK)
}

func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
    ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.GetObjectACL")
    defer span.End()

    reqInfo := middleware.GetReqInfo(ctx)

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
        h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
        return
    }

    settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
    if err != nil {
        h.logAndSendError(ctx, w, "couldn't get bucket settings", reqInfo, err)
        return
    }

    if err = middleware.EncodeToResponse(w, h.encodePrivateCannedACL(ctx, bktInfo, settings)); err != nil {
        h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
        return
    }
}

func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
    ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.PutObjectACL")
    defer span.End()

    reqInfo := middleware.GetReqInfo(ctx)

    if _, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil {
        h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
        return
    }

    h.logAndSendError(ctx, w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
}

func (h *handler) GetBucketPolicyStatusHandler(w http.ResponseWriter, r *http.Request) {
    ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.GetBucketPolicyStatus")
    defer span.End()

    reqInfo := middleware.GetReqInfo(ctx)

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
        h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
        return
    }

    jsonPolicy, err := h.ape.GetBucketPolicy(reqInfo.Namespace, bktInfo.CID)
    if err != nil {
        if strings.Contains(err.Error(), "not found") {
            err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucketPolicy), err.Error())
        }
        h.logAndSendError(ctx, w, "failed to get policy from storage", reqInfo, err)
        return
    }

    var bktPolicy engineiam.Policy
    if err = json.Unmarshal(jsonPolicy, &bktPolicy); err != nil {
        h.logAndSendError(ctx, w, "could not parse bucket policy", reqInfo, err)
        return
    }

    policyStatus := &PolicyStatus{
        IsPublic: PolicyStatusIsPublicFalse,
    }

    for _, st := range bktPolicy.Statement {
        // https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html#access-control-block-public-access-policy-status
        if _, ok := st.Principal[engineiam.Wildcard]; ok {
            policyStatus.IsPublic = PolicyStatusIsPublicTrue
            break
        }
    }

    if err = middleware.EncodeToResponse(w, policyStatus); err != nil {
        h.logAndSendError(ctx, w, "encode and write response", reqInfo, err)
        return
    }
}

func (h *handler) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
    ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.GetBucketPolicy")
    defer span.End()

    reqInfo := middleware.GetReqInfo(ctx)

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
        h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
        return
    }

    jsonPolicy, err := h.ape.GetBucketPolicy(reqInfo.Namespace, bktInfo.CID)
    if err != nil {
        if strings.Contains(err.Error(), "not found") {
            err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucketPolicy), err.Error())
        }
        h.logAndSendError(ctx, w, "failed to get policy from storage", reqInfo, err)
        return
    }

    w.Header().Set(api.ContentType, "application/json")
    w.WriteHeader(http.StatusOK)

    if _, err = w.Write(jsonPolicy); err != nil {
        h.logAndSendError(ctx, w, "write json policy to client", reqInfo, err)
    }
}

func (h *handler) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
    ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.DeleteBucketPolicy")
    defer span.End()

    reqInfo := middleware.GetReqInfo(ctx)

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
        h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
        return
    }

    chainIDs := []chain.ID{getBucketChainID(chain.S3, bktInfo), getBucketChainID(chain.Ingress, bktInfo)}
    if err = h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil {
        h.logAndSendError(ctx, w, "failed to delete policy from storage", reqInfo, err)
        return
    }
}

func checkOwner(info *data.BucketInfo, owner string) error {
    if owner == "" {
        return nil
    }

    // may need to convert owner to appropriate format
    if info.Owner.String() != owner {
        return fmt.Errorf("%w: mismatch owner", errors.GetAPIError(errors.ErrAccessDenied))
    }

    return nil
}

func (h *handler) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
    ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.PutBucketPolicy")
    defer span.End()

    reqInfo := middleware.GetReqInfo(ctx)

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
        h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
        return
    }

    jsonPolicy, err := io.ReadAll(r.Body)
    if err != nil {
        h.logAndSendError(ctx, w, "read body", reqInfo, err)
        return
    }

    var bktPolicy engineiam.Policy
    if err = json.Unmarshal(jsonPolicy, &bktPolicy); err != nil {
        h.logAndSendError(ctx, w, "could not parse bucket policy", reqInfo, err)
        return
    }

    for _, stat := range bktPolicy.Statement {
        if len(stat.NotResource) != 0 {
            h.logAndSendError(ctx, w, "policy resource mismatched bucket", reqInfo, errors.GetAPIError(errors.ErrMalformedPolicy))
            return
        }

        if len(stat.NotPrincipal) != 0 && stat.Effect == engineiam.AllowEffect {
            h.logAndSendError(ctx, w, "invalid NotPrincipal", reqInfo, errors.GetAPIError(errors.ErrMalformedPolicyNotPrincipal))
            return
        }

        for _, resource := range stat.Resource {
            if reqInfo.BucketName != strings.Split(strings.TrimPrefix(resource, arnAwsPrefix), "/")[0] {
                h.logAndSendError(ctx, w, "policy resource mismatched bucket", reqInfo, errors.GetAPIError(errors.ErrMalformedPolicy))
                return
            }
        }
    }

    s3Chain, err := engineiam.ConvertToS3Chain(bktPolicy, h.frostfsid)
    if err != nil {
        h.logAndSendError(ctx, w, "could not convert s3 policy to chain policy", reqInfo, err)
        return
    }
    s3Chain.ID = getBucketChainID(chain.S3, bktInfo)

    nativeChain, err := engineiam.ConvertToNativeChain(bktPolicy, h.nativeResolver(reqInfo.Namespace, bktInfo))
    if err == nil {
        nativeChain.ID = getBucketChainID(chain.Ingress, bktInfo)
    } else if !stderrors.Is(err, engineiam.ErrActionsNotApplicable) {
        h.logAndSendError(ctx, w, "could not convert s3 policy to native chain policy", reqInfo, err)
        return
    } else {
        h.reqLogger(ctx).Warn(logs.PolicyCouldntBeConvertedToNativeRules, logs.TagField(logs.TagDatapath))
    }

    chainsToSave := []*chain.Chain{s3Chain}
    if nativeChain != nil {
        chainsToSave = append(chainsToSave, nativeChain)
    }

    if err = h.ape.PutBucketPolicy(reqInfo.Namespace, bktInfo.CID, jsonPolicy, chainsToSave); err != nil {
        h.logAndSendError(ctx, w, "failed to update policy in contract", reqInfo, err)
        return
    }
}

type nativeResolver struct {
    FrostFSID
    namespace string
    bktInfo   *data.BucketInfo
}

func (n *nativeResolver) GetBucketInfo(bucket string) (*engineiam.BucketInfo, error) {
    if n.bktInfo.Name != bucket {
        return nil, fmt.Errorf("invalid bucket %s: %w", bucket, errors.GetAPIError(errors.ErrMalformedPolicy))
    }

    return &engineiam.BucketInfo{Namespace: n.namespace, Container: n.bktInfo.CID.EncodeToString()}, nil
}

func (h *handler) nativeResolver(ns string, bktInfo *data.BucketInfo) engineiam.NativeResolver {
    return &nativeResolver{
        FrostFSID: h.frostfsid,
        namespace: ns,
        bktInfo:   bktInfo,
    }
}

func getBucketChainID(prefix chain.Name, bktInfo *data.BucketInfo) chain.ID {
    return chain.ID(string(prefix) + ":bkt" + string(bktInfo.CID[:]))
}
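The fallthrough in encodeBucketCannedACL above is easy to misread, so here is a standalone sketch of the same grant mapping (not part of the diff). The literal canned-ACL strings are assumptions: the real basicACLPublic and basicACLReadOnly constants are defined elsewhere in the handler package and are not shown in this diff:

package main

import "fmt"

// grantsFor mirrors the switch in encodeBucketCannedACL: a public
// (read-write) ACL appends a WRITE grant and then falls through to
// also append the READ grant that a read-only ACL gets on its own.
func grantsFor(cannedACL string) []string {
    var grants []string
    switch cannedACL {
    case "public-read-write": // assumed value of basicACLPublic
        grants = append(grants, "WRITE")
        fallthrough
    case "public-read": // assumed value of basicACLReadOnly
        grants = append(grants, "READ")
    }
    return grants
}

func main() {
    fmt.Println(grantsFor("public-read-write")) // [WRITE READ]
    fmt.Println(grantsFor("public-read"))       // [READ]
    fmt.Println(grantsFor("private"))           // []
}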
@ -1,445 +0,0 @@
package handler

import (
    "bytes"
    "encoding/hex"
    "encoding/json"
    "encoding/xml"
    "net/http"
    "net/http/httptest"
    "net/url"
    "testing"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
    engineiam "git.frostfs.info/TrueCloudLab/policy-engine/iam"
    "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/stretchr/testify/require"
)

func TestPutObjectACLErrorAPE(t *testing.T) {
    hc := prepareHandlerContext(t)
    bktName, objName := "bucket-for-acl-ape", "object"

    info := createBucket(hc, bktName)

    putObjectWithHeadersAssertS3Error(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, apierr.ErrAccessControlListNotSupported)
    putObjectWithHeaders(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPrivate}) // only the `private` canned ACL is allowed, and it is actually ignored
    putObjectWithHeaders(hc, bktName, objName, nil)

    aclBody := &AccessControlPolicy{}
    putObjectACLAssertS3Error(hc, bktName, objName, info.Box, nil, aclBody, apierr.ErrAccessControlListNotSupported)

    aclRes := getObjectACL(hc, bktName, objName)
    checkPrivateACL(t, aclRes, info.Key.PublicKey())
}

func TestCreateObjectACLErrorAPE(t *testing.T) {
    hc := prepareHandlerContext(t)
    bktName, objName, objNameCopy := "bucket-for-acl-ape", "object", "copy"

    createBucket(hc, bktName)

    putObject(hc, bktName, objName)
    copyObject(hc, bktName, objName, objNameCopy, CopyMeta{Headers: map[string]string{api.AmzACL: basicACLPublic}}, http.StatusBadRequest)
    copyObject(hc, bktName, objName, objNameCopy, CopyMeta{Headers: map[string]string{api.AmzACL: basicACLPrivate}}, http.StatusOK)

    createMultipartUploadAssertS3Error(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, apierr.ErrAccessControlListNotSupported)
    createMultipartUpload(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPrivate})
}

func TestBucketACLAPE(t *testing.T) {
    hc := prepareHandlerContext(t)
    bktName := "bucket-for-acl-ape"

    info := createBucket(hc, bktName)

    aclBody := &AccessControlPolicy{}
    putBucketACLAssertS3Error(hc, bktName, info.Box, nil, aclBody, apierr.ErrAccessControlListNotSupported)

    aclRes := getBucketACL(hc, bktName)
    checkPrivateACL(t, aclRes, info.Key.PublicKey())

    putBucketACL(hc, bktName, info.Box, map[string]string{api.AmzACL: basicACLPrivate})
    aclRes = getBucketACL(hc, bktName)
    checkPrivateACL(t, aclRes, info.Key.PublicKey())

    putBucketACL(hc, bktName, info.Box, map[string]string{api.AmzACL: basicACLReadOnly})
    aclRes = getBucketACL(hc, bktName)
    checkPublicReadACL(t, aclRes, info.Key.PublicKey())

    putBucketACL(hc, bktName, info.Box, map[string]string{api.AmzACL: basicACLPublic})
    aclRes = getBucketACL(hc, bktName)
    checkPublicReadWriteACL(t, aclRes, info.Key.PublicKey())
}

func checkPrivateACL(t *testing.T, aclRes *AccessControlPolicy, ownerKey *keys.PublicKey) {
    checkACLOwner(t, aclRes, ownerKey)
}

func checkPublicReadACL(t *testing.T, aclRes *AccessControlPolicy, ownerKey *keys.PublicKey) {
    checkACLOwner(t, aclRes, ownerKey)

    require.Equal(t, allUsersGroup, aclRes.AccessControlList[0].Grantee.URI)
    require.Equal(t, aclRead, aclRes.AccessControlList[0].Permission)
}

func checkPublicReadWriteACL(t *testing.T, aclRes *AccessControlPolicy, ownerKey *keys.PublicKey) {
    checkACLOwner(t, aclRes, ownerKey)

    require.Equal(t, allUsersGroup, aclRes.AccessControlList[0].Grantee.URI)
    require.Equal(t, aclWrite, aclRes.AccessControlList[0].Permission)

    require.Equal(t, allUsersGroup, aclRes.AccessControlList[1].Grantee.URI)
    require.Equal(t, aclRead, aclRes.AccessControlList[1].Permission)
}

func checkACLOwner(t *testing.T, aclRes *AccessControlPolicy, ownerKey *keys.PublicKey) {
    ownerIDStr := hex.EncodeToString(ownerKey.Bytes())
    ownerNameStr := ownerKey.Address()

    require.Equal(t, ownerIDStr, aclRes.Owner.ID)
    require.Equal(t, ownerNameStr, aclRes.Owner.DisplayName)
}

func TestBucketPolicy(t *testing.T) {
    hc := prepareHandlerContext(t)
    bktName := "bucket-for-policy"

    createTestBucket(hc, bktName)

    getBucketPolicy(hc, bktName, apierr.ErrNoSuchBucketPolicy)

    newPolicy := engineiam.Policy{
        Version: "2012-10-17",
        Statement: []engineiam.Statement{{
            Principal: map[engineiam.PrincipalType][]string{engineiam.Wildcard: {}},
            Effect:    engineiam.DenyEffect,
            Action:    engineiam.Action{"s3:PutObject"},
            Resource:  engineiam.Resource{"arn:aws:s3:::test/*"},
        }},
    }

    putBucketPolicy(hc, bktName, newPolicy, apierr.ErrMalformedPolicy)

    newPolicy.Statement[0].Resource[0] = arnAwsPrefix + bktName + "/*"
    putBucketPolicy(hc, bktName, newPolicy)

    bktPolicy := getBucketPolicy(hc, bktName)
    require.Equal(t, newPolicy, bktPolicy)
}

func TestBucketPolicyStatus(t *testing.T) {
    hc := prepareHandlerContext(t)
    bktName := "bucket-for-policy"

    createTestBucket(hc, bktName)

    getBucketPolicy(hc, bktName, apierr.ErrNoSuchBucketPolicy)

    newPolicy := engineiam.Policy{
        Version: "2012-10-17",
        Statement: []engineiam.Statement{{
            NotPrincipal: engineiam.Principal{engineiam.Wildcard: {}},
            Effect:       engineiam.AllowEffect,
            Action:       engineiam.Action{"s3:PutObject"},
            Resource:     engineiam.Resource{arnAwsPrefix + bktName + "/*"},
        }},
    }

    putBucketPolicy(hc, bktName, newPolicy, apierr.ErrMalformedPolicyNotPrincipal)

    newPolicy.Statement[0].NotPrincipal = nil
    newPolicy.Statement[0].Principal = map[engineiam.PrincipalType][]string{engineiam.Wildcard: {}}
    putBucketPolicy(hc, bktName, newPolicy)
    bktPolicyStatus := getBucketPolicyStatus(hc, bktName)
    require.True(t, PolicyStatusIsPublicTrue == bktPolicyStatus.IsPublic)

    key, err := keys.NewPrivateKey()
    require.NoError(t, err)
    hc.Handler().frostfsid.(*frostfsidMock).data["devenv"] = key.PublicKey()

    newPolicy.Statement[0].Principal = map[engineiam.PrincipalType][]string{engineiam.AWSPrincipalType: {"arn:aws:iam:::user/devenv"}}
    putBucketPolicy(hc, bktName, newPolicy)
    bktPolicyStatus = getBucketPolicyStatus(hc, bktName)
    require.True(t, PolicyStatusIsPublicFalse == bktPolicyStatus.IsPublic)
}

func TestDeleteBucketWithPolicy(t *testing.T) {
    hc := prepareHandlerContext(t)

    bktName := "bucket-for-policy"
    bi := createTestBucket(hc, bktName)

    newPolicy := engineiam.Policy{
        Version: "2012-10-17",
        Statement: []engineiam.Statement{{
            Principal: map[engineiam.PrincipalType][]string{engineiam.Wildcard: {}},
            Effect:    engineiam.AllowEffect,
            Action:    engineiam.Action{"s3:PutObject"},
            Resource:  engineiam.Resource{"arn:aws:s3:::bucket-for-policy/*"},
        }},
    }

    putBucketPolicy(hc, bktName, newPolicy)

    require.Len(t, hc.h.ape.(*apeMock).policyMap, 1)
    require.Len(t, hc.h.ape.(*apeMock).chainMap[engine.ContainerTarget(bi.CID.EncodeToString())], 4)

    hc.owner = bi.Owner
    deleteBucket(t, hc, bktName, http.StatusNoContent)

    require.Empty(t, hc.h.ape.(*apeMock).policyMap)
    chains, err := hc.h.ape.(*apeMock).ListChains(engine.ContainerTarget(bi.CID.EncodeToString()))
    require.NoError(t, err)
    require.Empty(t, chains)
}

func TestPutBucketPolicy(t *testing.T) {
    bktPolicy := `
{
    "Version": "2012-10-17",
    "Statement": [{
        "Principal": "*",
        "Effect": "Deny",
        "Action": "s3:GetObject",
        "Resource": "arn:aws:s3:::bucket-for-policy/*"
    }]
}
`
    hc := prepareHandlerContext(t)
    bktName := "bucket-for-policy"

    createTestBucket(hc, bktName)

    w, r := prepareTestPayloadRequest(hc, bktName, "", bytes.NewReader([]byte(bktPolicy)))
    hc.Handler().PutBucketPolicyHandler(w, r)
    assertStatus(hc.t, w, http.StatusOK)
}

func getBucketPolicy(hc *handlerContext, bktName string, errCode ...apierr.ErrorCode) engineiam.Policy {
    w, r := prepareTestRequest(hc, bktName, "", nil)
    hc.Handler().GetBucketPolicyHandler(w, r)

    var policy engineiam.Policy
    if len(errCode) == 0 {
        assertStatus(hc.t, w, http.StatusOK)
        err := json.NewDecoder(w.Result().Body).Decode(&policy)
        require.NoError(hc.t, err)
    } else {
        assertS3Error(hc.t, w, apierr.GetAPIError(errCode[0]))
    }

    return policy
}

func getBucketPolicyStatus(hc *handlerContext, bktName string, errCode ...apierr.ErrorCode) PolicyStatus {
    w, r := prepareTestRequest(hc, bktName, "", nil)
    hc.Handler().GetBucketPolicyStatusHandler(w, r)

    var policyStatus PolicyStatus
    if len(errCode) == 0 {
        assertStatus(hc.t, w, http.StatusOK)
        err := xml.NewDecoder(w.Result().Body).Decode(&policyStatus)
        require.NoError(hc.t, err)
    } else {
        assertS3Error(hc.t, w, apierr.GetAPIError(errCode[0]))
    }

    return policyStatus
}

func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy engineiam.Policy, errCode ...apierr.ErrorCode) {
    body, err := json.Marshal(bktPolicy)
    require.NoError(hc.t, err)

    w, r := prepareTestPayloadRequest(hc, bktName, "", bytes.NewReader(body))
    hc.Handler().PutBucketPolicyHandler(w, r)

    if len(errCode) == 0 {
        assertStatus(hc.t, w, http.StatusOK)
    } else {
        assertS3Error(hc.t, w, apierr.GetAPIError(errCode[0]))
    }
}

func createAccessBox(t *testing.T) (*accessbox.Box, *keys.PrivateKey) {
    key, err := keys.NewPrivateKey()
    require.NoError(t, err)

    var bearerToken bearer.Token
    err = bearerToken.Sign(key.PrivateKey)
    require.NoError(t, err)

    tok := new(session.Container)
    tok.ForVerb(session.VerbContainerPut)
    err = tok.Sign(key.PrivateKey)
    require.NoError(t, err)

    box := &accessbox.Box{
        Gate: &accessbox.GateData{
            SessionTokens: []*session.Container{tok},
            BearerToken:   &bearerToken,
        },
    }

    return box, key
}

type createBucketInfo struct {
    BktInfo *data.BucketInfo
    Box     *accessbox.Box
    Key     *keys.PrivateKey
}

type bucketPrm struct {
    bktName      string
    query        url.Values
    box          *accessbox.Box
    createParams createBucketParams
}

func createBucket(hc *handlerContext, bktName string) *createBucketInfo {
    box, key := createAccessBox(hc.t)

    w := createBucketBase(hc, bucketPrm{bktName: bktName, box: box})
    assertStatus(hc.t, w, http.StatusOK)

    bktInfo, err := hc.Layer().GetBucketInfo(hc.Context(), bktName)
    require.NoError(hc.t, err)

    return &createBucketInfo{
        BktInfo: bktInfo,
        Box:     box,
        Key:     key,
    }
}

func createBucketAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, code apierr.ErrorCode) {
    w := createBucketBase(hc, bucketPrm{bktName: bktName, box: box})
    assertS3Error(hc.t, w, apierr.GetAPIError(code))
}

func createBucketWithConstraint(hc *handlerContext, bktName, constraint string) *createBucketInfo {
    box, key := createAccessBox(hc.t)
    var prm createBucketParams
    if constraint != "" {
        prm.LocationConstraint = constraint
    }
    w := createBucketBase(hc, bucketPrm{bktName: bktName, box: box, createParams: prm})
    assertStatus(hc.t, w, http.StatusOK)

    bktInfo, err := hc.Layer().GetBucketInfo(hc.Context(), bktName)
    require.NoError(hc.t, err)

    return &createBucketInfo{
        BktInfo: bktInfo,
        Box:     box,
        Key:     key,
    }
}

func createBucketBase(hc *handlerContext, prm bucketPrm) *httptest.ResponseRecorder {
    w, r := prepareTestFullRequest(hc, prm.bktName, "", nil, prm.createParams)
    ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: prm.box})
    r = r.WithContext(ctx)
    hc.Handler().CreateBucketHandler(w, r)
    return w
}

func putBucketACL(hc *handlerContext, bktName string, box *accessbox.Box, header map[string]string) {
    w := putBucketACLBase(hc, bktName, box, header, nil)
    assertStatus(hc.t, w, http.StatusOK)
}

func putBucketACLAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy, code apierr.ErrorCode) {
    w := putBucketACLBase(hc, bktName, box, header, body)
    assertS3Error(hc.t, w, apierr.GetAPIError(code))
}

func putBucketACLBase(hc *handlerContext, bktName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy) *httptest.ResponseRecorder {
    w, r := prepareTestRequest(hc, bktName, "", body)
    for key, val := range header {
        r.Header.Set(key, val)
    }
    ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
    r = r.WithContext(ctx)
    hc.Handler().PutBucketACLHandler(w, r)
    return w
}

func getBucketACL(hc *handlerContext, bktName string) *AccessControlPolicy {
    w := getBucketACLBase(hc, bktName)
    assertStatus(hc.t, w, http.StatusOK)
    res := &AccessControlPolicy{}
    parseTestResponse(hc.t, w, res)
    return res
}

func getBucketACLBase(hc *handlerContext, bktName string) *httptest.ResponseRecorder {
    w, r := prepareTestRequest(hc, bktName, "", nil)
    hc.Handler().GetBucketACLHandler(w, r)
    return w
}

func putObjectACLAssertS3Error(hc *handlerContext, bktName, objName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy, code apierr.ErrorCode) {
    w := putObjectACLBase(hc, bktName, objName, box, header, body)
    assertS3Error(hc.t, w, apierr.GetAPIError(code))
}

func putObjectACLBase(hc *handlerContext, bktName, objName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy) *httptest.ResponseRecorder {
    w, r := prepareTestRequest(hc, bktName, objName, body)
    for key, val := range header {
        r.Header.Set(key, val)
    }
    ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
    r = r.WithContext(ctx)
    hc.Handler().PutObjectACLHandler(w, r)
    return w
}
|
||||||
func getObjectACL(hc *handlerContext, bktName, objName string) *AccessControlPolicy {
|
|
||||||
w := getObjectACLBase(hc, bktName, objName)
|
|
||||||
assertStatus(hc.t, w, http.StatusOK)
|
|
||||||
res := &AccessControlPolicy{}
|
|
||||||
parseTestResponse(hc.t, w, res)
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func getObjectACLBase(hc *handlerContext, bktName, objName string) *httptest.ResponseRecorder {
|
|
||||||
w, r := prepareTestRequest(hc, bktName, objName, nil)
|
|
||||||
hc.Handler().GetObjectACLHandler(w, r)
|
|
||||||
return w
|
|
||||||
}
|
|
||||||
|
|
||||||
func putObjectWithHeaders(hc *handlerContext, bktName, objName string, headers map[string]string) http.Header {
|
|
||||||
w := putObjectWithHeadersBase(hc, bktName, objName, headers, nil, nil)
|
|
||||||
assertStatus(hc.t, w, http.StatusOK)
|
|
||||||
return w.Header()
|
|
||||||
}
|
|
||||||
|
|
||||||
func putObjectWithHeadersAssertS3Error(hc *handlerContext, bktName, objName string, headers map[string]string, code apierr.ErrorCode) {
|
|
||||||
w := putObjectWithHeadersBase(hc, bktName, objName, headers, nil, nil)
|
|
||||||
assertS3Error(hc.t, w, apierr.GetAPIError(code))
|
|
||||||
}
|
|
||||||
|
|
||||||
func putObjectWithHeadersBase(hc *handlerContext, bktName, objName string, headers map[string]string, box *accessbox.Box, data []byte) *httptest.ResponseRecorder {
|
|
||||||
body := bytes.NewReader(data)
|
|
||||||
w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
|
|
||||||
|
|
||||||
for k, v := range headers {
|
|
||||||
r.Header.Set(k, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
|
|
||||||
r = r.WithContext(ctx)
|
|
||||||
|
|
||||||
hc.Handler().PutObjectHandler(w, r)
|
|
||||||
return w
|
|
||||||
}
|
|
|
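// NOTE (editor sketch, not from the original suite): the helpers above share
// one pattern — a variadic errCode parameter makes the same call assert
// either HTTP 200 (decoding the body) or a mapped S3 error. A minimal sketch
// of a consumer; the bucket names, the zero-value policy, and the
// apierr.ErrNoSuchBucket code are illustrative assumptions and may not pass
// as-is.
func TestPutBucketPolicySketch(t *testing.T) {
	hc := prepareHandlerContext(t)
	createTestBucket(hc, "policy-bucket") // hypothetical bucket name

	var policy engineiam.Policy // zero-value policy, purely illustrative

	// No error code: the helper asserts HTTP 200.
	putBucketPolicy(hc, "policy-bucket", policy)

	// With an error code, the helper asserts the mapped S3 error instead;
	// ErrNoSuchBucket is an assumed code for a missing bucket.
	putBucketPolicy(hc, "absent-bucket", policy, apierr.ErrNoSuchBucket)
}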
@@ -1,132 +0,0 @@
package handler

import (
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"strconv"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	"go.uber.org/zap"
)

type (
	handler struct {
		log       *zap.Logger
		obj       *layer.Layer
		cfg       Config
		ape       APE
		frostfsid FrostFSID
	}

	// Config contains data which the handler needs to keep.
	Config interface {
		DefaultPlacementPolicy(namespace string) netmap.PlacementPolicy
		PlacementPolicy(namespace, constraint string) (netmap.PlacementPolicy, bool)
		CopiesNumbers(namespace, constraint string) ([]uint32, bool)
		DefaultCopiesNumbers(namespace string) []uint32
		NewXMLDecoder(reader io.Reader, agent string) *xml.Decoder
		DefaultMaxAge() int
		ResolveZoneList() []string
		IsResolveListAllow() bool
		BypassContentEncodingInChunks(agent string) bool
		MD5Enabled() bool
		RetryMaxAttempts() int
		RetryMaxBackoff() time.Duration
		RetryStrategy() RetryStrategy
		TLSTerminationHeader() string
		ListingKeepaliveThrottle() time.Duration
	}

	FrostFSID interface {
		GetUserAddress(account, user string) (string, error)
		GetUserKey(account, name string) (string, error)
	}

	// APE is the Access Policy Engine that saves policy and ACL info to different places.
	APE interface {
		PutBucketPolicy(ns string, cnrID cid.ID, policy []byte, chains []*chain.Chain) error
		DeleteBucketPolicy(ns string, cnrID cid.ID, chainIDs []chain.ID) error
		GetBucketPolicy(ns string, cnrID cid.ID) ([]byte, error)
		SaveACLChains(cid string, chains []*chain.Chain) error
	}
)

type RetryStrategy string

const (
	RetryStrategyExponential = "exponential"
	RetryStrategyConstant    = "constant"
)

var _ api.Handler = (*handler)(nil)

// New creates a new api.Handler using the given logger and client.
func New(log *zap.Logger, obj *layer.Layer, cfg Config, storage APE, ffsid FrostFSID) (api.Handler, error) {
	switch {
	case obj == nil:
		return nil, errors.New("empty FrostFS Object Layer")
	case log == nil:
		return nil, errors.New("empty logger")
	case storage == nil:
		return nil, errors.New("empty policy storage")
	case ffsid == nil:
		return nil, errors.New("empty frostfsid")
	}

	return &handler{
		log:       log,
		obj:       obj,
		cfg:       cfg,
		ape:       storage,
		frostfsid: ffsid,
	}, nil
}

// pickCopiesNumbers chooses the return values following this logic:
// 1) array of copies numbers sent in the request's header has the highest priority.
// 2) array of copies numbers with the corresponding location constraint provided in the config file.
// 3) default copies number from the config file wrapped into an array.
func (h *handler) pickCopiesNumbers(metadata map[string]string, namespace, locationConstraint string) ([]uint32, error) {
	copiesNumbersStr, ok := metadata[layer.AttributeFrostfsCopiesNumber]
	if ok {
		result, err := parseCopiesNumbers(copiesNumbersStr)
		if err != nil {
			return nil, err
		}
		return result, nil
	}

	copiesNumbers, ok := h.cfg.CopiesNumbers(namespace, locationConstraint)
	if ok {
		return copiesNumbers, nil
	}

	return h.cfg.DefaultCopiesNumbers(namespace), nil
}

func parseCopiesNumbers(copiesNumbersStr string) ([]uint32, error) {
	var result []uint32
	copiesNumbersSplit := strings.Split(copiesNumbersStr, ",")

	for i := range copiesNumbersSplit {
		item := strings.ReplaceAll(copiesNumbersSplit[i], " ", "")
		if len(item) == 0 {
			continue
		}
		copiesNumber, err := strconv.ParseUint(item, 10, 32)
		if err != nil {
			return nil, fmt.Errorf("parse copies number: %w", err)
		}
		result = append(result, uint32(copiesNumber))
	}

	return result, nil
}
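// NOTE (editor sketch): to make the three-level priority of pickCopiesNumbers
// concrete, here is a hedged sketch reusing configMock from the test file
// below; the metadata key is layer.AttributeFrostfsCopiesNumber, exactly what
// the function above reads.
func TestPickCopiesNumbersPrioritySketch(t *testing.T) {
	h := handler{cfg: &configMock{
		copiesNumbers:        map[string][]uint32{"one": {2, 3, 4}},
		defaultCopiesNumbers: []uint32{1},
	}}

	// 1) Copies numbers from the request header have the highest priority.
	cn, err := h.pickCopiesNumbers(map[string]string{layer.AttributeFrostfsCopiesNumber: "7,8"}, "", "one")
	require.NoError(t, err)
	require.Equal(t, []uint32{7, 8}, cn)

	// 2) Otherwise the location-constraint entry from the config wins.
	cn, err = h.pickCopiesNumbers(nil, "", "one")
	require.NoError(t, err)
	require.Equal(t, []uint32{2, 3, 4}, cn)

	// 3) Unknown constraint: fall back to the namespace default.
	cn, err = h.pickCopiesNumbers(nil, "", "two")
	require.NoError(t, err)
	require.Equal(t, []uint32{1}, cn)
}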
@@ -1,69 +0,0 @@
package handler

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestCopiesNumberPicker(t *testing.T) {
	var locationConstraints = map[string][]uint32{}
	locationConstraint1 := "one"
	locationConstraint2 := "two"
	locationConstraints[locationConstraint1] = []uint32{2, 3, 4}

	config := &configMock{
		copiesNumbers:        locationConstraints,
		defaultCopiesNumbers: []uint32{1},
	}
	h := handler{
		cfg: config,
	}

	metadata := map[string]string{}

	t.Run("pick default copies number", func(t *testing.T) {
		metadata["somekey1"] = "5, 6, 7"
		expectedCopiesNumbers := []uint32{1}

		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
		require.NoError(t, err)
		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
	})

	t.Run("pick copies number vector according to location constraint", func(t *testing.T) {
		metadata["somekey2"] = "6, 7, 8"
		expectedCopiesNumbers := []uint32{2, 3, 4}

		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint1)
		require.NoError(t, err)
		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
	})

	t.Run("pick copies number from metadata", func(t *testing.T) {
		metadata["frostfs-copies-number"] = "7, 8, 9"
		expectedCopiesNumbers := []uint32{7, 8, 9}

		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
		require.NoError(t, err)
		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
	})

	t.Run("pick copies number from metadata with no space", func(t *testing.T) {
		metadata["frostfs-copies-number"] = "7,8,9"
		expectedCopiesNumbers := []uint32{7, 8, 9}

		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
		require.NoError(t, err)
		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
	})

	t.Run("pick copies number from metadata with trailing comma", func(t *testing.T) {
		metadata["frostfs-copies-number"] = "11, 12, 13, "
		expectedCopiesNumbers := []uint32{11, 12, 13}

		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
		require.NoError(t, err)
		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
	})
}
@@ -1,273 +0,0 @@
package handler

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"go.uber.org/zap"
)

type (
	GetObjectAttributesResponse struct {
		ETag         string       `xml:"ETag,omitempty"`
		Checksum     *Checksum    `xml:"Checksum,omitempty"`
		ObjectSize   uint64       `xml:"ObjectSize,omitempty"`
		StorageClass string       `xml:"StorageClass,omitempty"`
		ObjectParts  *ObjectParts `xml:"ObjectParts,omitempty"`
	}

	Checksum struct {
		ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
	}

	ObjectParts struct {
		IsTruncated          bool   `xml:"IsTruncated,omitempty"`
		MaxParts             int    `xml:"MaxParts,omitempty"`
		NextPartNumberMarker int    `xml:"NextPartNumberMarker,omitempty"`
		PartNumberMarker     int    `xml:"PartNumberMarker,omitempty"`
		Parts                []Part `xml:"Part,omitempty"`
		PartsCount           int    `xml:"PartsCount,omitempty"`
	}

	Part struct {
		ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
		PartNumber     int    `xml:"PartNumber,omitempty"`
		Size           int    `xml:"Size,omitempty"`
	}

	GetObjectAttributesArgs struct {
		MaxParts         int
		PartNumberMarker int
		Attributes       []string
		VersionID        string
		Conditional      *conditionalArgs
	}
)

const (
	eTag         = "ETag"
	checksum     = "Checksum"
	objectParts  = "ObjectParts"
	storageClass = "StorageClass"
	objectSize   = "ObjectSize"
)

var validAttributes = map[string]struct{}{
	eTag:         {},
	checksum:     {},
	objectParts:  {},
	storageClass: {},
	objectSize:   {},
}

func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Request) {
	ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.GetObjectAttributes")
	defer span.End()

	reqInfo := middleware.GetReqInfo(ctx)

	params, err := parseGetObjectAttributeArgs(r, h.reqLogger(ctx))
	if err != nil {
		h.logAndSendError(ctx, w, "invalid request", reqInfo, err)
		return
	}

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
		return
	}

	p := &layer.HeadObjectParams{
		BktInfo:   bktInfo,
		Object:    reqInfo.ObjectName,
		VersionID: params.VersionID,
	}

	extendedInfo, err := h.obj.GetExtendedObjectInfo(ctx, p)
	if err != nil {
		h.logAndSendError(ctx, w, "could not fetch object info", reqInfo, err)
		return
	}
	info := extendedInfo.ObjectInfo

	encryptionParams, err := h.formEncryptionParams(r)
	if err != nil {
		h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
		return
	}

	if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
		h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
		return
	}

	if err = checkPreconditions(info, params.Conditional, h.cfg.MD5Enabled()); err != nil {
		h.logAndSendError(ctx, w, "precondition failed", reqInfo, err)
		return
	}

	bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
		return
	}

	response, err := encodeToObjectAttributesResponse(info, params, h.cfg.MD5Enabled())
	if err != nil {
		h.logAndSendError(ctx, w, "couldn't encode object info to response", reqInfo, err)
		return
	}

	writeAttributesHeaders(w.Header(), extendedInfo, bktSettings.Unversioned())
	if err = middleware.EncodeToResponse(w, response); err != nil {
		h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
	}
}

func writeAttributesHeaders(h http.Header, info *data.ExtendedObjectInfo, isBucketUnversioned bool) {
	h.Set(api.LastModified, info.ObjectInfo.Created.UTC().Format(http.TimeFormat))
	if !isBucketUnversioned {
		h.Set(api.AmzVersionID, info.Version())
	}

	if info.NodeVersion.IsDeleteMarker {
		h.Set(api.AmzDeleteMarker, strconv.FormatBool(true))
	}

	// x-amz-request-charged
}

func parseGetObjectAttributeArgs(r *http.Request, log *zap.Logger) (*GetObjectAttributesArgs, error) {
	res := &GetObjectAttributesArgs{
		VersionID: r.URL.Query().Get(api.QueryVersionID),
	}

	attributesVal := r.Header.Get(api.AmzObjectAttributes)
	if attributesVal == "" {
		return nil, errors.GetAPIError(errors.ErrInvalidAttributeName)
	}

	attributes := strings.Split(attributesVal, ",")
	for _, a := range attributes {
		if _, ok := validAttributes[a]; !ok {
			return nil, errors.GetAPIError(errors.ErrInvalidAttributeName)
		}
		res.Attributes = append(res.Attributes, a)
	}

	var err error
	maxPartsVal := r.Header.Get(api.AmzMaxParts)
	if maxPartsVal == "" {
		res.MaxParts = layer.MaxSizePartsList
	} else if res.MaxParts, err = strconv.Atoi(maxPartsVal); err != nil || res.MaxParts < 0 {
		return nil, errors.GetAPIError(errors.ErrInvalidMaxKeys)
	}

	markerVal := r.Header.Get(api.AmzPartNumberMarker)
	if markerVal != "" {
		if res.PartNumberMarker, err = strconv.Atoi(markerVal); err != nil || res.PartNumberMarker < 0 {
			return nil, errors.GetAPIError(errors.ErrInvalidPartNumberMarker)
		}
	}

	res.Conditional = parseConditionalHeaders(r.Header, log)
	return res, nil
}

func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttributesArgs, md5Enabled bool) (*GetObjectAttributesResponse, error) {
	resp := &GetObjectAttributesResponse{}

	for _, attr := range p.Attributes {
		switch attr {
		case eTag:
			resp.ETag = data.Quote(info.ETag(md5Enabled))
		case storageClass:
			resp.StorageClass = api.DefaultStorageClass
		case objectSize:
			resp.ObjectSize = info.Size
		case checksum:
			checksumBytes, err := hex.DecodeString(info.HashSum)
			if err != nil {
				return nil, fmt.Errorf("form upload attributes: %w", err)
			}
			resp.Checksum = &Checksum{ChecksumSHA256: base64.StdEncoding.EncodeToString(checksumBytes)}
		case objectParts:
			parts, err := formUploadAttributes(info, p.MaxParts, p.PartNumberMarker)
			if err != nil {
				return nil, fmt.Errorf("form upload attributes: %w", err)
			}
			if parts != nil {
				resp.ObjectParts = parts
			}
		}
	}

	return resp, nil
}

func formUploadAttributes(info *data.ObjectInfo, maxParts, marker int) (*ObjectParts, error) {
	completedParts, ok := info.Headers[layer.UploadCompletedParts]
	if !ok {
		return nil, nil
	}

	partInfos := strings.Split(completedParts, ",")
	parts := make([]Part, len(partInfos))
	for i, p := range partInfos {
		part, err := layer.ParseCompletedPartHeader(p)
		if err != nil {
			return nil, fmt.Errorf("invalid completed part: %w", err)
		}
		// ETag value contains SHA256 checksum.
		checksumBytes, err := hex.DecodeString(part.ETag)
		if err != nil {
			return nil, fmt.Errorf("invalid sha256 checksum in completed part: %w", err)
		}
		parts[i] = Part{
			PartNumber:     part.PartNumber,
			Size:           int(part.Size),
			ChecksumSHA256: base64.StdEncoding.EncodeToString(checksumBytes),
		}
	}

	res := &ObjectParts{
		PartsCount: len(parts),
	}

	if marker != 0 {
		res.PartNumberMarker = marker
		var found bool
		for i, n := range parts {
			if n.PartNumber == marker {
				parts = parts[i:]
				found = true
				break
			}
		}
		if !found {
			return nil, errors.GetAPIError(errors.ErrInvalidPartNumberMarker)
		}
	}

	res.MaxParts = maxParts
	if len(parts) > maxParts {
		res.IsTruncated = true
		res.NextPartNumberMarker = parts[maxParts].PartNumber
		parts = parts[:maxParts]
	}

	res.Parts = parts

	return res, nil
}
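// NOTE (editor sketch): formUploadAttributes paginates with a part-number
// marker that must match an existing part, then truncates to maxParts and
// records the next marker. Below is a standalone re-run of just that slicing
// logic (no call into the handler), assuming completed parts 1..3.
func TestObjectPartsPaginationSketch(t *testing.T) {
	parts := []Part{{PartNumber: 1}, {PartNumber: 2}, {PartNumber: 3}}
	marker, maxParts := 2, 1

	res := &ObjectParts{PartsCount: len(parts), PartNumberMarker: marker}
	// Skip everything before the marker, as formUploadAttributes does.
	for i, p := range parts {
		if p.PartNumber == marker {
			parts = parts[i:]
			break
		}
	}
	res.MaxParts = maxParts
	if len(parts) > maxParts {
		res.IsTruncated = true
		res.NextPartNumberMarker = parts[maxParts].PartNumber
		parts = parts[:maxParts]
	}
	res.Parts = parts

	// With marker=2 and maxParts=1 over parts 1..3: part 2 is returned,
	// the listing is truncated, and the next marker points at part 3.
	require.True(t, res.IsTruncated)
	require.Equal(t, 3, res.NextPartNumberMarker)
	require.Equal(t, []Part{{PartNumber: 2}}, res.Parts)
}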
@@ -1,48 +0,0 @@
package handler

import (
	"encoding/base64"
	"encoding/hex"
	"strings"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"github.com/stretchr/testify/require"
)

func TestGetObjectPartsAttributes(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-get-attributes"
	objName, objMultipartName := "object", "object-multipart"
	partSize := 8

	createTestBucket(hc, bktName)

	putObject(hc, bktName, objName)
	result := getObjectAttributes(hc, bktName, objName, objectParts)
	require.Nil(t, result.ObjectParts)

	multipartUpload := createMultipartUpload(hc, bktName, objMultipartName, map[string]string{})
	etag, _ := uploadPart(hc, bktName, objMultipartName, multipartUpload.UploadID, 1, partSize)
	completeMultipartUpload(hc, bktName, objMultipartName, multipartUpload.UploadID, []string{etag})
	etagBytes, err := hex.DecodeString(etag[1 : len(etag)-1])
	require.NoError(t, err)

	result = getObjectAttributes(hc, bktName, objMultipartName, objectParts)
	require.NotNil(t, result.ObjectParts)
	require.Len(t, result.ObjectParts.Parts, 1)
	require.Equal(t, base64.StdEncoding.EncodeToString(etagBytes), result.ObjectParts.Parts[0].ChecksumSHA256)
	require.Equal(t, partSize, result.ObjectParts.Parts[0].Size)
	require.Equal(t, 1, result.ObjectParts.PartsCount)
}

func getObjectAttributes(hc *handlerContext, bktName, objName string, attrs ...string) *GetObjectAttributesResponse {
	w, r := prepareTestRequest(hc, bktName, objName, nil)
	r.Header.Set(api.AmzObjectAttributes, strings.Join(attrs, ","))
	hc.Handler().GetObjectAttributesHandler(w, r)
	result := &GetObjectAttributesResponse{}
	parseTestResponse(hc.t, w, result)

	return result
}
@@ -1,73 +0,0 @@
package handler

import (
	"net/http"
	"strconv"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)

const maxBucketList = 10000

// ListBucketsHandler handles bucket listing requests.
func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
	ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.ListBuckets")
	defer span.End()

	reqInfo := middleware.GetReqInfo(ctx)

	params, err := parseListBucketParams(r)
	if err != nil {
		h.logAndSendError(ctx, w, "failed to parse params", reqInfo, err)
		return
	}

	resp, err := h.obj.ListBuckets(ctx, params)
	if err != nil {
		h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
		return
	}

	if err = middleware.EncodeToResponse(w, encodeListBuckets(reqInfo.User, resp, params)); err != nil {
		h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
	}
}

func encodeListBuckets(owner string, resp layer.ListBucketsResult, params layer.ListBucketsParams) *ListBucketsResponse {
	res := &ListBucketsResponse{
		Owner: Owner{
			ID:          owner,
			DisplayName: owner,
		},
		ContinuationToken: resp.ContinuationToken,
		Prefix:            params.Prefix,
	}

	for _, item := range resp.Containers {
		res.Buckets.Buckets = append(res.Buckets.Buckets, Bucket{
			Name:         item.Name,
			CreationDate: item.Created.UTC().Format(time.RFC3339),
			BucketRegion: item.LocationConstraint,
		})
	}
	return res
}

func parseListBucketParams(r *http.Request) (prm layer.ListBucketsParams, err error) {
	prm.MaxBuckets = maxBucketList
	strMaxBuckets := r.URL.Query().Get(middleware.QueryMaxBuckets)
	if strMaxBuckets != "" {
		if prm.MaxBuckets, err = strconv.Atoi(strMaxBuckets); err != nil || prm.MaxBuckets < 0 {
			return layer.ListBucketsParams{}, errors.GetAPIError(errors.ErrInvalidMaxKeys)
		}
	}
	prm.Prefix = r.URL.Query().Get(middleware.QueryPrefix)
	prm.BucketRegion = r.URL.Query().Get(middleware.QueryBucketRegion)
	prm.ContinuationToken = r.URL.Query().Get(middleware.QueryContinuationToken)

	return
}
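// NOTE (editor sketch): a hedged illustration of the query parsing above; it
// assumes the middleware constants resolve to the AWS query names
// "max-buckets" and "prefix", which is not confirmed by this diff.
func TestParseListBucketParamsSketch(t *testing.T) {
	r := httptest.NewRequest(http.MethodGet, "/?max-buckets=2&prefix=buck", nil)

	prm, err := parseListBucketParams(r)
	require.NoError(t, err)
	require.Equal(t, 2, prm.MaxBuckets)
	require.Equal(t, "buck", prm.Prefix)

	// Absent max-buckets falls back to the 10000 cap above; a non-numeric
	// or negative value yields ErrInvalidMaxKeys.
	r = httptest.NewRequest(http.MethodGet, "/?max-buckets=-1", nil)
	_, err = parseListBucketParams(r)
	require.Error(t, err)
}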
@@ -1,174 +0,0 @@
package handler

import (
	"encoding/xml"
	"net/http"
	"net/http/httptest"
	"net/url"
	"sort"
	"testing"

	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"github.com/stretchr/testify/require"
)

func TestHandler_ListBucketsHandler(t *testing.T) {
	const defaultConstraint = "default"

	region := "us-west-1"
	hc := prepareHandlerContext(t)
	hc.config.putLocationConstraint(region)

	props := []Bucket{
		{Name: "first"},
		{Name: "regional", BucketRegion: "us-west-1"},
		{Name: "third"},
	}
	sort.Slice(props, func(i, j int) bool {
		return props[i].Name < props[j].Name
	})
	for _, bkt := range props {
		createBucketWithConstraint(hc, bkt.Name, bkt.BucketRegion)
	}

	for _, tt := range []struct {
		title         string
		token         string
		prefix        string
		bucketRegion  string
		maxBuckets    string
		expectErr     bool
		expected      []Bucket
		expectedToken string
	}{
		{
			title: "no params",
			expected: []Bucket{
				{Name: "first", BucketRegion: defaultConstraint},
				{Name: "regional", BucketRegion: "us-west-1"},
				{Name: "third", BucketRegion: defaultConstraint},
			},
		},
		{
			title:      "negative max-buckets",
			maxBuckets: "-1",
			expected:   []Bucket{},
			expectErr:  true,
		},
		{
			title:      "zero max-buckets",
			maxBuckets: "0",
			expected:   []Bucket{},
		},
		{
			title:    "prefix",
			prefix:   "thi",
			expected: []Bucket{{Name: "third", BucketRegion: defaultConstraint}},
		},
		{
			title:    "wrong prefix",
			prefix:   "sdh",
			expected: []Bucket{},
		},
		{
			title:        "bucket region",
			bucketRegion: region,
			expected:     []Bucket{{Name: "regional", BucketRegion: "us-west-1"}},
		},
		{
			title:        "default bucket region",
			bucketRegion: defaultConstraint,
			expected: []Bucket{
				{Name: "first", BucketRegion: defaultConstraint},
				{Name: "third", BucketRegion: defaultConstraint},
			},
		},
		{
			title:        "wrong bucket region",
			bucketRegion: "sj dfdlsj",
			expected:     []Bucket{},
		},
	} {
		t.Run(tt.title, func(t *testing.T) {
			if tt.expectErr {
				listBucketsErr(hc, tt.prefix, tt.token, tt.bucketRegion, tt.maxBuckets, apierr.GetAPIError(apierr.ErrInvalidMaxKeys))
				return
			}

			resp := listBuckets(hc, tt.prefix, tt.token, tt.bucketRegion, tt.maxBuckets)
			require.Len(t, resp.Buckets.Buckets, len(tt.expected))
			require.Equal(t, tt.prefix, resp.Prefix)
			require.Equal(t, hc.owner.String(), resp.Owner.ID)
			if len(resp.Buckets.Buckets) > 0 {
				t.Log(resp.Buckets.Buckets[0].Name)
			}
			for i, bkt := range resp.Buckets.Buckets {
				require.Equal(t, tt.expected[i].Name, bkt.Name)
				require.Equal(t, tt.expected[i].BucketRegion, bkt.BucketRegion)
			}
		})
	}

	t.Run("pagination", func(t *testing.T) {
		t.Run("happy path", func(t *testing.T) {
			resp := listBuckets(hc, "", "", "", "1")
			require.Len(t, resp.Buckets.Buckets, 1)
			require.Equal(t, props[0].Name, resp.Buckets.Buckets[0].Name)
			require.NotEmpty(t, resp.ContinuationToken)

			resp = listBuckets(hc, "", resp.ContinuationToken, "", "1")
			require.Len(t, resp.Buckets.Buckets, 1)
			require.Equal(t, props[1].Name, resp.Buckets.Buckets[0].Name)
			require.NotEmpty(t, resp.ContinuationToken)

			resp = listBuckets(hc, "", resp.ContinuationToken, "", "1")
			require.Len(t, resp.Buckets.Buckets, 1)
			require.Equal(t, props[2].Name, resp.Buckets.Buckets[0].Name)
			require.Empty(t, resp.ContinuationToken)
		})

		t.Run("wrong continuation-token", func(t *testing.T) {
			resp := listBuckets(hc, "", "CebuVwfRpdMqi9dvgV2SUNbrkfteGtudchKKhNabXUu9", "", "1")
			require.Len(t, resp.Buckets.Buckets, 0)
			require.Empty(t, resp.ContinuationToken)
		})
	})
}

func listBuckets(hc *handlerContext, prefix, token, bucketRegion, maxBuckets string) ListBucketsResponse {
	query := url.Values{
		middleware.QueryPrefix:            []string{prefix},
		middleware.QueryContinuationToken: []string{token},
		middleware.QueryBucketRegion:      []string{bucketRegion},
		middleware.QueryMaxBuckets:        []string{maxBuckets},
	}
	w := listBucketsBase(hc, bucketPrm{query: query})
	assertStatus(hc.t, w, http.StatusOK)
	var resp ListBucketsResponse
	err := xml.NewDecoder(w.Body).Decode(&resp)
	require.NoError(hc.t, err)

	return resp
}

func listBucketsErr(hc *handlerContext, prefix, token, bucketRegion, maxBuckets string, err apierr.Error) {
	query := url.Values{
		middleware.QueryPrefix:            []string{prefix},
		middleware.QueryContinuationToken: []string{token},
		middleware.QueryBucketRegion:      []string{bucketRegion},
		middleware.QueryMaxBuckets:        []string{maxBuckets},
	}
	w := listBucketsBase(hc, bucketPrm{query: query})
	assertS3Error(hc.t, w, err)
}

func listBucketsBase(hc *handlerContext, prm bucketPrm) *httptest.ResponseRecorder {
	box, _ := createAccessBox(hc.t)
	w, r := prepareTestFullRequest(hc, "", "", prm.query, nil)
	ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
	r = r.WithContext(ctx)
	hc.Handler().ListBucketsHandler(w, r)

	return w
}
@@ -1,315 +0,0 @@
package handler

import (
	"net/http"
	"net/url"
	"regexp"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"go.uber.org/zap"
)

type copyObjectArgs struct {
	Conditional       *conditionalArgs
	MetadataDirective string
	TaggingDirective  string
}

const (
	replaceDirective = "REPLACE"
	copyDirective    = "COPY"
)

var copySourceMatcher = auth.NewRegexpMatcher(regexp.MustCompile(`^/?(?P<bucket_name>[a-z0-9.\-]{3,63})/(?P<object_name>.+)$`))

// path2BucketObject returns a bucket and an object.
func path2BucketObject(path string) (string, string, error) {
	matches := copySourceMatcher.GetSubmatches(path)
	if len(matches) != 2 {
		return "", "", errors.GetAPIError(errors.ErrInvalidRequest)
	}

	return matches["bucket_name"], matches["object_name"], nil
}

func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.CopyObject")
	defer span.End()

	var (
		err       error
		versionID string
		metadata  map[string]string
		tagSet    map[string]string
	)

	reqInfo := middleware.GetReqInfo(ctx)
	cannedACLStatus := aclHeadersStatus(r)

	src := r.Header.Get(api.AmzCopySource)
	// Check https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html
	// Regardless of whether you have enabled versioning, each object in your bucket
	// has a version ID. If you have not enabled versioning, Amazon S3 sets the value
	// of the version ID to null. If you have enabled versioning, Amazon S3 assigns a
	// unique version ID value for the object.
	if u, err := url.Parse(src); err == nil {
		versionID = u.Query().Get(api.QueryVersionID)
		src = u.Path
	}

	srcBucket, srcObject, err := path2BucketObject(src)
	if err != nil {
		h.logAndSendError(ctx, w, "invalid source copy", reqInfo, err)
		return
	}

	srcObjPrm := &layer.HeadObjectParams{
		Object:    srcObject,
		VersionID: versionID,
	}

	if srcObjPrm.BktInfo, err = h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner); err != nil {
		h.logAndSendError(ctx, w, "couldn't get source bucket", reqInfo, err)
		return
	}

	dstBktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(ctx, w, "couldn't get target bucket", reqInfo, err)
		return
	}

	settings, err := h.obj.GetBucketSettings(ctx, dstBktInfo)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
		return
	}

	if cannedACLStatus == aclStatusYes {
		h.logAndSendError(ctx, w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
		return
	}

	extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
	if err != nil {
		h.logAndSendError(ctx, w, "could not find object", reqInfo, err)
		return
	}
	srcObjInfo := extendedSrcObjInfo.ObjectInfo

	srcEncryptionParams, err := h.formCopySourceEncryptionParams(r)
	if err != nil {
		h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
		return
	}
	dstEncryptionParams, err := h.formEncryptionParams(r)
	if err != nil {
		h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
		return
	}

	if err = srcEncryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
		if errors.IsS3Error(err, errors.ErrInvalidEncryptionParameters) || errors.IsS3Error(err, errors.ErrSSEEncryptedObject) ||
			errors.IsS3Error(err, errors.ErrInvalidSSECustomerParameters) {
			h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, err, zap.Error(err))
			return
		}
		h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
		return
	}

	var dstSize uint64
	srcSize, err := layer.GetObjectSize(srcObjInfo)
	if err != nil {
		h.logAndSendError(ctx, w, "failed to get source object size", reqInfo, err)
		return
	} else if srcSize > layer.UploadMaxSize { // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
		h.logAndSendError(ctx, w, "too big object to copy with single copy operation, use multipart upload copy instead", reqInfo, errors.GetAPIError(errors.ErrInvalidRequestLargeCopy))
		return
	}
	dstSize = srcSize

	args, err := parseCopyObjectArgs(r.Header)
	if err != nil {
		h.logAndSendError(ctx, w, "could not parse request params", reqInfo, err)
		return
	}

	if isCopyingToItselfForbidden(reqInfo, srcBucket, srcObject, settings, args) {
		h.logAndSendError(ctx, w, "copying to itself without changing anything", reqInfo, errors.GetAPIError(errors.ErrInvalidCopyDest))
		return
	}

	if args.MetadataDirective == replaceDirective {
		metadata = parseMetadata(r)
	}

	if args.TaggingDirective == replaceDirective {
		tagSet, err = parseTaggingHeader(r.Header)
		if err != nil {
			h.logAndSendError(ctx, w, "could not parse tagging header", reqInfo, err)
			return
		}
	} else {
		tagPrm := &data.GetObjectTaggingParams{
			ObjectVersion: &data.ObjectVersion{
				BktInfo:    srcObjPrm.BktInfo,
				ObjectName: srcObject,
				VersionID:  srcObjInfo.VersionID(),
			},
			NodeVersion: extendedSrcObjInfo.NodeVersion,
		}

		_, tagSet, err = h.obj.GetObjectTagging(ctx, tagPrm)
		if err != nil {
			h.logAndSendError(ctx, w, "could not get object tagging", reqInfo, err)
			return
		}
	}

	if err = checkPreconditions(srcObjInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil {
		h.logAndSendError(ctx, w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
		return
	}

	if metadata == nil {
		if len(srcObjInfo.ContentType) > 0 {
			srcObjInfo.Headers[api.ContentType] = srcObjInfo.ContentType
		}
		metadata = makeCopyMap(srcObjInfo.Headers)
		filterMetadataMap(metadata)
	} else if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
		metadata[api.ContentType] = contentType
	}

	params := &layer.CopyObjectParams{
		SrcVersioned:  srcObjPrm.Versioned(),
		SrcObject:     srcObjInfo,
		ScrBktInfo:    srcObjPrm.BktInfo,
		DstBktInfo:    dstBktInfo,
		DstObject:     reqInfo.ObjectName,
		DstSize:       dstSize,
		Header:        metadata,
		SrcEncryption: srcEncryptionParams,
		DstEncryption: dstEncryptionParams,
	}

	params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, dstBktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
		return
	}

	params.Lock, err = formObjectLock(ctx, dstBktInfo, settings.LockConfiguration, r.Header)
	if err != nil {
		h.logAndSendError(ctx, w, "could not form object lock", reqInfo, err)
		return
	}

	additional := []zap.Field{zap.String("src_bucket_name", srcBucket), zap.String("src_object_name", srcObject)}
	extendedDstObjInfo, err := h.obj.CopyObject(ctx, params)
	if err != nil {
		h.logAndSendError(ctx, w, "couldn't copy object", reqInfo, err, additional...)
		return
	}
	dstObjInfo := extendedDstObjInfo.ObjectInfo

	if err = middleware.EncodeToResponse(w, &CopyObjectResponse{
		LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339),
		ETag:         data.Quote(dstObjInfo.ETag(h.cfg.MD5Enabled())),
	}); err != nil {
		h.logAndSendError(ctx, w, "something went wrong", reqInfo, err, additional...)
		return
	}

	if tagSet != nil {
		tagPrm := &data.PutObjectTaggingParams{
			ObjectVersion: &data.ObjectVersion{
				BktInfo:    dstBktInfo,
				ObjectName: reqInfo.ObjectName,
				VersionID:  dstObjInfo.VersionID(),
			},
			TagSet:      tagSet,
			NodeVersion: extendedDstObjInfo.NodeVersion,
		}
		if err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
			h.logAndSendError(ctx, w, "could not upload object tagging", reqInfo, err)
			return
		}
	}

	h.reqLogger(ctx).Info(logs.ObjectIsCopied, zap.Stringer("object_id", dstObjInfo.ID), logs.TagField(logs.TagExternalStorage))

	if dstEncryptionParams.Enabled() {
		addSSECHeaders(w.Header(), r.Header)
	}
}

func makeCopyMap(headers map[string]string) map[string]string {
	res := make(map[string]string, len(headers))
	for key, val := range headers {
		res[key] = val
	}
	return res
}

func filterMetadataMap(metadata map[string]string) {
	delete(metadata, layer.MultipartObjectSize) // object payload will be real one rather than list of compound parts
	for key := range layer.EncryptionMetadata {
		delete(metadata, key)
	}
}

func isCopyingToItselfForbidden(reqInfo *middleware.ReqInfo, srcBucket string, srcObject string, settings *data.BucketSettings, args *copyObjectArgs) bool {
	if reqInfo.BucketName != srcBucket || reqInfo.ObjectName != srcObject {
		return false
	}

	if !settings.Unversioned() {
		return false
	}

	return args.MetadataDirective != replaceDirective
}

func parseCopyObjectArgs(headers http.Header) (*copyObjectArgs, error) {
	var err error
	args := &conditionalArgs{
		IfMatch:     data.UnQuote(headers.Get(api.AmzCopyIfMatch)),
		IfNoneMatch: data.UnQuote(headers.Get(api.AmzCopyIfNoneMatch)),
	}

	if args.IfModifiedSince, err = parseHTTPTime(headers.Get(api.AmzCopyIfModifiedSince)); err != nil {
		return nil, err
	}
	if args.IfUnmodifiedSince, err = parseHTTPTime(headers.Get(api.AmzCopyIfUnmodifiedSince)); err != nil {
		return nil, err
	}

	copyArgs := &copyObjectArgs{Conditional: args}

	copyArgs.MetadataDirective = headers.Get(api.AmzMetadataDirective)
	if !isValidDirective(copyArgs.MetadataDirective) {
		return nil, errors.GetAPIError(errors.ErrInvalidMetadataDirective)
	}

	copyArgs.TaggingDirective = headers.Get(api.AmzTaggingDirective)
	if !isValidDirective(copyArgs.TaggingDirective) {
		return nil, errors.GetAPIError(errors.ErrInvalidTaggingDirective)
	}

	return copyArgs, nil
}

func isValidDirective(directive string) bool {
	return len(directive) == 0 ||
		directive == replaceDirective || directive == copyDirective
}
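// NOTE (editor sketch): CopyObjectHandler first strips an optional versionId
// query from x-amz-copy-source, then splits the remaining path with
// copySourceMatcher (optional leading slash, a 3-63 character bucket name,
// everything after the next slash is the object key). The literal query name
// "versionId" below is assumed to match api.QueryVersionID.
func TestCopySourceParsingSketch(t *testing.T) {
	src := "/my-bucket/dir/key.txt?versionId=abc123" // hypothetical source
	u, err := url.Parse(src)
	require.NoError(t, err)
	require.Equal(t, "abc123", u.Query().Get("versionId"))

	bkt, obj, err := path2BucketObject(u.Path)
	require.NoError(t, err)
	require.Equal(t, "my-bucket", bkt)
	require.Equal(t, "dir/key.txt", obj)

	// A source without an object part does not match the regexp.
	_, _, err = path2BucketObject("/my-bucket")
	require.Error(t, err)
}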
@@ -1,395 +0,0 @@
package handler

import (
	"crypto/md5"
	"crypto/tls"
	"encoding/base64"
	"encoding/xml"
	"net/http"
	"net/url"
	"strconv"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"github.com/stretchr/testify/require"
)

type CopyMeta struct {
	TaggingDirective  string
	Tags              map[string]string
	MetadataDirective string
	Metadata          map[string]string
	Headers           map[string]string
}

func TestCopyWithTaggingDirective(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-copy", "object-from-copy"
	objToCopy, objToCopy2 := "object-to-copy", "object-to-copy-2"
	createBucketAndObject(tc, bktName, objName)

	putObjectTagging(t, tc, bktName, objName, map[string]string{"key": "val"})

	copyMeta := CopyMeta{
		Tags: map[string]string{"key2": "val"},
	}
	copyObject(tc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
	tagging := getObjectTagging(t, tc, bktName, objToCopy, emptyVersion)
	require.Len(t, tagging.TagSet, 1)
	require.Equal(t, "key", tagging.TagSet[0].Key)
	require.Equal(t, "val", tagging.TagSet[0].Value)

	copyMeta.TaggingDirective = replaceDirective
	copyObject(tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
	tagging = getObjectTagging(t, tc, bktName, objToCopy2, emptyVersion)
	require.Len(t, tagging.TagSet, 1)
	require.Equal(t, "key2", tagging.TagSet[0].Key)
	require.Equal(t, "val", tagging.TagSet[0].Value)
}

func TestCopyToItself(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-copy", "object-for-copy"
	createBucketAndObject(tc, bktName, objName)

	copyMeta := CopyMeta{MetadataDirective: replaceDirective}

	copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest)
	copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)

	putBucketVersioning(t, tc, bktName, true)
	copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
	copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)

	putBucketVersioning(t, tc, bktName, false)
	copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
	copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
}

func TestCopyMultipart(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-copy", "object-for-copy"
	createTestBucket(hc, bktName)

	partSize := layer.UploadMinSize
	objLen := 6 * partSize
	headers := map[string]string{}

	data := multipartUpload(hc, bktName, objName, headers, objLen, partSize)
	require.Equal(t, objLen, len(data))

	objToCopy := "copy-target"
	var copyMeta CopyMeta
	copyObject(hc, bktName, objName, objToCopy, copyMeta, http.StatusOK)

	copiedData, _ := getObject(hc, bktName, objToCopy)
	equalDataSlices(t, data, copiedData)

	result := getObjectAttributes(hc, bktName, objToCopy, objectParts)
	require.NotNil(t, result.ObjectParts)

	objToCopy2 := "copy-target2"
	copyMeta.MetadataDirective = replaceDirective
	copyObject(hc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)

	result = getObjectAttributes(hc, bktName, objToCopy2, objectParts)
	require.Nil(t, result.ObjectParts)

	copiedData, _ = getObject(hc, bktName, objToCopy2)
	equalDataSlices(t, data, copiedData)
}

func TestCopyEncryptedToUnencrypted(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, srcObjName := "bucket-for-copy", "object-for-copy"
	key1 := []byte("firstencriptionkeyofsourceobject")
	key1Md5 := md5.Sum(key1)
	key2 := []byte("anotherencriptionkeysourceobject")
	key2Md5 := md5.Sum(key2)
	bktInfo := createTestBucket(tc, bktName)

	srcEnc, err := encryption.NewParams(key1)
	require.NoError(t, err)
	srcObjInfo := createTestObject(tc, bktInfo, srcObjName, *srcEnc)
	require.True(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))

	dstObjName := "copy-object"

	// empty copy-source-sse headers
	w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrSSEEncryptedObject))

	// empty copy-source-sse-custom-key
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrMissingSSECustomerKey))

	// empty copy-source-sse-custom-algorithm
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrMissingSSECustomerAlgorithm))

	// invalid copy-source-sse-custom-key
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key2))
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key2Md5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrInvalidSSECustomerParameters))

	// success copy
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key1Md5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusOK)
	dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
	require.NoError(t, err)
	require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], strconv.Itoa(int(dstObjInfo.Size)))
	require.False(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
}

func TestCopyUnencryptedToEncrypted(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, srcObjName := "bucket-for-copy", "object-for-copy"
	key := []byte("firstencriptionkeyofsourceobject")
	keyMd5 := md5.Sum(key)
	bktInfo := createTestBucket(tc, bktName)

	srcObjInfo := createTestObject(tc, bktInfo, srcObjName, encryption.Params{})
	require.False(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))

	dstObjName := "copy-object"

	// invalid copy-source-sse headers
	w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key))
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMd5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrInvalidEncryptionParameters))

	// success copy
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key))
	r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMd5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusOK)
	dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
	require.NoError(t, err)
	require.True(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
	require.Equal(t, strconv.Itoa(int(srcObjInfo.Size)), dstObjInfo.Headers[layer.AttributeDecryptedSize])
}

func TestCopyEncryptedToEncryptedWithAnotherKey(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, srcObjName := "bucket-for-copy", "object-for-copy"
	key1 := []byte("firstencriptionkeyofsourceobject")
	key1Md5 := md5.Sum(key1)
	key2 := []byte("anotherencriptionkeysourceobject")
	key2Md5 := md5.Sum(key2)
	bktInfo := createTestBucket(tc, bktName)

	srcEnc, err := encryption.NewParams(key1)
	require.NoError(t, err)
	srcObjInfo := createTestObject(tc, bktInfo, srcObjName, *srcEnc)
	require.True(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))

	dstObjName := "copy-object"

	w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key1Md5[:]))
|
|
||||||
r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
|
|
||||||
r.Header.Set(api.AmzServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key2))
|
|
||||||
r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key2Md5[:]))
|
|
||||||
tc.Handler().CopyObjectHandler(w, r)
|
|
||||||
|
|
||||||
assertStatus(t, w, http.StatusOK)
|
|
||||||
dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.True(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
|
|
||||||
require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], dstObjInfo.Headers[layer.AttributeDecryptedSize])
|
|
||||||
}
|
|
||||||
|
|
||||||
func containEncryptionMetadataHeaders(headers map[string]string) bool {
|
|
||||||
for k := range headers {
|
|
||||||
if _, ok := layer.EncryptionMetadata[k]; ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func copyObject(hc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) {
|
|
||||||
w, r := prepareTestRequest(hc, bktName, toObject, nil)
|
|
||||||
r.Header.Set(api.AmzCopySource, bktName+"/"+fromObject)
|
|
||||||
|
|
||||||
r.Header.Set(api.AmzMetadataDirective, copyMeta.MetadataDirective)
|
|
||||||
for key, val := range copyMeta.Metadata {
|
|
||||||
r.Header.Set(api.MetadataPrefix+key, val)
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Header.Set(api.AmzTaggingDirective, copyMeta.TaggingDirective)
|
|
||||||
tagsQuery := make(url.Values)
|
|
||||||
for key, val := range copyMeta.Tags {
|
|
||||||
tagsQuery.Set(key, val)
|
|
||||||
}
|
|
||||||
r.Header.Set(api.AmzTagging, tagsQuery.Encode())
|
|
||||||
|
|
||||||
for key, val := range copyMeta.Headers {
|
|
||||||
r.Header.Set(key, val)
|
|
||||||
}
|
|
||||||
|
|
||||||
hc.Handler().CopyObjectHandler(w, r)
|
|
||||||
assertStatus(hc.t, w, statusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func putObjectTagging(t *testing.T, tc *handlerContext, bktName, objName string, tags map[string]string) {
|
|
||||||
body := &data.Tagging{
|
|
||||||
TagSet: make([]data.Tag, 0, len(tags)),
|
|
||||||
}
|
|
||||||
|
|
||||||
for key, val := range tags {
|
|
||||||
body.TagSet = append(body.TagSet, data.Tag{
|
|
||||||
Key: key,
|
|
||||||
Value: val,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
w, r := prepareTestRequest(tc, bktName, objName, body)
|
|
||||||
middleware.GetReqInfo(r.Context()).Tagging = body
|
|
||||||
tc.Handler().PutObjectTaggingHandler(w, r)
|
|
||||||
assertStatus(t, w, http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getObjectTagging(t *testing.T, tc *handlerContext, bktName, objName, version string) *data.Tagging {
|
|
||||||
query := make(url.Values)
|
|
||||||
query.Add(api.QueryVersionID, version)
|
|
||||||
|
|
||||||
w, r := prepareTestFullRequest(tc, bktName, objName, query, nil)
|
|
||||||
tc.Handler().GetObjectTaggingHandler(w, r)
|
|
||||||
assertStatus(t, w, http.StatusOK)
|
|
||||||
|
|
||||||
tagging := &data.Tagging{}
|
|
||||||
err := xml.NewDecoder(w.Result().Body).Decode(tagging)
|
|
||||||
require.NoError(t, err)
|
|
||||||
return tagging
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSourceCopyRegexp(t *testing.T) {
|
|
||||||
for _, tc := range []struct {
|
|
||||||
path string
|
|
||||||
err bool
|
|
||||||
bktName string
|
|
||||||
objName string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
path: "/bucket/object",
|
|
||||||
err: false,
|
|
||||||
bktName: "bucket",
|
|
||||||
objName: "object",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
path: "bucket/object",
|
|
||||||
err: false,
|
|
||||||
bktName: "bucket",
|
|
||||||
objName: "object",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
path: "sub-bucket/object",
|
|
||||||
err: false,
|
|
||||||
bktName: "sub-bucket",
|
|
||||||
objName: "object",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
path: "bucket.domain/object",
|
|
||||||
err: false,
|
|
||||||
bktName: "bucket.domain",
|
|
||||||
objName: "object",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
path: "bucket/object/deep",
|
|
||||||
err: false,
|
|
||||||
bktName: "bucket",
|
|
||||||
objName: "object/deep",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
path: "bucket",
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
path: "/bucket",
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
path: "invalid+bucket/object",
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
path: "invaliDBucket/object",
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
path: "i/object",
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run("", func(t *testing.T) {
|
|
||||||
bktName, objName, err := path2BucketObject(tc.path)
|
|
||||||
if tc.err {
|
|
||||||
require.Error(t, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, tc.bktName, bktName)
|
|
||||||
require.Equal(t, tc.objName, objName)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
|
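All of the SSE-C assertions above build the same header triple: the algorithm name, the base64-encoded raw key, and the base64-encoded MD5 digest of that raw key. As a minimal, self-contained sketch of that derivation (the key bytes are the test's dummy 32-byte literal; the header names are the standard AWS SSE-C copy-source headers, not gateway-specific):

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	key := []byte("firstencriptionkeyofsourceobject") // 32 bytes -> AES-256 key material
	sum := md5.Sum(key)                               // integrity check for the key, not the payload
	fmt.Println("x-amz-copy-source-server-side-encryption-customer-algorithm: AES256")
	fmt.Println("x-amz-copy-source-server-side-encryption-customer-key:", base64.StdEncoding.EncodeToString(key))
	fmt.Println("x-amz-copy-source-server-side-encryption-customer-key-MD5:", base64.StdEncoding.EncodeToString(sum[:]))
}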
@@ -1,264 +0,0 @@
package handler

import (
	"net/http"
	"strconv"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	qostagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"go.uber.org/zap"
)

const (
	// DefaultMaxAge is a default value of Access-Control-Max-Age if this value is not set in a rule.
	DefaultMaxAge = 600
	wildcard      = "*"
)

func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
	ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.GetBucketCors")
	defer span.End()

	ctx = qostagging.ContextWithIOTag(ctx, util.InternalIOTag)
	reqInfo := middleware.GetReqInfo(ctx)

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
		return
	}

	cors, err := h.obj.GetBucketCORS(ctx, bktInfo, h.cfg.NewXMLDecoder)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get cors", reqInfo, err)
		return
	}

	if err = middleware.EncodeToResponse(w, cors); err != nil {
		h.logAndSendError(ctx, w, "could not encode cors to response", reqInfo, err)
		return
	}
}

func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
	ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.PutBucketCors")
	defer span.End()

	ctx = qostagging.ContextWithIOTag(ctx, util.InternalIOTag)
	reqInfo := middleware.GetReqInfo(ctx)

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
		return
	}

	p := &layer.PutCORSParams{
		BktInfo:    bktInfo,
		Reader:     r.Body,
		NewDecoder: h.cfg.NewXMLDecoder,
		UserAgent:  r.UserAgent(),
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
		return
	}

	if err = h.obj.PutBucketCORS(ctx, p); err != nil {
		h.logAndSendError(ctx, w, "could not put cors configuration", reqInfo, err)
		return
	}

	if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
		h.logAndSendError(ctx, w, "write response", reqInfo, err)
		return
	}
}

func (h *handler) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
	ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.DeleteBucketCors")
	defer span.End()

	ctx = qostagging.ContextWithIOTag(ctx, util.InternalIOTag)
	reqInfo := middleware.GetReqInfo(ctx)

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
		return
	}

	if err = h.obj.DeleteBucketCORS(ctx, bktInfo); err != nil {
		h.logAndSendError(ctx, w, "could not delete cors", reqInfo, err)
		return // the error response has already been sent; do not write the 204 below
	}

	w.WriteHeader(http.StatusNoContent)
}

func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
	ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.AppendCORSHeaders")
	defer span.End()

	if r.Method == http.MethodOptions {
		return
	}
	origin := r.Header.Get(api.Origin)
	if origin == "" {
		return
	}

	ctx = qostagging.ContextWithIOTag(ctx, util.InternalIOTag)
	reqInfo := middleware.GetReqInfo(ctx)
	if reqInfo.BucketName == "" {
		return
	}
	bktInfo, err := h.getBucketInfo(ctx, reqInfo.BucketName)
	if err != nil {
		h.reqLogger(ctx).Warn(logs.GetBucketInfo, zap.Error(err), logs.TagField(logs.TagDatapath))
		return
	}

	cors, err := h.obj.GetBucketCORS(ctx, bktInfo, h.cfg.NewXMLDecoder)
	if err != nil {
		h.reqLogger(ctx).Warn(logs.GetBucketCors, zap.Error(err), logs.TagField(logs.TagDatapath))
		return
	}

	withCredentials := r.Header.Get(api.Authorization) != ""

	for _, rule := range cors.CORSRules {
		for _, o := range rule.AllowedOrigins {
			if o == origin {
				for _, m := range rule.AllowedMethods {
					if m == r.Method {
						w.Header().Set(api.AccessControlAllowOrigin, origin)
						w.Header().Set(api.AccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
						w.Header().Set(api.AccessControlAllowCredentials, "true")
						w.Header().Set(api.Vary, api.Origin)
						return
					}
				}
			}
			if o == wildcard {
				for _, m := range rule.AllowedMethods {
					if m == r.Method {
						if withCredentials {
							w.Header().Set(api.AccessControlAllowOrigin, origin)
							w.Header().Set(api.AccessControlAllowCredentials, "true")
							w.Header().Set(api.Vary, api.Origin)
						} else {
							w.Header().Set(api.AccessControlAllowOrigin, o)
						}
						w.Header().Set(api.AccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
						return
					}
				}
			}
		}
	}
}

func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
	ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.Preflight")
	defer span.End()

	ctx = qostagging.ContextWithIOTag(ctx, util.InternalIOTag)
	reqInfo := middleware.GetReqInfo(ctx)
	bktInfo, err := h.getBucketInfo(ctx, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
		return
	}

	origin := r.Header.Get(api.Origin)
	if origin == "" {
		h.logAndSendError(ctx, w, "origin request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
		return // an empty origin cannot match any rule; the error response has already been sent
	}

	method := r.Header.Get(api.AccessControlRequestMethod)
	if method == "" {
		h.logAndSendError(ctx, w, "Access-Control-Request-Method request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
		return
	}

	var headers []string
	requestHeaders := r.Header.Get(api.AccessControlRequestHeaders)
	if requestHeaders != "" {
		headers = strings.Split(requestHeaders, ", ")
	}

	cors, err := h.obj.GetBucketCORS(ctx, bktInfo, h.cfg.NewXMLDecoder)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get cors", reqInfo, err)
		return
	}

	for _, rule := range cors.CORSRules {
		for _, o := range rule.AllowedOrigins {
			if o == origin || o == wildcard {
				for _, m := range rule.AllowedMethods {
					if m == method {
						if !checkSubslice(rule.AllowedHeaders, headers) {
							continue
						}
						w.Header().Set(api.AccessControlAllowOrigin, origin)
						w.Header().Set(api.AccessControlAllowMethods, method)
						if headers != nil {
							w.Header().Set(api.AccessControlAllowHeaders, requestHeaders)
						}
						if rule.ExposeHeaders != nil {
							w.Header().Set(api.AccessControlExposeHeaders, strings.Join(rule.ExposeHeaders, ", "))
						}
						if rule.MaxAgeSeconds > 0 || rule.MaxAgeSeconds == -1 {
							w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(rule.MaxAgeSeconds))
						} else {
							w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(h.cfg.DefaultMaxAge()))
						}
						if o != wildcard {
							w.Header().Set(api.AccessControlAllowCredentials, "true")
						}
						if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
							h.logAndSendError(ctx, w, "write response", reqInfo, err)
							return
						}
						return
					}
				}
			}
		}
	}
	h.logAndSendError(ctx, w, "Forbidden", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
}

func checkSubslice(slice []string, subSlice []string) bool {
	if sliceContains(slice, wildcard) {
		return true
	}
	if len(subSlice) > len(slice) {
		return false
	}
	for _, r := range subSlice {
		if !sliceContains(slice, r) {
			return false
		}
	}
	return true
}

func sliceContains(slice []string, str string) bool {
	for _, s := range slice {
		if s == str {
			return true
		}
	}
	return false
}
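One subtlety shared by AppendCORSHeaders and Preflight above: when the matching rule's AllowedOrigin is the wildcard, a literal "*" must not be combined with credentials, so the handlers echo the caller's concrete origin (and set Allow-Credentials plus Vary) for authorized requests, and return "*" literally only for anonymous ones. A small sketch of that decision, extracted for clarity (the function name is ours, not part of the gateway):

// allowOriginValue mirrors the wildcard/credentials branch in the handlers above.
func allowOriginValue(requestOrigin, ruleOrigin string, withCredentials bool) (value string, allowCredentials bool) {
	if ruleOrigin != "*" {
		// Exact origin match: echo it back and permit credentials.
		return requestOrigin, true
	}
	if withCredentials {
		// "*" may not be paired with credentials, so echo the concrete origin instead.
		return requestOrigin, true
	}
	// Anonymous request against a wildcard rule: "*" is returned literally.
	return "*", false
}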
@@ -1,238 +0,0 @@
package handler

import (
	"net/http"
	"strings"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"github.com/stretchr/testify/require"
)

func TestCORSOriginWildcard(t *testing.T) {
	body := `
	<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
		<CORSRule>
			<AllowedMethod>GET</AllowedMethod>
			<AllowedOrigin>*</AllowedOrigin>
		</CORSRule>
	</CORSConfiguration>
	`
	bodyNoXmlns := `
	<CORSConfiguration>
		<CORSRule>
			<AllowedMethod>GET</AllowedMethod>
			<AllowedOrigin>*</AllowedOrigin>
		</CORSRule>
	</CORSConfiguration>`
	hc := prepareHandlerContextWithMinCache(t)

	bktName := "bucket-for-cors"
	box, _ := createAccessBox(t)
	w, r := prepareTestRequest(hc, bktName, "", nil)
	ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
	r = r.WithContext(ctx)
	r.Header.Add(api.AmzACL, "public-read")
	hc.Handler().CreateBucketHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
	ctx = middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
	r = r.WithContext(ctx)
	hc.Handler().PutBucketCorsHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	w, r = prepareTestPayloadRequest(hc, bktName, "", nil)
	hc.Handler().GetBucketCorsHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	hc.config.useDefaultXMLNS = true
	w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(bodyNoXmlns))
	ctx = middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
	r = r.WithContext(ctx)
	hc.Handler().PutBucketCorsHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	w, r = prepareTestPayloadRequest(hc, bktName, "", nil)
	hc.Handler().GetBucketCorsHandler(w, r)
	assertStatus(t, w, http.StatusOK)
}

func TestPreflight(t *testing.T) {
	body := `
	<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
		<CORSRule>
			<AllowedMethod>GET</AllowedMethod>
			<AllowedOrigin>http://www.example.com</AllowedOrigin>
			<AllowedHeader>Authorization</AllowedHeader>
			<ExposeHeader>x-amz-*</ExposeHeader>
			<ExposeHeader>X-Amz-*</ExposeHeader>
			<MaxAgeSeconds>600</MaxAgeSeconds>
		</CORSRule>
	</CORSConfiguration>
	`
	hc := prepareHandlerContext(t)

	bktName := "bucket-preflight-test"
	box, _ := createAccessBox(t)
	w, r := prepareTestRequest(hc, bktName, "", nil)
	ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
	r = r.WithContext(ctx)
	hc.Handler().CreateBucketHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
	ctx = middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
	r = r.WithContext(ctx)
	hc.Handler().PutBucketCorsHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	for _, tc := range []struct {
		name           string
		origin         string
		method         string
		headers        string
		expectedStatus int
	}{
		{
			name:           "Valid",
			origin:         "http://www.example.com",
			method:         "GET",
			headers:        "Authorization",
			expectedStatus: http.StatusOK,
		},
		{
			name:           "Empty origin",
			method:         "GET",
			headers:        "Authorization",
			expectedStatus: http.StatusBadRequest,
		},
		{
			name:           "Empty request method",
			origin:         "http://www.example.com",
			headers:        "Authorization",
			expectedStatus: http.StatusBadRequest,
		},
		{
			name:           "Not allowed method",
			origin:         "http://www.example.com",
			method:         "PUT",
			headers:        "Authorization",
			expectedStatus: http.StatusForbidden,
		},
		{
			name:           "Not allowed headers",
			origin:         "http://www.example.com",
			method:         "GET",
			headers:        "Authorization, Last-Modified",
			expectedStatus: http.StatusForbidden,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			w, r = prepareTestPayloadRequest(hc, bktName, "", nil)
			r.Header.Set(api.Origin, tc.origin)
			r.Header.Set(api.AccessControlRequestMethod, tc.method)
			r.Header.Set(api.AccessControlRequestHeaders, tc.headers)
			hc.Handler().Preflight(w, r)
			assertStatus(t, w, tc.expectedStatus)

			if tc.expectedStatus == http.StatusOK {
				require.Equal(t, tc.origin, w.Header().Get(api.AccessControlAllowOrigin))
				require.Equal(t, tc.method, w.Header().Get(api.AccessControlAllowMethods))
				require.Equal(t, tc.headers, w.Header().Get(api.AccessControlAllowHeaders))
				require.Equal(t, "x-amz-*, X-Amz-*", w.Header().Get(api.AccessControlExposeHeaders))
				require.Equal(t, "true", w.Header().Get(api.AccessControlAllowCredentials))
				require.Equal(t, "600", w.Header().Get(api.AccessControlMaxAge))
			}
		})
	}
}

func TestPreflightWildcardOrigin(t *testing.T) {
	body := `
	<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
		<CORSRule>
			<AllowedMethod>GET</AllowedMethod>
			<AllowedMethod>PUT</AllowedMethod>
			<AllowedOrigin>*</AllowedOrigin>
			<AllowedHeader>*</AllowedHeader>
		</CORSRule>
	</CORSConfiguration>
	`
	hc := prepareHandlerContext(t)

	bktName := "bucket-preflight-wildcard-test"
	box, _ := createAccessBox(t)
	w, r := prepareTestRequest(hc, bktName, "", nil)
	ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
	r = r.WithContext(ctx)
	hc.Handler().CreateBucketHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
	ctx = middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
	r = r.WithContext(ctx)
	hc.Handler().PutBucketCorsHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	for _, tc := range []struct {
		name           string
		origin         string
		method         string
		headers        string
		expectedStatus int
	}{
		{
			name:           "Valid get",
			origin:         "http://www.example.com",
			method:         "GET",
			headers:        "Authorization, Last-Modified",
			expectedStatus: http.StatusOK,
		},
		{
			name:           "Valid put",
			origin:         "http://example.com",
			method:         "PUT",
			headers:        "Authorization, Content-Type",
			expectedStatus: http.StatusOK,
		},
		{
			name:           "Empty origin",
			method:         "GET",
			headers:        "Authorization, Last-Modified",
			expectedStatus: http.StatusBadRequest,
		},
		{
			name:           "Empty request method",
			origin:         "http://www.example.com",
			headers:        "Authorization, Last-Modified",
			expectedStatus: http.StatusBadRequest,
		},
		{
			name:           "Not allowed method",
			origin:         "http://www.example.com",
			method:         "DELETE",
			headers:        "Authorization, Last-Modified",
			expectedStatus: http.StatusForbidden,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			w, r = prepareTestPayloadRequest(hc, bktName, "", nil)
			r.Header.Set(api.Origin, tc.origin)
			r.Header.Set(api.AccessControlRequestMethod, tc.method)
			r.Header.Set(api.AccessControlRequestHeaders, tc.headers)
			hc.Handler().Preflight(w, r)
			assertStatus(t, w, tc.expectedStatus)

			if tc.expectedStatus == http.StatusOK {
				require.Equal(t, tc.origin, w.Header().Get(api.AccessControlAllowOrigin))
				require.Equal(t, tc.method, w.Header().Get(api.AccessControlAllowMethods))
				require.Equal(t, tc.headers, w.Header().Get(api.AccessControlAllowHeaders))
				require.Empty(t, w.Header().Get(api.AccessControlExposeHeaders))
				require.Empty(t, w.Header().Get(api.AccessControlAllowCredentials))
				require.Equal(t, "0", w.Header().Get(api.AccessControlMaxAge))
			}
		})
	}
}
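The two preflight tests above diverge on the same request headers ("Authorization, Last-Modified") only because of how the rule's AllowedHeaders are matched: checkSubslice accepts anything when the rule allows "*", and otherwise requires every requested header to be listed. A self-contained re-statement of that logic (function names here are illustrative, not the gateway's):

package main

import "fmt"

// allowed reproduces the checkSubslice/sliceContains semantics shown earlier.
func allowed(ruleHeaders, requested []string) bool {
	set := make(map[string]bool, len(ruleHeaders))
	for _, h := range ruleHeaders {
		set[h] = true
	}
	if set["*"] {
		return true // wildcard rule accepts any requested headers
	}
	for _, h := range requested {
		if !set[h] {
			return false // a single unlisted header rejects the whole preflight
		}
	}
	return true
}

func main() {
	fmt.Println(allowed([]string{"Authorization"}, []string{"Authorization", "Last-Modified"})) // false -> 403 in TestPreflight
	fmt.Println(allowed([]string{"*"}, []string{"Authorization", "Last-Modified"}))             // true  -> 200 in TestPreflightWildcardOrigin
}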
@@ -1,289 +0,0 @@
package handler

import (
	"encoding/xml"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
)

// limitation of AWS https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
const maxObjectsToDelete = 1000

// DeleteObjectsRequest -- xml carrying the object key names which should be deleted.
type DeleteObjectsRequest struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delete" json:"-"`
	// Element to enable quiet mode for the request
	Quiet bool `xml:"Quiet,omitempty"`
	// List of objects to be deleted
	Objects []ObjectIdentifier `xml:"Object"`
}

// ObjectIdentifier carries the key name for the object to delete.
type ObjectIdentifier struct {
	ObjectName string `xml:"Key"`
	VersionID  string `xml:"VersionId,omitempty"`
}

// DeletedObject carries information about an object that was successfully deleted.
type DeletedObject struct {
	ObjectIdentifier
	DeleteMarker          bool   `xml:"DeleteMarker,omitempty"`
	DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
}

// DeleteError describes a per-object failure in a multi-delete response.
type DeleteError struct {
	Code      string `xml:"Code,omitempty"`
	Message   string `xml:"Message,omitempty"`
	Key       string `xml:"Key,omitempty"`
	VersionID string `xml:"VersionId,omitempty"`
}

// DeleteObjectsResponse container for multiple object deletes.
type DeleteObjectsResponse struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`

	// Collection of all deleted objects
	DeletedObjects []DeletedObject `xml:"Deleted,omitempty"`

	// Collection of errors deleting certain objects.
	Errors []DeleteError `xml:"Error,omitempty"`
}

func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.DeleteObject")
	defer span.End()

	reqInfo := middleware.GetReqInfo(ctx)
	versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
	versionedObject := []*layer.VersionedObject{{
		Name:      reqInfo.ObjectName,
		VersionID: versionID,
	}}

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
		return
	}

	bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
		return
	}

	networkInfo, err := h.obj.GetNetworkInfo(ctx)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
		return
	}

	p := &layer.DeleteObjectParams{
		BktInfo:     bktInfo,
		Objects:     versionedObject,
		Settings:    bktSettings,
		NetworkInfo: networkInfo,
	}
	deletedObjects := h.obj.DeleteObjects(ctx, p)
	deletedObject := deletedObjects[0]
	if deletedObject.Error != nil {
		if isErrObjectLocked(deletedObject.Error) {
			h.logAndSendError(ctx, w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
		} else {
			h.logAndSendError(ctx, w, "could not delete object", reqInfo, deletedObject.Error)
		}
		return
	}

	if deletedObject.VersionID != "" {
		w.Header().Set(api.AmzVersionID, deletedObject.VersionID)
	}
	if deletedObject.DeleteMarkVersion != "" {
		w.Header().Set(api.AmzDeleteMarker, strconv.FormatBool(true))
		if deletedObject.VersionID == "" {
			w.Header().Set(api.AmzVersionID, deletedObject.DeleteMarkVersion)
		}
	}

	w.WriteHeader(http.StatusNoContent)
}

func isErrObjectLocked(err error) bool {
	switch err.(type) {
	case *apistatus.ObjectLocked:
		return true
	default:
		return strings.Contains(err.Error(), "object is locked")
	}
}

// DeleteMultipleObjectsHandler handles multiple delete requests.
func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
	ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.DeleteMultipleObjects")
	defer span.End()

	reqInfo := middleware.GetReqInfo(ctx)

	// Content-Length is required and should be non-zero
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
	if r.ContentLength <= 0 {
		h.logAndSendError(ctx, w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength))
		return
	}

	// Unmarshal list of keys to be deleted.
	requested := &DeleteObjectsRequest{}
	if err := h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(requested); err != nil {
		h.logAndSendError(ctx, w, "couldn't decode body", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()))
		return
	}

	if len(requested.Objects) == 0 || len(requested.Objects) > maxObjectsToDelete {
		h.logAndSendError(ctx, w, "number of objects to delete must be greater than 0 and less or equal to 1000", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
		return
	}

	unique := make(map[string]struct{})
	toRemove := make([]*layer.VersionedObject, 0, len(requested.Objects))
	for _, obj := range requested.Objects {
		versionedObj := &layer.VersionedObject{
			Name:      obj.ObjectName,
			VersionID: obj.VersionID,
		}
		key := versionedObj.String()
		if _, ok := unique[key]; !ok {
			toRemove = append(toRemove, versionedObj)
			unique[key] = struct{}{}
		}
	}

	response := &DeleteObjectsResponse{
		Errors:         make([]DeleteError, 0, len(toRemove)),
		DeletedObjects: make([]DeletedObject, 0, len(toRemove)),
	}

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
		return
	}

	bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
		return
	}

	networkInfo, err := h.obj.GetNetworkInfo(ctx)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
		return
	}

	p := &layer.DeleteObjectParams{
		BktInfo:     bktInfo,
		Objects:     toRemove,
		Settings:    bktSettings,
		NetworkInfo: networkInfo,
		IsMultiple:  true,
	}
	deletedObjects := h.obj.DeleteObjects(ctx, p)

	for _, obj := range deletedObjects {
		if obj.Error != nil {
			code := "BadRequest"
			if s3err, ok := obj.Error.(errors.Error); ok {
				code = s3err.Code
			}
			response.Errors = append(response.Errors, DeleteError{
				Code:      code,
				Message:   obj.Error.Error(),
				Key:       obj.Name,
				VersionID: obj.VersionID,
			})
		} else if !requested.Quiet {
			deletedObj := DeletedObject{
				ObjectIdentifier: ObjectIdentifier{
					ObjectName: obj.Name,
					VersionID:  obj.VersionID,
				},
				DeleteMarkerVersionID: obj.DeleteMarkVersion,
			}
			if deletedObj.DeleteMarkerVersionID != "" {
				deletedObj.DeleteMarker = true
			}
			response.DeletedObjects = append(response.DeletedObjects, deletedObj)
		}
	}

	if err = middleware.EncodeToResponse(w, response); err != nil {
		h.logAndSendError(ctx, w, "could not write response", reqInfo, err)
		return
	}
}

func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx, span := tracing.StartSpanFromContext(r.Context(), "handler.DeleteBucket")
	defer span.End()

	reqInfo := middleware.GetReqInfo(ctx)
	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
		return
	}

	if err = checkOwner(bktInfo, reqInfo.User); err != nil {
		h.logAndSendError(ctx, w, "request owner id does not match bucket owner id", reqInfo, err)
		return
	}

	var sessionToken *session.Container

	boxData, err := middleware.GetBoxData(ctx)
	if err == nil {
		sessionToken = boxData.Gate.SessionTokenForDelete()
	}

	skipObjCheck := false
	if value, ok := r.Header[api.AmzForceBucketDelete]; ok {
		s := value[0]
		if s == "true" {
			skipObjCheck = true
		}
	}

	if err = h.obj.DeleteBucket(ctx, &layer.DeleteBucketParams{
		BktInfo:      bktInfo,
		SessionToken: sessionToken,
		SkipCheck:    skipObjCheck,
	}); err != nil {
		h.logAndSendError(ctx, w, "couldn't delete bucket", reqInfo, err)
		return
	}

	chainIDs := []chain.ID{
		getBucketChainID(chain.S3, bktInfo),
		getBucketChainID(chain.Ingress, bktInfo),
		getBucketCannedChainID(chain.S3, bktInfo.CID),
		getBucketCannedChainID(chain.Ingress, bktInfo.CID),
	}
	if err = h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil {
		h.logAndSendError(ctx, w, "failed to delete policy from storage", reqInfo, err)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}
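The struct tags on DeleteObjectsRequest above fully determine the wire format of a multi-delete body. A minimal, self-contained sketch of the payload the handler decodes (object keys and the version ID are made up; the local type mirrors the handler's struct):

package main

import (
	"encoding/xml"
	"fmt"
)

// deleteRequest mirrors the handler's DeleteObjectsRequest shown above.
type deleteRequest struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delete"`
	Quiet   bool     `xml:"Quiet,omitempty"`
	Objects []struct {
		Key       string `xml:"Key"`
		VersionID string `xml:"VersionId,omitempty"`
	} `xml:"Object"`
}

func main() {
	payload := `<Delete xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Quiet>true</Quiet>
  <Object><Key>photos/a.jpg</Key></Object>
  <Object><Key>photos/b.jpg</Key><VersionId>abc123</VersionId></Object>
</Delete>`

	var req deleteRequest
	if err := xml.Unmarshal([]byte(payload), &req); err != nil {
		panic(err)
	}
	fmt.Println(req.Quiet, len(req.Objects)) // true 2
}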
@@ -1,627 +0,0 @@
package handler

import (
	"bytes"
	"encoding/xml"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/stretchr/testify/require"
)

const (
	emptyVersion = ""
)

func TestDeleteBucketOnAlreadyRemovedError(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo := createTestBucket(hc, bktName)

	putObject(hc, bktName, objName)

	addr := getAddressOfLastVersion(hc, bktInfo, objName)
	hc.tp.SetObjectError(addr, &apistatus.ObjectAlreadyRemoved{})

	deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})

	hc.owner = bktInfo.Owner
	deleteBucket(t, hc, bktName, http.StatusNoContent)
}

func getAddressOfLastVersion(hc *handlerContext, bktInfo *data.BucketInfo, objName string) oid.Address {
	nodeVersion, err := hc.tree.GetLatestVersion(hc.context, bktInfo, objName)
	require.NoError(hc.t, err)
	var addr oid.Address
	addr.SetContainer(bktInfo.CID)
	addr.SetObject(nodeVersion.OID)
	return addr
}

func TestDeleteBucket(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)

	deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
	require.True(t, isDeleteMarker)

	tc.owner = bktInfo.Owner
	deleteBucket(t, tc, bktName, http.StatusConflict)
	deleteObject(t, tc, bktName, objName, objInfo.VersionID())
	deleteBucket(t, tc, bktName, http.StatusConflict)
	deleteObject(t, tc, bktName, objName, deleteMarkerVersion)
	deleteBucket(t, tc, bktName, http.StatusNoContent)
}

func TestDeleteBucketOnNotFoundError(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo := createTestBucket(hc, bktName)

	putObject(hc, bktName, objName)

	nodeVersion, err := hc.tree.GetUnversioned(hc.context, bktInfo, objName)
	require.NoError(t, err)
	var addr oid.Address
	addr.SetContainer(bktInfo.CID)
	addr.SetObject(nodeVersion.OID)
	hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})

	deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})

	hc.owner = bktInfo.Owner
	deleteBucket(t, hc, bktName, http.StatusNoContent)
}

func TestForceDeleteBucket(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo := createTestBucket(hc, bktName)

	putObject(hc, bktName, objName)

	nodeVersion, err := hc.tree.GetUnversioned(hc.context, bktInfo, objName)
	require.NoError(t, err)
	var addr oid.Address
	addr.SetContainer(bktInfo.CID)
	addr.SetObject(nodeVersion.OID)

	hc.owner = bktInfo.Owner
	deleteBucketForce(t, hc, bktName, http.StatusConflict, "false")
	deleteBucketForce(t, hc, bktName, http.StatusNoContent, "true")
}

func TestDeleteMultipleObjectCheckUniqueness(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket", "object"
	createTestBucket(hc, bktName)

	putObject(hc, bktName, objName)

	resp := deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}, {objName, emptyVersion}})
	require.Empty(t, resp.Errors)
	require.Len(t, resp.DeletedObjects, 1)
}

func TestDeleteObjectsError(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo := createTestBucket(hc, bktName)
	putBucketVersioning(t, hc, bktName, true)

	putObject(hc, bktName, objName)

	nodeVersion, err := hc.tree.GetLatestVersion(hc.context, bktInfo, objName)
	require.NoError(t, err)
	var addr oid.Address
	addr.SetContainer(bktInfo.CID)
	addr.SetObject(nodeVersion.OID)

	expectedError := apierr.GetAPIError(apierr.ErrAccessDenied)
	hc.tp.SetObjectError(addr, expectedError)

	w := deleteObjectsBase(hc, bktName, [][2]string{{objName, nodeVersion.OID.EncodeToString()}})
	var buf bytes.Buffer
	res := &DeleteObjectsResponse{}
	err = xml.NewDecoder(io.TeeReader(w.Result().Body, &buf)).Decode(res)
	require.NoError(t, err)

	require.Contains(t, buf.String(), "VersionId")
	require.ElementsMatch(t, []DeleteError{{
		Code:      expectedError.Code,
		Key:       objName,
		Message:   expectedError.Error(),
		VersionID: nodeVersion.OID.EncodeToString(),
	}}, res.Errors)
}

func TestDeleteObject(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo, objInfo := createBucketAndObject(tc, bktName, objName)

	checkFound(t, tc, bktName, objName, emptyVersion)
	deleteObject(t, tc, bktName, objName, emptyVersion)
	checkNotFound(t, tc, bktName, objName, emptyVersion)

	require.False(t, existInMockedFrostFS(tc, bktInfo, objInfo))
}

func TestDeleteObjectFromSuspended(t *testing.T) {
	tc := prepareHandlerContext(t)
	bktName, objName := "bucket-versioned-for-removal", "object-to-delete"

	createSuspendedBucket(t, tc, bktName)
	putObject(tc, bktName, objName)

	versionID, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
	require.True(t, isDeleteMarker)
	require.Equal(t, data.UnversionedObjectVersionID, versionID)
}

func TestDeleteDeletedObject(t *testing.T) {
	tc := prepareHandlerContext(t)

	t.Run("unversioned bucket", func(t *testing.T) {
		bktName, objName := "bucket-unversioned-removal", "object-to-delete"
		createBucketAndObject(tc, bktName, objName)

		versionID, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
		require.Empty(t, versionID)
		require.False(t, isDeleteMarker)
		versionID, isDeleteMarker = deleteObject(t, tc, bktName, objName, emptyVersion)
		require.Empty(t, versionID)
		require.False(t, isDeleteMarker)
	})

	t.Run("versioned bucket", func(t *testing.T) {
		bktName, objName := "bucket-versioned-for-removal", "object-to-delete"
		createVersionedBucketAndObject(t, tc, bktName, objName)

		_, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
		require.True(t, isDeleteMarker)
		_, isDeleteMarker = deleteObject(t, tc, bktName, objName, emptyVersion)
		require.True(t, isDeleteMarker)
	})

	t.Run("versioned bucket not found obj", func(t *testing.T) {
		bktName, objName := "bucket-versioned-for-removal-not-found", "object-to-delete"
		_, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)

		versionID, isDeleteMarker := deleteObject(t, tc, bktName, objName, objInfo.VersionID())
		require.False(t, isDeleteMarker)
		require.Equal(t, objInfo.VersionID(), versionID)

		versionID2, isDeleteMarker := deleteObject(t, tc, bktName, objName, versionID)
		require.False(t, isDeleteMarker)
		require.Equal(t, objInfo.VersionID(), versionID2)
	})
}

func TestDeleteObjectVersioned(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)

	checkFound(t, tc, bktName, objName, emptyVersion)
	deleteObject(t, tc, bktName, objName, emptyVersion)
	checkNotFound(t, tc, bktName, objName, emptyVersion)

	checkFound(t, tc, bktName, objName, objInfo.VersionID())
	deleteObject(t, tc, bktName, objName, objInfo.VersionID())
	checkNotFound(t, tc, bktName, objName, objInfo.VersionID())

	require.False(t, existInMockedFrostFS(tc, bktInfo, objInfo), "object exists but shouldn't")
}

func TestDeleteObjectUnversioned(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal-unversioned", "object-to-delete-unversioned"
	bktInfo, objInfo := createBucketAndObject(tc, bktName, objName)

	checkFound(t, tc, bktName, objName, emptyVersion)
	deleteObject(t, tc, bktName, objName, emptyVersion)
	checkNotFound(t, tc, bktName, objName, emptyVersion)

	versions := listVersions(t, tc, bktName)
	require.Len(t, versions.DeleteMarker, 0, "delete markers must be empty")
	require.Len(t, versions.Version, 0, "versions must be empty")

	require.False(t, existInMockedFrostFS(tc, bktInfo, objInfo), "object exists but shouldn't")
}

func TestRemoveDeleteMarker(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)

	checkFound(t, tc, bktName, objName, emptyVersion)
	deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
	require.True(t, isDeleteMarker)
	checkNotFound(t, tc, bktName, objName, emptyVersion)

	checkFound(t, tc, bktName, objName, objInfo.VersionID())
	deleteObject(t, tc, bktName, objName, deleteMarkerVersion)
	checkFound(t, tc, bktName, objName, emptyVersion)

	require.True(t, existInMockedFrostFS(tc, bktInfo, objInfo), "object doesn't exist but should")
}

func TestDeleteMarkerVersioned(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	createVersionedBucketAndObject(t, tc, bktName, objName)

	t.Run("not create new delete marker if last version is delete marker", func(t *testing.T) {
		deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
		require.True(t, isDeleteMarker)
		versions := listVersions(t, tc, bktName)
		require.Len(t, versions.DeleteMarker, 1)
		require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)

		_, isDeleteMarker = deleteObject(t, tc, bktName, objName, emptyVersion)
		require.True(t, isDeleteMarker)
		versions = listVersions(t, tc, bktName)
		require.Len(t, versions.DeleteMarker, 1)
		require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)
	})

	t.Run("do not create delete marker if object does not exist", func(t *testing.T) {
		versionsBefore := listVersions(t, tc, bktName)
		_, isDeleteMarker := deleteObject(t, tc, bktName, "dummy", emptyVersion)
		require.False(t, isDeleteMarker)
		versionsAfter := listVersions(t, tc, bktName)
		require.Equal(t, versionsBefore, versionsAfter)
	})
}

func TestDeleteMarkerSuspended(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo, _ := createVersionedBucketAndObject(t, tc, bktName, objName)
	putBucketVersioning(t, tc, bktName, false)

	t.Run("not create new delete marker if last version is delete marker", func(t *testing.T) {
		deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
		require.True(t, isDeleteMarker)
		require.Equal(t, data.UnversionedObjectVersionID, deleteMarkerVersion)

		deleteMarkerVersion, isDeleteMarker = deleteObject(t, tc, bktName, objName, emptyVersion)
		require.True(t, isDeleteMarker)
		require.Equal(t, data.UnversionedObjectVersionID, deleteMarkerVersion)

		versions := listVersions(t, tc, bktName)
		require.Len(t, versions.DeleteMarker, 1)
		require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)
	})

	t.Run("do not create delete marker if object does not exist", func(t *testing.T) {
		versionsBefore := listVersions(t, tc, bktName)
		_, isDeleteMarker := deleteObject(t, tc, bktName, "dummy", emptyVersion)
		require.False(t, isDeleteMarker)
		versionsAfter := listVersions(t, tc, bktName)
		require.Equal(t, versionsBefore, versionsAfter)
	})

	t.Run("remove last unversioned non delete marker", func(t *testing.T) {
		objName := "obj3"
		putObject(tc, bktName, objName)

		nodeVersion, err := tc.tree.GetUnversioned(tc.Context(), bktInfo, objName)
		require.NoError(t, err)

		deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
		require.True(t, isDeleteMarker)
		require.Equal(t, data.UnversionedObjectVersionID, deleteMarkerVersion)

		objVersions := getVersion(listVersions(t, tc, bktName), objName)
		require.Len(t, objVersions, 0)

		require.False(t, tc.MockedPool().ObjectExists(nodeVersion.OID))
	})
}

func TestDeleteObjectCombined(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo, objInfo := createBucketAndObject(tc, bktName, objName)

	putBucketVersioning(t, tc, bktName, true)

	checkFound(t, tc, bktName, objName, emptyVersion)
	deleteObject(t, tc, bktName, objName, emptyVersion)
	checkNotFound(t, tc, bktName, objName, emptyVersion)

	checkFound(t, tc, bktName, objName, objInfo.VersionID())

	require.True(t, existInMockedFrostFS(tc, bktInfo, objInfo), "object doesn't exist but should")
}

func TestDeleteObjectSuspended(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo, objInfo := createBucketAndObject(tc, bktName, objName)

	putBucketVersioning(t, tc, bktName, true)

	checkFound(t, tc, bktName, objName, emptyVersion)
	deleteObject(t, tc, bktName, objName, emptyVersion)
	checkNotFound(t, tc, bktName, objName, emptyVersion)

	putBucketVersioning(t, tc, bktName, false)

	deleteObject(t, tc, bktName, objName, emptyVersion)
	checkNotFound(t, tc, bktName, objName, objInfo.VersionID())

	require.False(t, existInMockedFrostFS(tc, bktInfo, objInfo), "object exists but shouldn't")
}

func TestDeleteMarkers(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	createTestBucket(tc, bktName)
	putBucketVersioning(t, tc, bktName, true)

	checkNotFound(t, tc, bktName, objName, emptyVersion)
	deleteObject(t, tc, bktName, objName, emptyVersion)
	deleteObject(t, tc, bktName, objName, emptyVersion)
	deleteObject(t, tc, bktName, objName, emptyVersion)

	versions := listVersions(t, tc, bktName)
	require.Len(t, versions.DeleteMarker, 0, "invalid delete markers length")
	require.Len(t, versions.Version, 0, "versions must be empty")

	require.Len(t, listOIDsFromMockedFrostFS(t, tc, bktName), 0, "shouldn't be any object in frostfs")
}

func TestGetHeadDeleteMarker(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	createTestBucket(hc, bktName)
	putBucketVersioning(t, hc, bktName, true)

	putObject(hc, bktName, objName)

	deleteMarkerVersionID, _ := deleteObject(t, hc, bktName, objName, emptyVersion)

	w := headObjectBase(hc, bktName, objName, deleteMarkerVersionID)
	require.Equal(t, w.Code, http.StatusMethodNotAllowed)
	require.Equal(t, w.Result().Header.Get(api.AmzDeleteMarker), "true")

	w, r := prepareTestRequest(hc, bktName, objName, nil)
	hc.Handler().GetObjectHandler(w, r)
	assertStatus(hc.t, w, http.StatusNotFound)
	require.Equal(t, w.Result().Header.Get(api.AmzDeleteMarker), "true")
}

func TestDeleteObjectFromListCache(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	bktInfo, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)

	versions := listObjectsV1(tc, bktName, "", "", "", -1)
	require.Len(t, versions.Contents, 1)

	checkFound(t, tc, bktName, objName, objInfo.VersionID())
	deleteObject(t, tc, bktName, objName, objInfo.VersionID())
	checkNotFound(t, tc, bktName, objName, objInfo.VersionID())

	// check cache is clean after object removal
	versions = listObjectsV1(tc, bktName, "", "", "", -1)
	require.Len(t, versions.Contents, 0)

	require.False(t, existInMockedFrostFS(tc, bktInfo, objInfo))
}

func TestDeleteObjectCheckMarkerReturn(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-removal", "object-to-delete"
	createVersionedBucketAndObject(t, tc, bktName, objName)

	deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
	require.True(t, isDeleteMarker)

	versions := listVersions(t, tc, bktName)
	require.Len(t, versions.DeleteMarker, 1)
	require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)

	deleteMarkerVersion2, isDeleteMarker2 := deleteObject(t, tc, bktName, objName, deleteMarkerVersion)
|
|
||||||
require.True(t, isDeleteMarker2)
|
|
||||||
versions = listVersions(t, tc, bktName)
|
|
||||||
require.Len(t, versions.DeleteMarker, 0)
|
|
||||||
require.Equal(t, deleteMarkerVersion, deleteMarkerVersion2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDeleteBucketByNotOwner(t *testing.T) {
|
|
||||||
hc := prepareHandlerContext(t)
|
|
||||||
|
|
||||||
bktName := "bucket-name"
|
|
||||||
bktInfo := createTestBucket(hc, bktName)
|
|
||||||
deleteBucket(t, hc, bktName, http.StatusForbidden)
|
|
||||||
|
|
||||||
hc.owner = bktInfo.Owner
|
|
||||||
deleteBucket(t, hc, bktName, http.StatusNoContent)
|
|
||||||
}
|
|
||||||
|
|
||||||
func createBucketAndObject(tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
|
|
||||||
bktInfo := createTestBucket(tc, bktName)
|
|
||||||
|
|
||||||
objInfo := createTestObject(tc, bktInfo, objName, encryption.Params{})
|
|
||||||
|
|
||||||
return bktInfo, objInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
func createVersionedBucketAndObject(_ *testing.T, tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
|
|
||||||
bktInfo := createVersionedBucket(tc, bktName)
|
|
||||||
objInfo := createTestObject(tc, bktInfo, objName, encryption.Params{})
|
|
||||||
|
|
||||||
return bktInfo, objInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
func createVersionedBucket(hc *handlerContext, bktName string) *data.BucketInfo {
|
|
||||||
bktInfo := createTestBucket(hc, bktName)
|
|
||||||
putBucketVersioning(hc.t, hc, bktName, true)
|
|
||||||
|
|
||||||
return bktInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
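// putBucketVersioning switches the bucket versioning status between
// "Enabled" and "Suspended" and expects the handler to respond with HTTP 200.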
func putBucketVersioning(t *testing.T, tc *handlerContext, bktName string, enabled bool) {
    cfg := &VersioningConfiguration{Status: "Suspended"}
    if enabled {
        cfg.Status = "Enabled"
    }
    w, r := prepareTestRequest(tc, bktName, "", cfg)
    tc.Handler().PutBucketVersioningHandler(w, r)
    assertStatus(t, w, http.StatusOK)
}

func getBucketVersioning(hc *handlerContext, bktName string) *VersioningConfiguration {
    w, r := prepareTestRequest(hc, bktName, "", nil)
    hc.Handler().GetBucketVersioningHandler(w, r)
    assertStatus(hc.t, w, http.StatusOK)

    res := &VersioningConfiguration{}
    parseTestResponse(hc.t, w, res)
    return res
}

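// deleteObject calls DeleteObject for the given version (an empty version
// means the latest one) and returns the x-amz-version-id header value
// together with a flag indicating whether the x-amz-delete-marker header was set.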
func deleteObject(t *testing.T, tc *handlerContext, bktName, objName, version string) (string, bool) {
    query := make(url.Values)
    query.Add(api.QueryVersionID, version)

    w, r := prepareTestFullRequest(tc, bktName, objName, query, nil)
    tc.Handler().DeleteObjectHandler(w, r)
    assertStatus(t, w, http.StatusNoContent)

    return w.Header().Get(api.AmzVersionID), w.Header().Get(api.AmzDeleteMarker) != ""
}

func deleteObjects(t *testing.T, tc *handlerContext, bktName string, objVersions [][2]string) *DeleteObjectsResponse {
    w := deleteObjectsBase(tc, bktName, objVersions)

    res := &DeleteObjectsResponse{}
    parseTestResponse(t, w, res)
    return res
}

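// deleteObjectsBase sends one DeleteObjects (multi-delete) request composed
// of (objectName, versionID) pairs and expects HTTP 200.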
func deleteObjectsBase(hc *handlerContext, bktName string, objVersions [][2]string) *httptest.ResponseRecorder {
    req := &DeleteObjectsRequest{}
    for _, version := range objVersions {
        req.Objects = append(req.Objects, ObjectIdentifier{
            ObjectName: version[0],
            VersionID:  version[1],
        })
    }

    w, r := prepareTestRequest(hc, bktName, "", req)
    hc.Handler().DeleteMultipleObjectsHandler(w, r)
    assertStatus(hc.t, w, http.StatusOK)

    return w
}

func deleteBucketForce(t *testing.T, tc *handlerContext, bktName string, code int, value string) {
    w, r := prepareTestRequest(tc, bktName, "", nil)
    r.Header.Set(api.AmzForceBucketDelete, value)
    tc.Handler().DeleteBucketHandler(w, r)
    assertStatus(t, w, code)
}

func deleteBucket(t *testing.T, tc *handlerContext, bktName string, code int) {
    w, r := prepareTestRequest(tc, bktName, "", nil)
    tc.Handler().DeleteBucketHandler(w, r)
    assertStatus(t, w, code)
}

func checkNotFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
    w := headObjectBase(hc, bktName, objName, version)
    assertStatus(t, w, http.StatusNotFound)
}

func headObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apierr.ErrorCode) {
    w := headObjectBase(hc, bktName, objName, version)
    assertS3Error(hc.t, w, apierr.GetAPIError(code))
}

func checkFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
    w := headObjectBase(hc, bktName, objName, version)
    assertStatus(t, w, http.StatusOK)
}

func headObjectWithHeaders(hc *handlerContext, bktName, objName, version string, headers map[string]string) *httptest.ResponseRecorder {
    query := make(url.Values)
    query.Add(api.QueryVersionID, version)

    w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
    for k, v := range headers {
        r.Header.Set(k, v)
    }
    hc.Handler().HeadObjectHandler(w, r)
    return w
}

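// headObjectBase performs HeadObject with an explicit versionId query
// parameter and returns the recorder so callers can assert any status code.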
func headObjectBase(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
    query := make(url.Values)
    query.Add(api.QueryVersionID, version)

    w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
    hc.Handler().HeadObjectHandler(w, r)
    return w
}

func listVersions(_ *testing.T, tc *handlerContext, bktName string) *ListObjectsVersionsResponse {
    return listObjectsVersions(tc, bktName, "", "", "", "", -1)
}

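// getVersion picks from a ListObjectVersions response only the entries whose
// key equals objName.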
func getVersion(resp *ListObjectsVersionsResponse, objName string) []*ObjectVersionResponse {
    var res []*ObjectVersionResponse
    for i, version := range resp.Version {
        if version.Key == objName {
            res = append(res, &resp.Version[i])
        }
    }
    return res
}

func putObject(hc *handlerContext, bktName, objName string) {
    body := bytes.NewReader([]byte("content"))
    w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
    hc.Handler().PutObjectHandler(w, r)
    assertStatus(hc.t, w, http.StatusOK)
}

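// createSuspendedBucket creates a bucket and immediately suspends versioning
// on it, returning the bucket info resolved through the layer.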
func createSuspendedBucket(t *testing.T, tc *handlerContext, bktName string) *data.BucketInfo {
    createTestBucket(tc, bktName)
    bktInfo, err := tc.Layer().GetBucketInfo(tc.Context(), bktName)
    require.NoError(t, err)
    putBucketVersioning(t, tc, bktName, false)
    return bktInfo
}

@@ -1,455 +0,0 @@
package handler

import (
    "bytes"
    "crypto/rand"
    "crypto/tls"
    "fmt"
    "io"
    "net/http"
    "net/http/httptest"
    "net/url"
    "strconv"
    "strings"
    "testing"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
    "github.com/stretchr/testify/require"
)

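// aes256Key is a base64-encoded 256-bit SSE-C key; aes256KeyMD5 is the
// base64-encoded MD5 digest of the raw key, as expected in the
// x-amz-server-side-encryption-customer-key and -key-MD5 headers.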
const (
    aes256Key       = "MTIzNDU2Nzg5MHF3ZXJ0eXVpb3Bhc2RmZ2hqa2x6eGM="
    aes256KeyMD5    = "NtkH/y2maPit+yUkhq4Q7A=="
    partNumberQuery = "partNumber"
    uploadIDQuery   = "uploadId"
)

func TestSimpleGetEncrypted(t *testing.T) {
    tc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-sse-c", "object-to-encrypt"
    bktInfo := createTestBucket(tc, bktName)

    content := "content"
    putEncryptedObject(t, tc, bktName, objName, content)

    objInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: objName})
    require.NoError(t, err)
    obj, err := tc.MockedPool().GetObject(tc.Context(), frostfs.PrmObjectGet{Container: bktInfo.CID, Object: objInfo.ID})
    require.NoError(t, err)
    encryptedContent, err := io.ReadAll(obj.Payload)
    require.NoError(t, err)
    require.NotEqual(t, content, string(encryptedContent))

    response, _ := getEncryptedObject(tc, bktName, objName)
    require.Equal(t, content, string(response))

    result := listVersions(t, tc, bktName)
    require.Len(t, result.Version, 1)
    require.Equal(t, uint64(len(content)), result.Version[0].Size)
}

func TestMD5HeaderBadOrEmpty(t *testing.T) {
    tc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-sse-c", "object-to-encrypt"
    createTestBucket(tc, bktName)
    content := "content"

    headers := map[string]string{
        api.ContentMD5: "",
    }
    putEncryptedObjectWithHeadersErr(t, tc, bktName, objName, content, headers, errors.ErrInvalidDigest)

    headers = map[string]string{
        api.ContentMD5: "yZRvHQZYwL5V7+k2pcwHLg==",
    }

    putEncryptedObjectWithHeadersErr(t, tc, bktName, objName, content, headers, errors.ErrBadDigest)

    headers = map[string]string{
        api.ContentMD5: "dGhlIHF1aWNrIGJyb3dF",
    }

    putEncryptedObjectWithHeadersErr(t, tc, bktName, objName, content, headers, errors.ErrInvalidDigest)
}

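// TestGetEncryptedRange writes a payload a little longer than 64 KiB with
// sentinel bytes planted around the 1<<16 offset (the cipher block boundary),
// so the range reads below would catch off-by-one errors at that boundary.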
func TestGetEncryptedRange(t *testing.T) {
    tc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-sse-c", "object-to-encrypt"
    createTestBucket(tc, bktName)

    var sb strings.Builder
    for i := 0; i < 1<<16+11; i++ {
        switch i {
        case 0:
            sb.Write([]byte("b"))
        case 1<<16 - 2:
            sb.Write([]byte("c"))
        case 1<<16 - 1:
            sb.Write([]byte("d"))
        case 1 << 16:
            sb.Write([]byte("e"))
        case 1<<16 + 1:
            sb.Write([]byte("f"))
        case 1<<16 + 10:
            sb.Write([]byte("g"))
        default:
            sb.Write([]byte("a"))
        }
    }

    content := sb.String()
    putEncryptedObject(t, tc, bktName, objName, content)

    full := getEncryptedObjectRange(t, tc, bktName, objName, 0, sb.Len()-1)
    require.Equalf(t, content, string(full), "expected len: %d, actual len: %d", len(content), len(full))

    beginning := getEncryptedObjectRange(t, tc, bktName, objName, 0, 3)
    require.Equal(t, content[:4], string(beginning))

    middle := getEncryptedObjectRange(t, tc, bktName, objName, 1<<16-3, 1<<16+2)
    require.Equal(t, "acdefa", string(middle))

    end := getEncryptedObjectRange(t, tc, bktName, objName, 1<<16+2, len(content)-1)
    require.Equal(t, "aaaaaaaag", string(end))
}

func TestS3EncryptionSSECMultipartUpload(t *testing.T) {
    tc := prepareHandlerContext(t)
    bktName, objName := "bucket-for-sse-c-multipart-s3-tests", "multipart_enc"
    createTestBucket(tc, bktName)

    objLen := 30 * 1024 * 1024
    partSize := objLen / 6
    headerMetaKey := api.MetadataPrefix + "foo"
    headers := map[string]string{
        headerMetaKey:   "bar",
        api.ContentType: "text/plain",
    }

    data := multipartUploadEncrypted(tc, bktName, objName, headers, objLen, partSize)
    require.Equal(t, objLen, len(data))

    resData, resHeader := getEncryptedObject(tc, bktName, objName)
    equalDataSlices(t, data, resData)
    require.Equal(t, headers[api.ContentType], resHeader.Get(api.ContentType))
    require.Equal(t, headers[headerMetaKey], resHeader[headerMetaKey][0])
    require.Equal(t, strconv.Itoa(objLen), resHeader.Get(api.ContentLength))

    checkContentUsingRangeEnc(tc, bktName, objName, data, 1000000)
    checkContentUsingRangeEnc(tc, bktName, objName, data, 10000000)
}

func TestMultipartUploadGetRange(t *testing.T) {
    hc := prepareHandlerContext(t)
    bktName, objName := "bucket-for-multipart-s3-tests", "multipart_obj"
    createTestBucket(hc, bktName)

    objLen := 30 * 1024 * 1024
    partSize := objLen / 6
    headerMetaKey := api.MetadataPrefix + "foo"
    headers := map[string]string{
        headerMetaKey:   "bar",
        api.ContentType: "text/plain",
    }

    data := multipartUpload(hc, bktName, objName, headers, objLen, partSize)
    require.Equal(t, objLen, len(data))

    resData, resHeader := getObject(hc, bktName, objName)
    equalDataSlices(t, data, resData)
    require.Equal(t, headers[api.ContentType], resHeader.Get(api.ContentType))
    require.Equal(t, headers[headerMetaKey], resHeader[headerMetaKey][0])
    require.Equal(t, strconv.Itoa(objLen), resHeader.Get(api.ContentLength))

    checkContentUsingRange(hc, bktName, objName, data, 1000000)
    checkContentUsingRange(hc, bktName, objName, data, 10000000)
}

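// equalDataSlices compares two payloads and, on mismatch, fails with the
// index of the first differing byte instead of dumping both slices.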
func equalDataSlices(t *testing.T, expected, actual []byte) {
    require.Equal(t, len(expected), len(actual), "sizes don't match")

    if bytes.Equal(expected, actual) {
        return
    }

    for i := 0; i < len(expected); i++ {
        if expected[i] != actual[i] {
            require.Equalf(t, expected[i], actual[i], "differ start with '%d' position, length: %d", i, len(expected))
        }
    }
}

func checkContentUsingRangeEnc(hc *handlerContext, bktName, objName string, data []byte, step int) {
    checkContentUsingRangeBase(hc, bktName, objName, data, step, true)
}

func checkContentUsingRange(hc *handlerContext, bktName, objName string, data []byte, step int) {
    checkContentUsingRangeBase(hc, bktName, objName, data, step, false)
}

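// checkContentUsingRangeBase re-reads the object in step-sized ranges
// (optionally with SSE-C headers) and checks each range against the matching
// slice of the original payload.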
func checkContentUsingRangeBase(hc *handlerContext, bktName, objName string, data []byte, step int, encrypted bool) {
    var off, toRead, end int

    for off < len(data) {
        toRead = len(data) - off
        if toRead > step {
            toRead = step
        }
        end = off + toRead - 1

        var rangeData []byte
        if encrypted {
            rangeData = getEncryptedObjectRange(hc.t, hc, bktName, objName, off, end)
        } else {
            rangeData = getObjectRange(hc.t, hc, bktName, objName, off, end)
        }

        equalDataSlices(hc.t, data[off:end+1], rangeData)

        off += step
    }
}

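// multipartUploadEncrypted performs a complete SSE-C multipart upload:
// create, upload parts of partsSize bytes (the last part may be shorter),
// complete. It returns the concatenated plaintext that was uploaded.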
func multipartUploadEncrypted(hc *handlerContext, bktName, objName string, headers map[string]string, objLen, partsSize int) (objData []byte) {
    multipartInfo := createMultipartUploadEncrypted(hc, bktName, objName, headers)

    var sum, currentPart int
    var etags []string
    adjustedSize := partsSize

    for sum < objLen {
        currentPart++

        sum += partsSize
        if sum > objLen {
            // the final part carries only the bytes that remain
            adjustedSize = objLen - (sum - partsSize)
        }

        etag, data := uploadPartEncrypted(hc, bktName, objName, multipartInfo.UploadID, currentPart, adjustedSize)
        etags = append(etags, etag)
        objData = append(objData, data...)
    }

    completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, etags)
    return
}

func multipartUpload(hc *handlerContext, bktName, objName string, headers map[string]string, objLen, partsSize int) (objData []byte) {
    multipartInfo := createMultipartUpload(hc, bktName, objName, headers)

    var sum, currentPart int
    var etags []string
    adjustedSize := partsSize

    for sum < objLen {
        currentPart++

        sum += partsSize
        if sum > objLen {
            // the final part carries only the bytes that remain
            adjustedSize = objLen - (sum - partsSize)
        }

        etag, data := uploadPart(hc, bktName, objName, multipartInfo.UploadID, currentPart, adjustedSize)
        etags = append(etags, etag)
        objData = append(objData, data...)
    }

    completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, etags)
    return
}

func createMultipartUploadEncrypted(hc *handlerContext, bktName, objName string, headers map[string]string) *InitiateMultipartUploadResponse {
    return createMultipartUploadOkBase(hc, bktName, objName, true, headers)
}

func createMultipartUpload(hc *handlerContext, bktName, objName string, headers map[string]string) *InitiateMultipartUploadResponse {
    return createMultipartUploadOkBase(hc, bktName, objName, false, headers)
}

func createMultipartUploadOkBase(hc *handlerContext, bktName, objName string, encrypted bool, headers map[string]string) *InitiateMultipartUploadResponse {
    w := createMultipartUploadBase(hc, bktName, objName, encrypted, headers)
    multipartInitInfo := &InitiateMultipartUploadResponse{}
    readResponse(hc.t, w, http.StatusOK, multipartInitInfo)
    return multipartInitInfo
}

func createMultipartUploadAssertS3Error(hc *handlerContext, bktName, objName string, headers map[string]string, code errors.ErrorCode) {
    w := createMultipartUploadBase(hc, bktName, objName, false, headers)
    assertS3Error(hc.t, w, errors.GetAPIError(code))
}

func createMultipartUploadBase(hc *handlerContext, bktName, objName string, encrypted bool, headers map[string]string) *httptest.ResponseRecorder {
    w, r := prepareTestRequest(hc, bktName, objName, nil)
    if encrypted {
        setEncryptHeaders(r)
    }
    setHeaders(r, headers)
    hc.Handler().CreateMultipartUploadHandler(w, r)
    return w
}

func completeMultipartUpload(hc *handlerContext, bktName, objName, uploadID string, partsETags []string) {
    w := completeMultipartUploadBase(hc, bktName, objName, uploadID, partsETags)
    assertStatus(hc.t, w, http.StatusOK)
}

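// completeMultipartUploadBase posts a CompleteMultipartUpload body in which
// part numbers are assigned sequentially from 1 in the order of the ETags.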
func completeMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID string, partsETags []string) *httptest.ResponseRecorder {
    query := make(url.Values)
    query.Set(uploadIDQuery, uploadID)
    complete := &CompleteMultipartUpload{
        Parts: []*layer.CompletedPart{},
    }
    for i, tag := range partsETags {
        complete.Parts = append(complete.Parts, &layer.CompletedPart{
            ETag:       tag,
            PartNumber: i + 1,
        })
    }

    w, r := prepareTestFullRequest(hc, bktName, objName, query, complete)
    hc.Handler().CompleteMultipartUploadHandler(w, r)

    return w
}

func abortMultipartUpload(hc *handlerContext, bktName, objName, uploadID string) {
    w := abortMultipartUploadBase(hc, bktName, objName, uploadID)
    assertStatus(hc.t, w, http.StatusNoContent)
}

func abortMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID string) *httptest.ResponseRecorder {
    query := make(url.Values)
    query.Set(uploadIDQuery, uploadID)

    w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
    hc.Handler().AbortMultipartUploadHandler(w, r)

    return w
}

func uploadPartEncrypted(hc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) {
    return uploadPartBase(hc, bktName, objName, true, uploadID, num, size)
}

func uploadPart(hc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) {
    return uploadPartBase(hc, bktName, objName, false, uploadID, num, size)
}

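// uploadPartBase uploads size random bytes as part number num and returns
// the part's ETag together with the generated payload.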
func uploadPartBase(hc *handlerContext, bktName, objName string, encrypted bool, uploadID string, num, size int) (string, []byte) {
    partBody := make([]byte, size)
    _, err := rand.Read(partBody)
    require.NoError(hc.t, err)

    query := make(url.Values)
    query.Set(uploadIDQuery, uploadID)
    query.Set(partNumberQuery, strconv.Itoa(num))

    w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, partBody)
    if encrypted {
        setEncryptHeaders(r)
    }
    hc.Handler().UploadPartHandler(w, r)
    assertStatus(hc.t, w, http.StatusOK)

    return w.Header().Get(api.ETag), partBody
}

func TestMultipartEncrypted(t *testing.T) {
    partSize := 5*1048576 + 1<<16 - 5 // 5 MiB (min part size) + 64 KiB (cipher block size) - 5 (to check the corner range)

    hc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-sse-c-multipart", "object-to-encrypt-multipart"
    createTestBucket(hc, bktName)

    multipartInitInfo := createMultipartUploadEncrypted(hc, bktName, objName, map[string]string{})
    part1ETag, part1 := uploadPartEncrypted(hc, bktName, objName, multipartInitInfo.UploadID, 1, partSize)
    part2ETag, part2 := uploadPartEncrypted(hc, bktName, objName, multipartInitInfo.UploadID, 2, 5)
    completeMultipartUpload(hc, bktName, objName, multipartInitInfo.UploadID, []string{part1ETag, part2ETag})

    res, _ := getEncryptedObject(hc, bktName, objName)
    require.Equal(t, len(part1)+len(part2), len(res))
    require.Equal(t, append(part1, part2...), res)

    part2Range := getEncryptedObjectRange(t, hc, bktName, objName, len(part1), len(part1)+len(part2)-1)
    require.Equal(t, part2[0:], part2Range)

    result := listVersions(t, hc, bktName)
    require.Len(t, result.Version, 1)
    require.EqualValues(t, uint64(partSize+5), result.Version[0].Size)
}

func putEncryptedObject(t *testing.T, tc *handlerContext, bktName, objName, content string) {
    body := bytes.NewReader([]byte(content))
    w, r := prepareTestPayloadRequest(tc, bktName, objName, body)
    setEncryptHeaders(r)
    tc.Handler().PutObjectHandler(w, r)
    assertStatus(t, w, http.StatusOK)
}

func putEncryptedObjectWithHeadersErr(t *testing.T, tc *handlerContext, bktName, objName, content string, headers map[string]string, code errors.ErrorCode) {
    body := bytes.NewReader([]byte(content))
    w, r := prepareTestPayloadRequest(tc, bktName, objName, body)
    setHeaders(r, headers)

    tc.Handler().PutObjectHandler(w, r)
    assertS3Error(t, w, errors.GetAPIError(code))
}

func getEncryptedObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header) {
    w, r := prepareTestRequest(hc, bktName, objName, nil)
    setEncryptHeaders(r)
    return getObjectBase(hc, w, r)
}

func getObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header) {
    w, r := prepareTestRequest(hc, bktName, objName, nil)
    return getObjectBase(hc, w, r)
}

func getObjectWithHeaders(hc *handlerContext, bktName, objName string, headers map[string]string) *httptest.ResponseRecorder {
    w, r := prepareTestRequest(hc, bktName, objName, nil)
    for k, v := range headers {
        r.Header.Set(k, v)
    }
    hc.Handler().GetObjectHandler(w, r)
    return w
}

func getObjectBase(hc *handlerContext, w *httptest.ResponseRecorder, r *http.Request) ([]byte, http.Header) {
    hc.Handler().GetObjectHandler(w, r)
    assertStatus(hc.t, w, http.StatusOK)
    content, err := io.ReadAll(w.Result().Body)
    require.NoError(hc.t, err)
    return content, w.Header()
}

func getEncryptedObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, start, end int) []byte {
    w, r := prepareTestRequest(tc, bktName, objName, nil)
    setEncryptHeaders(r)
    r.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
    tc.Handler().GetObjectHandler(w, r)
    assertStatus(t, w, http.StatusPartialContent)
    content, err := io.ReadAll(w.Result().Body)
    require.NoError(t, err)
    return content
}

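// setEncryptHeaders fakes a TLS connection state on the request (SSE-C is
// presumably rejected over plain HTTP) and sets the customer-provided
// encryption key headers.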
func setEncryptHeaders(r *http.Request) {
    r.TLS = &tls.ConnectionState{}
    r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
    r.Header.Set(api.AmzServerSideEncryptionCustomerKey, aes256Key)
    r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, aes256KeyMD5)
}

func setHeaders(r *http.Request, header map[string]string) {
    for key, val := range header {
        r.Header.Set(key, val)
    }
}

Some files were not shown because too many files have changed in this diff