Compare commits
No commits in common. "master" and "master" have entirely different histories.
301 changed files with 14137 additions and 40406 deletions
|
@ -1,14 +1,13 @@
|
||||||
FROM golang:1.22 AS builder
|
FROM golang:1.19 as builder
|
||||||
|
|
||||||
ARG BUILD=now
|
ARG BUILD=now
|
||||||
ARG REPO=git.frostfs.info/TrueCloudLab/frostfs-s3-gw
|
ARG REPO=git.frostfs.info/TrueCloudLab/frostfs-s3-gw
|
||||||
ARG VERSION=dev
|
ARG VERSION=dev
|
||||||
ARG GOFLAGS=""
|
|
||||||
|
|
||||||
WORKDIR /src
|
WORKDIR /src
|
||||||
COPY . /src
|
COPY . /src
|
||||||
|
|
||||||
RUN make GOFLAGS=${GOFLAGS}
|
RUN make
|
||||||
|
|
||||||
# Executable image
|
# Executable image
|
||||||
FROM alpine AS frostfs-s3-gw
|
FROM alpine AS frostfs-s3-gw
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
.git
|
.git
|
||||||
.cache
|
.cache
|
||||||
.forgejo
|
.github
|
||||||
|
|
|
@ -1,27 +0,0 @@
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
builds:
|
|
||||||
name: Builds
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
go_versions: [ '1.22', '1.23' ]
|
|
||||||
fail-fast: false
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Set up Go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '${{ matrix.go_versions }}'
|
|
||||||
|
|
||||||
- name: Build binary
|
|
||||||
run: make
|
|
||||||
|
|
||||||
- name: Check dirty suffix
|
|
||||||
run: if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi
|
|
|
@ -1,20 +0,0 @@
|
||||||
on: [pull_request]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
dco:
|
|
||||||
name: DCO
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Setup Go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '1.23'
|
|
||||||
|
|
||||||
- name: Run commit format checker
|
|
||||||
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
|
|
||||||
with:
|
|
||||||
from: 'origin/${{ github.event.pull_request.base.ref }}'
|
|
|
@ -1,27 +0,0 @@
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
push:
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
image:
|
|
||||||
name: OCI image
|
|
||||||
runs-on: docker
|
|
||||||
container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
|
|
||||||
steps:
|
|
||||||
- name: Clone git repo
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Build OCI image
|
|
||||||
run: make image
|
|
||||||
|
|
||||||
- name: Push image to OCI registry
|
|
||||||
run: |
|
|
||||||
echo "$REGISTRY_PASSWORD" \
|
|
||||||
| docker login --username truecloudlab --password-stdin git.frostfs.info
|
|
||||||
make image-push
|
|
||||||
if: >-
|
|
||||||
startsWith(github.ref, 'refs/tags/v') &&
|
|
||||||
(github.event_name == 'workflow_dispatch' || github.event_name == 'push')
|
|
||||||
env:
|
|
||||||
REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}
|
|
|
@ -1,45 +0,0 @@
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
lint:
|
|
||||||
name: Lint
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Set up Go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '1.23'
|
|
||||||
cache: true
|
|
||||||
|
|
||||||
- name: Install linters
|
|
||||||
run: make lint-install
|
|
||||||
|
|
||||||
- name: Run linters
|
|
||||||
run: make lint
|
|
||||||
|
|
||||||
tests:
|
|
||||||
name: Tests
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
go_versions: [ '1.22', '1.23' ]
|
|
||||||
fail-fast: false
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Set up Go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '${{ matrix.go_versions }}'
|
|
||||||
|
|
||||||
- name: Update Go modules
|
|
||||||
run: make dep
|
|
||||||
|
|
||||||
- name: Run tests
|
|
||||||
run: make test
|
|
|
@ -1,25 +0,0 @@
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
vulncheck:
|
|
||||||
name: Vulncheck
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Setup Go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '1.23'
|
|
||||||
|
|
||||||
- name: Install govulncheck
|
|
||||||
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
|
||||||
|
|
||||||
- name: Run govulncheck
|
|
||||||
run: govulncheck ./...
|
|
1
.github/CODEOWNERS
vendored
Normal file
1
.github/CODEOWNERS
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
* @alexvanin @KirillovDenis
|
0
.forgejo/logo.svg → .github/logo.svg
vendored
0
.forgejo/logo.svg → .github/logo.svg
vendored
Before Width: | Height: | Size: 5.5 KiB After Width: | Height: | Size: 5.5 KiB |
73
.github/workflows/builds.yml
vendored
Normal file
73
.github/workflows/builds.yml
vendored
Normal file
|
@ -0,0 +1,73 @@
|
||||||
|
name: Builds
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- 'support/*'
|
||||||
|
types: [ opened, synchronize ]
|
||||||
|
paths-ignore:
|
||||||
|
- '**/*.md'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build_cli:
|
||||||
|
name: Build CLI
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Setup Go
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: 1.19
|
||||||
|
|
||||||
|
- name: Restore Go modules from cache
|
||||||
|
uses: actions/cache@v2
|
||||||
|
with:
|
||||||
|
path: /home/runner/go/pkg/mod
|
||||||
|
key: deps-${{ hashFiles('go.sum') }}
|
||||||
|
|
||||||
|
- name: Get tree-service client
|
||||||
|
run: make sync-tree
|
||||||
|
|
||||||
|
- name: Update Go modules
|
||||||
|
run: make dep
|
||||||
|
|
||||||
|
- name: Build CLI
|
||||||
|
run: make
|
||||||
|
|
||||||
|
- name: Check version
|
||||||
|
run: if [[ $(make version) == *"dirty"* ]]; then exit 1; fi
|
||||||
|
|
||||||
|
build_image:
|
||||||
|
needs: build_cli
|
||||||
|
name: Build Docker image
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: 1.19
|
||||||
|
|
||||||
|
- name: Restore Go modules from cache
|
||||||
|
uses: actions/cache@v2
|
||||||
|
with:
|
||||||
|
path: /home/runner/go/pkg/mod
|
||||||
|
key: deps-${{ hashFiles('go.sum') }}
|
||||||
|
|
||||||
|
- name: Get tree-service client
|
||||||
|
run: make sync-tree
|
||||||
|
|
||||||
|
- name: Update Go modules
|
||||||
|
run: make dep
|
||||||
|
|
||||||
|
- name: Build Docker image
|
||||||
|
run: make image
|
67
.github/workflows/codeql-analysis.yml
vendored
Normal file
67
.github/workflows/codeql-analysis.yml
vendored
Normal file
|
@ -0,0 +1,67 @@
|
||||||
|
# For most projects, this workflow file will not need changing; you simply need
|
||||||
|
# to commit it to your repository.
|
||||||
|
#
|
||||||
|
# You may wish to alter this file to override the set of languages analyzed,
|
||||||
|
# or to provide custom queries or build logic.
|
||||||
|
#
|
||||||
|
# ******** NOTE ********
|
||||||
|
# We have attempted to detect the languages in your repository. Please check
|
||||||
|
# the `language` matrix defined below to confirm you have the correct set of
|
||||||
|
# supported CodeQL languages.
|
||||||
|
#
|
||||||
|
name: "CodeQL"
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ master, 'support/*' ]
|
||||||
|
pull_request:
|
||||||
|
# The branches below must be a subset of the branches above
|
||||||
|
branches: [ master, 'support/*' ]
|
||||||
|
schedule:
|
||||||
|
- cron: '35 8 * * 1'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
analyze:
|
||||||
|
name: Analyze
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
language: [ 'go' ]
|
||||||
|
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
|
||||||
|
# Learn more:
|
||||||
|
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
# Initializes the CodeQL tools for scanning.
|
||||||
|
- name: Initialize CodeQL
|
||||||
|
uses: github/codeql-action/init@v2
|
||||||
|
with:
|
||||||
|
languages: ${{ matrix.language }}
|
||||||
|
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||||
|
# By default, queries listed here will override any specified in a config file.
|
||||||
|
# Prefix the list here with "+" to use these queries and those in the config file.
|
||||||
|
# queries: ./path/to/local/query, your-org/your-repo/queries@main
|
||||||
|
|
||||||
|
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||||
|
# If this step fails, then you should remove it and run the build manually (see below)
|
||||||
|
- name: Autobuild
|
||||||
|
uses: github/codeql-action/autobuild@v2
|
||||||
|
|
||||||
|
# ℹ️ Command-line programs to run using the OS shell.
|
||||||
|
# 📚 https://git.io/JvXDl
|
||||||
|
|
||||||
|
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
|
||||||
|
# and modify them (or add more) to build your code if your project
|
||||||
|
# uses a compiled language
|
||||||
|
|
||||||
|
#- run: |
|
||||||
|
# make bootstrap
|
||||||
|
# make release
|
||||||
|
|
||||||
|
- name: Perform CodeQL Analysis
|
||||||
|
uses: github/codeql-action/analyze@v2
|
22
.github/workflows/dco.yml
vendored
Normal file
22
.github/workflows/dco.yml
vendored
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
name: DCO check
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- 'support/*'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
commits_check_job:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
name: Commits Check
|
||||||
|
steps:
|
||||||
|
- name: Get PR Commits
|
||||||
|
id: 'get-pr-commits'
|
||||||
|
uses: tim-actions/get-pr-commits@master
|
||||||
|
with:
|
||||||
|
token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
- name: DCO Check
|
||||||
|
uses: tim-actions/dco@master
|
||||||
|
with:
|
||||||
|
commits: ${{ steps.get-pr-commits.outputs.commits }}
|
96
.github/workflows/tests.yml
vendored
Normal file
96
.github/workflows/tests.yml
vendored
Normal file
|
@ -0,0 +1,96 @@
|
||||||
|
name: Tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- 'support/*'
|
||||||
|
types: [opened, synchronize]
|
||||||
|
paths-ignore:
|
||||||
|
- '**/*.md'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
lint:
|
||||||
|
name: Lint
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Get tree-service client
|
||||||
|
run: make sync-tree
|
||||||
|
|
||||||
|
- name: golangci-lint
|
||||||
|
uses: golangci/golangci-lint-action@v2
|
||||||
|
with:
|
||||||
|
version: latest
|
||||||
|
|
||||||
|
cover:
|
||||||
|
name: Coverage
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
|
||||||
|
env:
|
||||||
|
CGO_ENABLED: 1
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: 1.19
|
||||||
|
|
||||||
|
- name: Restore Go modules from cache
|
||||||
|
uses: actions/cache@v2
|
||||||
|
with:
|
||||||
|
path: /home/runner/go/pkg/mod
|
||||||
|
key: deps-${{ hashFiles('go.sum') }}
|
||||||
|
|
||||||
|
- name: Get tree-service client
|
||||||
|
run: make sync-tree
|
||||||
|
|
||||||
|
- name: Update Go modules
|
||||||
|
run: make dep
|
||||||
|
|
||||||
|
- name: Test and write coverage profile
|
||||||
|
run: make cover
|
||||||
|
|
||||||
|
- name: Upload coverage results to Codecov
|
||||||
|
uses: codecov/codecov-action@v1
|
||||||
|
with:
|
||||||
|
fail_ci_if_error: false
|
||||||
|
path_to_write_report: ./coverage.txt
|
||||||
|
verbose: true
|
||||||
|
|
||||||
|
tests:
|
||||||
|
name: Tests
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
go_versions: [ '1.18.x', '1.19.x' ]
|
||||||
|
fail-fast: false
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: '${{ matrix.go_versions }}'
|
||||||
|
|
||||||
|
- name: Restore Go modules from cache
|
||||||
|
uses: actions/cache@v2
|
||||||
|
with:
|
||||||
|
path: /home/runner/go/pkg/mod
|
||||||
|
key: deps-${{ hashFiles('go.sum') }}
|
||||||
|
|
||||||
|
- name: Get tree-service client
|
||||||
|
run: make sync-tree
|
||||||
|
|
||||||
|
- name: Update Go modules
|
||||||
|
run: make dep
|
||||||
|
|
||||||
|
- name: Run tests
|
||||||
|
run: make test
|
3
.gitignore
vendored
3
.gitignore
vendored
|
@ -2,6 +2,9 @@
|
||||||
.idea
|
.idea
|
||||||
.vscode
|
.vscode
|
||||||
|
|
||||||
|
# Tree service
|
||||||
|
internal/frostfs/services/tree/
|
||||||
|
|
||||||
# Vendoring
|
# Vendoring
|
||||||
vendor
|
vendor
|
||||||
|
|
||||||
|
|
|
@ -4,7 +4,7 @@
|
||||||
# options for analysis running
|
# options for analysis running
|
||||||
run:
|
run:
|
||||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||||
timeout: 15m
|
timeout: 5m
|
||||||
|
|
||||||
# include test files or not, default is true
|
# include test files or not, default is true
|
||||||
tests: true
|
tests: true
|
||||||
|
@ -12,8 +12,7 @@ run:
|
||||||
# output configuration options
|
# output configuration options
|
||||||
output:
|
output:
|
||||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
||||||
formats:
|
format: tab
|
||||||
- format: tab
|
|
||||||
|
|
||||||
# all available settings of specific linters
|
# all available settings of specific linters
|
||||||
linters-settings:
|
linters-settings:
|
||||||
|
@ -25,16 +24,6 @@ linters-settings:
|
||||||
govet:
|
govet:
|
||||||
# report about shadowed variables
|
# report about shadowed variables
|
||||||
check-shadowing: false
|
check-shadowing: false
|
||||||
custom:
|
|
||||||
truecloudlab-linters:
|
|
||||||
path: bin/external_linters.so
|
|
||||||
original-url: git.frostfs.info/TrueCloudLab/linters.git
|
|
||||||
settings:
|
|
||||||
noliteral:
|
|
||||||
enable: true
|
|
||||||
target-methods: ["Fatal"]
|
|
||||||
disable-packages: ["codes", "tc"]
|
|
||||||
constants-package: "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
|
|
||||||
linters:
|
linters:
|
||||||
enable:
|
enable:
|
||||||
|
@ -56,7 +45,6 @@ linters:
|
||||||
- gofmt
|
- gofmt
|
||||||
- whitespace
|
- whitespace
|
||||||
- goimports
|
- goimports
|
||||||
- truecloudlab-linters
|
|
||||||
disable-all: true
|
disable-all: true
|
||||||
fast: false
|
fast: false
|
||||||
|
|
||||||
|
@ -66,6 +54,3 @@ issues:
|
||||||
- EXC0003 # test/Test ... consider calling this
|
- EXC0003 # test/Test ... consider calling this
|
||||||
- EXC0004 # govet
|
- EXC0004 # govet
|
||||||
- EXC0005 # C-style breaks
|
- EXC0005 # C-style breaks
|
||||||
exclude-dirs:
|
|
||||||
- api/auth/signer/v4asdk2
|
|
||||||
- api/auth/signer/v4sdk2
|
|
||||||
|
|
|
@ -30,23 +30,16 @@ repos:
|
||||||
hooks:
|
hooks:
|
||||||
- id: shellcheck
|
- id: shellcheck
|
||||||
|
|
||||||
|
- repo: https://github.com/golangci/golangci-lint
|
||||||
|
rev: v1.51.2
|
||||||
|
hooks:
|
||||||
|
- id: golangci-lint
|
||||||
|
|
||||||
- repo: local
|
- repo: local
|
||||||
hooks:
|
hooks:
|
||||||
- id: make-lint-install
|
- id: go-unit-tests
|
||||||
name: install linters
|
name: go unit tests
|
||||||
entry: make lint-install
|
entry: make test
|
||||||
language: system
|
pass_filenames: false
|
||||||
pass_filenames: false
|
types: [go]
|
||||||
|
language: system
|
||||||
- id: make-lint
|
|
||||||
name: run linters
|
|
||||||
entry: make lint
|
|
||||||
language: system
|
|
||||||
pass_filenames: false
|
|
||||||
|
|
||||||
- id: go-unit-tests
|
|
||||||
name: go unit tests
|
|
||||||
entry: make test
|
|
||||||
pass_filenames: false
|
|
||||||
types: [go]
|
|
||||||
language: system
|
|
||||||
|
|
780
CHANGELOG.md
780
CHANGELOG.md
|
@ -4,383 +4,431 @@ This document outlines major changes between releases.
|
||||||
|
|
||||||
## [Unreleased]
|
## [Unreleased]
|
||||||
|
|
||||||
## [0.32.0] - Khumbu - 2024-12-20
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Metric of dropped logs by log sampler (#502)
|
|
||||||
- SigV4A signature algorithm (#339)
|
|
||||||
- TLS Termination header for SSE-C (#562)
|
|
||||||
- Kludge profile support (#147)
|
|
||||||
- Netmap support in tree pool (#577)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Improved multipart removal speed (#559)
|
|
||||||
- Updated tree service pool without api-go dependency (#570)
|
|
||||||
|
|
||||||
## [0.31.3] - 2024-12-17
|
|
||||||
|
|
||||||
### Fixed
|
### Fixed
|
||||||
- Return BucketAlreadyExists when global domain taken (#584)
|
- Get empty bucket CORS from frostfs (TrueCloudLab#36)
|
||||||
- Fix list-buckets vhs routing (#583)
|
|
||||||
- Skip port when matching listen domains (#586)
|
|
||||||
|
|
||||||
## [0.31.2] - 2024-12-13
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Unable to remove EC object (#576)
|
|
||||||
|
|
||||||
## [0.31.1] - 2024-11-28
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Ignore precondition headers with invalid date format (#563)
|
|
||||||
- MD5 calculation of object-part with SSE-C (#543)
|
|
||||||
|
|
||||||
## [0.31.0] - Rongbuk - 2024-11-20
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Docker warnings during image build (#421)
|
|
||||||
- `PartNumberMarker` in ListMultipart response (#451)
|
|
||||||
- PostObject handling (#456)
|
|
||||||
- Tag logging errors (#452)
|
|
||||||
- Removing of duplicated parts in tree service during split brain (#448)
|
|
||||||
- Container resolving (#482)
|
|
||||||
- FrostFS to S3 error transformation (#488)
|
|
||||||
- Default bucket routing (#507)
|
|
||||||
- encoding-type in ListBucketObjectVersions (#404)
|
|
||||||
- SIGHUP support for `tracing.enabled` config parameter (#520)
|
|
||||||
- `trace_id` parameter in logs (#501)
|
|
||||||
- Listing marker processing (#539)
|
|
||||||
- Content-MD5 header check (#540)
|
|
||||||
- Precondition check (#538)
|
|
||||||
- Bucket name check during all S3 operations (#556)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Support for separate container for all CORS settings (#422)
|
|
||||||
- `X-Amz-Force-Delete-Bucket` header for forced bucket removal (#31)
|
|
||||||
- `Location` support in CompleteMultipart response (#451)
|
|
||||||
- Tree pool request duration metric (#447)
|
|
||||||
- Expiration lifecycle configuration support (#42, #412, #459, #460, #516, #536)
|
|
||||||
- Add support for virtual hosted style addressing (#446, #449, #493)
|
|
||||||
- Support `frostfs.graceful_close_on_switch_timeout` (#475)
|
|
||||||
- Vulnerability report document (#413)
|
|
||||||
- Support patch object method (#462, #473, #466, #479)
|
|
||||||
- Enhanced logging and request reproducer (#369)
|
|
||||||
- Root CA configuration for tracing (#484)
|
|
||||||
- Log sampling policy configuration (#461)
|
|
||||||
- `sign` command to `frostfs-s3-authmate` (#467)
|
|
||||||
- Support custom aws credentials (#509)
|
|
||||||
- Source IP binding configuration for FrostFS requests (#521)
|
|
||||||
- Tracing attributes (#549)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Split `FrostFS` interface into separate read methods (#427)
|
|
||||||
- golangci-lint v1.60 support (#474)
|
|
||||||
- Updated Go version to 1.22 (#470)
|
|
||||||
- Container removal after failed bucket creation (#434)
|
|
||||||
- Explicit check for `.` symbol in bucket name (#506)
|
|
||||||
- Transaction waiter in contract clients (#522)
|
|
||||||
- Avoid maintenance mode storage node during object operations (#524)
|
|
||||||
- Content-Type does not include in Presigned URL of s3-authmate (#505)
|
|
||||||
- Check owner ID before deleting bucket (#528)
|
|
||||||
- S3-Authmate now uses APE instead basic-ACL (#553)
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
- Reduce using mutex when update app settings (#329)
|
|
||||||
|
|
||||||
## [0.30.9] - 2024-12-13
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Unable to remove EC object (#576)
|
|
||||||
|
|
||||||
## [0.30.8] - 2024-10-18
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Error handling for correct connection switch in SDK Pool (#517)
|
|
||||||
|
|
||||||
## [0.30.7] - 2024-10-03
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Correct aws-chunk encoding size handling (#511)
|
|
||||||
|
|
||||||
|
|
||||||
## [0.30.6] - 2024-09-17
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Object size of objects upload with aws-chunked encoding (#450)
|
|
||||||
- Object size of objects upload with negative Content-Length (#486)
|
|
||||||
|
|
||||||
## [0.30.5] - 2024-09-16
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Panic catchers for fuzzing tests (#492)
|
|
||||||
|
|
||||||
## [0.30.4] - 2024-09-03
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Fuzzing tests (#480)
|
|
||||||
|
|
||||||
## [0.30.3] - 2024-08-27
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Empty listing when multipart upload contains more than 1000 parts (#471)
|
|
||||||
|
|
||||||
## [0.30.2] - 2024-08-20
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Error counting in pool component before connection switch (#468)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Log of endpoint address during tree pool errors (#468)
|
|
||||||
|
|
||||||
## [0.30.1] - 2024-07-25
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Redundant system node removal in tree service (#437)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Log details on SDK Pool health status change (#439)
|
|
||||||
|
|
||||||
## [0.30.0] - Kangshung -2024-07-19
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Fix HTTP/2 requests (#341)
|
|
||||||
- Fix Decoder.CharsetReader is nil (#379)
|
|
||||||
- Fix flaky ACL encode test (#340)
|
|
||||||
- Docs grammar (#432)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Add new `reconnect_interval` config param for server rebinding (#291)
|
|
||||||
- Support `GetBucketPolicyStatus` (#301)
|
|
||||||
- Support request IP filter with policy (#371, #377)
|
|
||||||
- Support tag checks in policies (#357, #365, #392, #403, #411)
|
|
||||||
- Support IAM-MFA checks (#367)
|
|
||||||
- More docs (#334, #353)
|
|
||||||
- Add `register-user` command to `authmate` (#414)
|
|
||||||
- `User` field in request log (#396)
|
|
||||||
- Erasure coding support in placement policy (#400)
|
|
||||||
- Improved test coverage (#402)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Update dependencies noted by govulncheck (#368)
|
|
||||||
- Improve test coverage (#380, #387)
|
|
||||||
- Support updated naming in native policy JSON (#385)
|
|
||||||
- Improve determining AccessBox latest version (#335)
|
|
||||||
- Don't set full_control policy for bucket owner (#407)
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
- Remove control api (#406)
|
|
||||||
- Remove notifications (#401)
|
|
||||||
- Remove `layer.Client` interface (#410)
|
|
||||||
- Remove extended ACL related code (#372)
|
|
||||||
|
|
||||||
## [0.29.3] - 2024-07-19
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Support tree split environment when multiple nodes
|
|
||||||
may be part of the same sub path (#430)
|
|
||||||
- Collision of multipart name and system data in the tree (#430)
|
|
||||||
- Workaround for removal of multiple null versions in unversioned bucket (#430)
|
|
||||||
|
|
||||||
## [0.29.2] - 2024-07-03
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Parsing of put-bucket-setting retry configuration (#398)
|
|
||||||
|
|
||||||
## [0.29.1] - 2024-06-20
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- OPTIONS request processing for object operations (#399)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Retries of put-bucket-setting operation during container creation (#398)
|
|
||||||
|
|
||||||
## [0.29.0] - Zemu - 2024-05-27
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Fix marshaling errors in `DeleteObjects` method (#222)
|
|
||||||
- Fix status code in GET/HEAD delete marker (#226)
|
|
||||||
- Fix `NextVersionIDMarker` in `list-object-versions` (#248)
|
|
||||||
- Fix possibility of panic during SIGHUP (#288)
|
|
||||||
- Fix flaky `TestErrorTimeoutChecking` (`make test` sometimes failed) (#290)
|
|
||||||
- Fix log-level change on SIGHUP (#313)
|
|
||||||
- Fix anonymous put request (#311)
|
|
||||||
- Fix routine leak from nns resolver (#324)
|
|
||||||
- Fix svace errors (#325, #328)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Add new `frostfs.buffer_max_size_for_put` config param and sync TZ hash for PUT operations (#197)
|
|
||||||
- Add `X-Amz-Version-Id` header after complete multipart upload (#227)
|
|
||||||
- Add handling of `X-Amz-Copy-Source-Server-Side-Encryption-Customer-*` headers during copy (#217)
|
|
||||||
- Add new `logger.destination` config param (#236)
|
|
||||||
- Add `X-Amz-Content-Sha256` header validation (#218)
|
|
||||||
- Support frostfsid contract. See `frostfsid` config section (#260)
|
|
||||||
- Support per namespace placement policies configuration (see `namespaces.config` config param) (#266)
|
|
||||||
- Support control api to manage policies. See `control` config section (#258)
|
|
||||||
- Add `namespace` label to billing metrics (#271)
|
|
||||||
- Support policy-engine (#257, #259, #282, #283, #302, #307, #345, #351, #358, #360, #362, #383, #354)
|
|
||||||
- Support `proxy` contract (#287)
|
|
||||||
- Authmate: support custom attributes (#292)
|
|
||||||
- Add FrostfsID cache (#269)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Generalise config param `use_default_xmlns_for_complete_multipart` to `use_default_xmlns` so that use default xmlns for all requests (#221)
|
|
||||||
- Set server IdleTimeout and ReadHeaderTimeout to `30s` and allow to configure them (#220)
|
|
||||||
- Return `ETag` value in quotes (#219)
|
|
||||||
- Use tombstone when delete multipart upload (#275)
|
|
||||||
- Support new parameter `cache.accessbox.removing_check_interval` (#305)
|
|
||||||
- Use APE rules instead of eACL in container creation (#306)
|
|
||||||
- Rework bucket policy with policy-engine (#261)
|
|
||||||
- Improved object listing speed (#165, #347)
|
|
||||||
- Logging improvement (#300, #318)
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
- Drop sending whitespace characters during complete multipart upload and related config param `kludge.complete_multipart_keepalive` (#227)
|
|
||||||
- Unused legacy minio related code (#299)
|
|
||||||
- Redundant output with journald logging (#298)
|
|
||||||
|
|
||||||
## [0.28.2] - 2024-05-27
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- `anon` user in billing metrics (#321)
|
|
||||||
- Parts are not removed when multipart object removed (#370)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Put request in duration metrics (#280)
|
|
||||||
|
|
||||||
## [0.28.1] - 2024-01-24
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- MD5 hash as ETag and response header (#205)
|
|
||||||
- Tree pool traversal limit (#262)
|
|
||||||
|
|
||||||
### Updating from 0.28.0
|
|
||||||
|
|
||||||
See new `features.md5.enabled` and `frostfs.tree_pool_max_attempts` config
|
|
||||||
parameters.
|
|
||||||
|
|
||||||
## [0.28.0] - Academy of Sciences - 2023-12-07
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Handle negative `Content-Length` on put (#125)
|
|
||||||
- Use `DisableURIPathEscaping` to presign urls (#125)
|
|
||||||
- Use specific s3 errors instead of `InternalError` where possible (#143)
|
|
||||||
- `grpc` schemas in tree configuration (#166)
|
|
||||||
- Return appropriate 404 code when object missed in storage but there is in gate cache (#158)
|
|
||||||
- Replace part on re-upload when use multipart upload (#176)
|
|
||||||
- Fix goroutine leak on put object error (#178)
|
|
||||||
- Fix parsing signed headers in presigned urls (#182)
|
|
||||||
- Fix url escaping (#188)
|
|
||||||
- Use correct keys in `list-multipart-uploads` response (#185)
|
|
||||||
- Fix parsing `key-marker` for object list versions (#237)
|
|
||||||
- `GetSubTree` failures (#179)
|
|
||||||
- Unexpected EOF during multipart download (#210)
|
|
||||||
- Produce clean version in debian build (#245)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Add `trace_id` value into log record when tracing is enabled (#142)
|
|
||||||
- Add basic error types and exit codes to `frostfs-s3-authmate` (#152)
|
|
||||||
- Add a metric with addresses of nodes of the same and highest priority that are currently healthy (#186)
|
|
||||||
- Support dump metrics descriptions (#80)
|
|
||||||
- Add `copies_numbers` section to `placement_policy` in config file and support vectors of copies numbers (#70, #101)
|
|
||||||
- Support impersonate bearer token (#81, #105)
|
|
||||||
- Reload default and custom copies numbers on SIGHUP (#104)
|
|
||||||
- Tracing support (#84, #140)
|
|
||||||
- Return bearer token in `s3-authmate obtain-secret` result (#132)
|
|
||||||
- Support multiple version credentials using GSet (#135)
|
|
||||||
- Implement chunk uploading (#106)
|
|
||||||
- Add new `kludge.bypass_content_encoding_check_in_chunks` config param (#146)
|
|
||||||
- Add new `frostfs.client_cut` config param (#192)
|
|
||||||
- Add selection of the node of the latest version of the object (#231)
|
|
||||||
- Soft memory limit with `runtime.soft_memory_limit` (#196)
|
|
||||||
- `server_health` metric for every S3 endpoint status (#199)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Update prometheus to v1.15.0 (#94)
|
|
||||||
- Update go version to go1.19 (#118)
|
|
||||||
- Remove object from tree and reset its cache on object deletion when it is already removed from storage (#78)
|
|
||||||
- Finish rebranding (#2)
|
|
||||||
- Timeout errors has code 504 now (#103)
|
|
||||||
- Use request scope logger (#111)
|
|
||||||
- Add `s3-authmate update-secret` command (#131)
|
|
||||||
- Use default registerer for app metrics (#155)
|
|
||||||
- Use chi router instead of archived gorlilla/mux (#149, #174, #188)
|
|
||||||
- Complete multipart upload doesn't unnecessary copy now. Thus, the total time of multipart upload was reduced by 2 times (#63)
|
|
||||||
- Use gate key to form object owner (#175)
|
|
||||||
- Apply placement policies and copies if there is at least one valid value (#168)
|
|
||||||
- `statistic_tx_bytes_total` and `statistic_rx_bytes_total` metric to `statistic_bytes_total` metric with `direction` label (#153)
|
|
||||||
- Refactor of context-stored data receivers (#137)
|
|
||||||
- Refactor fetch/parse config parameters functions (#117)
|
|
||||||
- Move all log messages to constants (#96)
|
|
||||||
- Allow zero value of `part-number-marker` (#207)
|
|
||||||
- Clean tag node in the tree service instead of removal (#233)
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
- Drop `tree.service` param (now endpoints from `peers` section are used) (#133)
|
|
||||||
|
|
||||||
## [0.27.0] - Karpinsky - 2023-07-12
|
|
||||||
|
|
||||||
This is a first FrostFS S3 Gateway release named after
|
|
||||||
[Karpinsky glacier](https://en.wikipedia.org/wiki/Karpinsky_Glacier).
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Using multiple servers require only one healthy (#12)
|
|
||||||
- Renew token before it expires (#20)
|
|
||||||
- Add generated deb builder files to .gitignore, and fix typo (#28)
|
|
||||||
- Get empty bucket CORS from frostfs (#36)
|
|
||||||
- Don't count pool error on client abort (#35)
|
- Don't count pool error on client abort (#35)
|
||||||
- Handle request cancelling (#69)
|
|
||||||
- Clean up List and Name caches when object is missing in Tree service (#57)
|
|
||||||
- Don't create unnecessary delete-markers (#83)
|
|
||||||
- `Too many pings` error (#145)
|
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
- Billing metrics (#5, #26, #29)
|
- Return container name in `head-bucket` response (TrueCloudLab#18)
|
||||||
- Return container name in `head-bucket` response (#18)
|
- Billing metrics (TrueCloudLab#5)
|
||||||
- Multiple configs support (#21)
|
- Multiple configs support (TrueCloudLab#21)
|
||||||
- Bucket name resolving policy (#25)
|
- Bucket name resolving policy (TrueCloudLab#25)
|
||||||
- Support string `Action` and `Resource` fields in `bucketPolicy.Statement` (#32)
|
- Support string `Action` and `Resource` fields in `bucketPolicy.Statement` (TrueCloudLab#32)
|
||||||
- Add new `kludge.use_default_xmlns_for_complete_multipart` config param (#40)
|
- Add new `kludge.use_default_xmlns_for_complete_multipart` config param (TrueCloudLab#40)
|
||||||
- Return `X-Owner-Id` in `head-bucket` response (#79)
|
|
||||||
- Support multiple tree service endpoints (#74, #110, #114)
|
|
||||||
|
|
||||||
### Changed
|
### Changed
|
||||||
- Repository rebranding (#1)
|
|
||||||
- Update neo-go to v0.101.0 (#14)
|
- Update neo-go to v0.101.0 (#14)
|
||||||
- Update viper to v1.15.0 (#14)
|
- Update viper to v1.15.0 (#14)
|
||||||
- Update go version to go1.18 (#16)
|
- Using multiple servers require only one healthy (TrueCloudLab#12)
|
||||||
- Return error on invalid LocationConstraint (#23)
|
- Update go version to go1.18 (TrueCloudLab#16)
|
||||||
- Limit number of objects to delete at one time (#37)
|
- Return error on invalid LocationConstraint (TrueCloudLab#23)
|
||||||
|
- Place billing metrics to separate url path (TrueCloudLab#26)
|
||||||
|
- Add generated deb builder files to .gitignore, and fix typo (TrueCloudLab#28)
|
||||||
|
- Limit number of objects to delete at one time (TrueCloudLab#37)
|
||||||
- CompleteMultipartUpload handler now sends whitespace characters to keep alive client's connection (#60)
|
- CompleteMultipartUpload handler now sends whitespace characters to keep alive client's connection (#60)
|
||||||
- Support new system attributes (#64)
|
- Support new system attributes (#64)
|
||||||
- Abstract network communication in TreeClient (#59, #75)
|
|
||||||
- Changed values for `frostfs_s3_gw_state_health` metric (#91)
|
## [0.26.0] - 2022-12-28
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Use client time as `now` in some requests (#726)
|
||||||
|
- Reload policies on SIGHUP (#747)
|
||||||
|
- Authmate flags for pool timeouts (#760)
|
||||||
|
- Multiple server listeners (#742)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Placement policy configuration (#568)
|
||||||
|
- Improved debug logging of CID and OID values (#754)
|
||||||
|
|
||||||
|
### Removed
|
||||||
|
- Deprecated linters (#755)
|
||||||
|
|
||||||
|
### Updating from v0.25.1
|
||||||
|
New config parameters were added. And old one `defaul_policy` were changed.
|
||||||
|
```yaml
|
||||||
|
placement_policy:
|
||||||
|
default: "REP 3"
|
||||||
|
region_mapping: /path/to/container/policies.json
|
||||||
|
```
|
||||||
|
|
||||||
|
Make sure you update the config accordingly:
|
||||||
|
If you configure application using environment variables change:
|
||||||
|
* `S3_GW_DEFAULT_POLICY` -> `S3_GW_PLACEMENT_POLICY_DEFAULT_POLICY`
|
||||||
|
* `S3_GW_LISTEN_ADDRESS` -> `S3_GW_SERVER_0_ADDRESS`
|
||||||
|
* `S3_GW_TLS_CERT_FILE` -> `S3_GW_SERVER_0_TLS_CERT_FILE` (and set `S3_GW_SERVER_0_TLS_ENABLED=true`)
|
||||||
|
* `S3_GW_TLS_KEY_FILE` -> `S3_GW_SERVER_0_TLS_KEY_FILE` (and set `S3_GW_SERVER_0_TLS_ENABLED=true`)
|
||||||
|
|
||||||
|
If you configure application using `.yaml` file change:
|
||||||
|
* `defaul_policy` -> `placement_policy.default`
|
||||||
|
* `listen_address` -> `server.0.address`
|
||||||
|
* `tls.cert_file` -> `server.0.tls.cert_file` (and set `server.0.tls.enabled: true`)
|
||||||
|
* `tls.key_file` -> `server.0.tls.key_file` (and set `server.0.tls.enabled: true`)
|
||||||
|
|
||||||
|
## [0.25.1] - 2022-10-30
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Empty bucket policy (#740)
|
||||||
|
- Big object removal (#749)
|
||||||
|
- Checksum panic (#741)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Debian packaging (#737)
|
||||||
|
- Timeout for individual operations in streaming RPC (#750)
|
||||||
|
|
||||||
|
## [0.25.0] - 2022-10-31
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Legal hold object lock enabling (#709)
|
||||||
|
- Errors at object locking (#719)
|
||||||
|
- Unrestricted access to not owned objects via cache (#713)
|
||||||
|
- Check tree service health (#699)
|
||||||
|
- Bucket names in listing (#733)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Config reloading on SIGHUP (#702, #715, #716)
|
||||||
|
- Stop pool dial on SIGINT (#712)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- GitHub actions update (#710)
|
||||||
|
- Makefile help (#725)
|
||||||
|
- Optimized object tags setting (#669)
|
||||||
|
- Improved logging (#728)
|
||||||
|
- Unified unit test names (#617)
|
||||||
|
- Improved docs (#732)
|
||||||
|
|
||||||
|
### Removed
|
||||||
|
- Unused cache methods (#650)
|
||||||
|
|
||||||
|
### Updating from v0.24.0
|
||||||
|
New config parameters were added. Make sure the default parameters are appropriate for you.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
cache:
|
||||||
|
accesscontrol:
|
||||||
|
lifetime: 1m
|
||||||
|
size: 100000
|
||||||
|
```
|
||||||
|
|
||||||
|
## [0.24.0] - 2022-09-14
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Exposure of pool metrics (#615, #680)
|
||||||
|
- Configuration of `set_copies_number` (#634, #637)
|
||||||
|
- Configuration of list of allowed `AccessKeyID` prefixes (#674)
|
||||||
|
- Tagging directive for `CopyObject` (#666, #683)
|
||||||
|
- Customer encryption (#595)
|
||||||
|
- `CopiesNumber` configuration (#634, #637)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Improved wallet configuration via `.yaml` config and environment variables (#607)
|
||||||
|
- Update go version for build to 1.19 (#694, #705)
|
||||||
|
- Update version calculation (#653, #697)
|
||||||
|
- Optimized lock creation (#692)
|
||||||
|
- Update way to configure `listen_domains` (#667)
|
||||||
|
- Use `FilePath` instead of `FileName` for object keys (#657)
|
||||||
|
- Optimize listing (#625, #616)
|
||||||
|
|
||||||
|
### Removed
|
||||||
|
- Drop any object search logic (#545)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Responses to `GetObject` and `HeadObject`: removed redundant `VersionID` (#577, #682)
|
||||||
|
- Replacement of object tagging in case of overwriting of an object (#645)
|
||||||
|
- Using tags cache with empty `versionId` (#643)
|
||||||
|
- Fix panic on go1.19 (#678)
|
||||||
|
- Fix panic on invalid versioning status (#660)
|
||||||
|
- Fix panic on missing decrypt reader (#704)
|
||||||
|
- Using multipart uploads with `/` in name (#671)
|
||||||
|
- Don't update settings cache when request fails (#661)
|
||||||
|
- Fix handling `X-Amz-Copy-Source` header (#672)
|
||||||
|
- ACL related problems (#676, #606)
|
||||||
|
- Using `ContinuationToken` for "directories" (#684)
|
||||||
|
- Fix `connection was closed` error (#656)
|
||||||
|
- Fix listing for nested objects (#624)
|
||||||
|
- Fix anon requests to tree service (#504, #505)
|
||||||
|
|
||||||
|
### Updating from v0.23.0
|
||||||
|
Make sure your configuration is valid:
|
||||||
|
|
||||||
|
If you configure application using environment variables change:
|
||||||
|
* `S3_GW_WALLET` -> `S3_GW_WALLET_PATH`
|
||||||
|
* `S3_GW_ADDRESS` -> `S3_GW_WALLET_ADDRESS`
|
||||||
|
* `S3_GW_LISTEN_DOMAINS_N` -> `S3_GW_LISTEN_DOMAINS` (use it as array variable)
|
||||||
|
|
||||||
|
If you configure application using `.yaml` file change:
|
||||||
|
* `wallet` -> `wallet.path`
|
||||||
|
* `address` -> `wallet.address`
|
||||||
|
* `listen_domains.n` -> `listen_domains` (use it as array param)
|
||||||
|
|
||||||
|
|
||||||
|
## [0.23.0] - 2022-08-01
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- System metadata are filtered now (#619)
|
||||||
|
- List objects in corner cases (#612, #627)
|
||||||
|
- Correct removal of a deleted object (#610)
|
||||||
|
- Bucket creation could lead to "no healthy client" error (#636)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- New param to configure pool error threshold (#633)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Pprof and prometheus metrics configuration (#591)
|
||||||
|
- Don't set sticky bit in authmate container (#540)
|
||||||
|
- Updated compatibility table (#638)
|
||||||
|
- Rely on string sanitizing from zap (#498)
|
||||||
|
|
||||||
|
### Updating from v0.22.0
|
||||||
|
1. To enable pprof use `pprof.enabled` instead of `pprof` in config.
|
||||||
|
To enable prometheus metrics use `prometheus.enabled` instead of `metrics` in config.
|
||||||
|
If you are using the command line flags you can skip this step.
|
||||||
|
|
||||||
|
## [0.22.0] - 2022-07-25
|
||||||
|
|
||||||
|
Tree service support
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Error logging (#450)
|
||||||
|
- Default bucket location constraint (#463)
|
||||||
|
- Suspended versioning status (#462)
|
||||||
|
- CodeQL warnings (#489, #522, #539)
|
||||||
|
- Bearer token behaviour with non-owned buckets (#459)
|
||||||
|
- ACL issues (#495, #553, #571, #573, #574, #580)
|
||||||
|
- Authmate policy parsing (#558)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Public key output in authmate issue-secret command (#482)
|
||||||
|
- Support of conditional headers (#484)
|
||||||
|
- Cache type cast error logging (#465)
|
||||||
|
- `docker/*` target in Makefile (#471)
|
||||||
|
- Pre signed requests (#529)
|
||||||
|
- Tagging and ACL notifications (#361)
|
||||||
|
- AWSv4 signer package to improve compatibility with S3 clients (#528)
|
||||||
|
- Extension mimetype detector (#289)
|
||||||
|
- Default params documentation (#592)
|
||||||
|
- Health metric (#600)
|
||||||
|
- Parallel object listing (#525)
|
||||||
|
- Tree service (see commit links from #609)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Reduce number of network requests (#439, #441)
|
||||||
|
- Renamed authmate to s3-authmate (#518)
|
||||||
|
- Version output (#578)
|
||||||
|
- Improved error messages (#539)
|
||||||
|
|
||||||
|
### Removed
|
||||||
|
- `layer/neofs` package (#438)
|
||||||
|
|
||||||
|
## [0.21.1] - 2022-05-16
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Update go version to go1.17 (#427)
|
||||||
|
- Set homomorphic hashing disable attribute in container if required (#435)
|
||||||
|
|
||||||
|
## [0.21.0] - 2022-05-13
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Support of get-object-attributes (#430)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Reduced time of bucket creation (#426)
|
||||||
|
- Bucket removal (#428)
|
||||||
|
- Obtainment of ETag value (#431)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Authmate doesn't parse session context anymore, now it accepts application defined
|
||||||
|
flexible structure with container ID in human-readable format (#428)
|
||||||
|
|
||||||
|
## [0.20.0] - 2022-04-29
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Support of object locking (#195)
|
||||||
|
- Support of basic notifications (#357, #358, #359)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Logger behavior: now it writes to stderr instead of stdout, app name and
|
||||||
|
version are always presented and fixed, all user options except of `level` are
|
||||||
|
dropped (#380)
|
||||||
|
- Improved docs, added config examples (#396, #398)
|
||||||
|
- Updated NeoFS SDK (#365, #409)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Added check of `SetEACL` tokens before processing of requests (#347)
|
||||||
|
- Authmate: returned lost session tokens when a parameter `--session-token` is
|
||||||
|
omitted (#387)
|
||||||
|
- Error when a bucket hasn't a settings file (#389)
|
||||||
|
- Response to a request to delete not existing object (#392)
|
||||||
|
- Replaced gate key in ACL Grantee by key of bearer token issuer (#395)
|
||||||
|
- Missing attach of bearer token to requests to put system object (#399)
|
||||||
|
- Deletion of system object while CompleteMultipartUpload (#400)
|
||||||
|
- Improved English in docs and comments (#405)
|
||||||
|
- Authmate: reconsidered default bearer token rules (#406)
|
||||||
|
|
||||||
|
## [0.19.0] - 2022-03-16
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Authmate: support placement policy overriding (#343, #364)
|
||||||
|
- Managing bucket notification configuration (#340)
|
||||||
|
- Unit tests in go1.17 (#265)
|
||||||
|
- NATS settings in application config (#341)
|
||||||
|
- Support `Expires` and `Cache-Control` headers (#312)
|
||||||
|
- Support `%` as delimiter (#313)
|
||||||
|
- Support `null` version deletion (#319)
|
||||||
|
- Bucket name resolving order (#285)
|
||||||
|
- Authmate: added `timeout` flag (#290)
|
||||||
|
- MinIO results in s3 compatibility tables (#304)
|
||||||
|
- Support overriding response headers (#310)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Authmate: check parameters before container creation (#372)
|
||||||
|
- Unify cache invalidation on deletion (#368)
|
||||||
|
- Updated NeoFS SDK to v1.0.0-rc.3 (#297, #333, #346, #376)
|
||||||
|
- Authmate: changed session token rules handling (#329, #336, #338, #352)
|
||||||
|
- Changed status code for some failed requests (#308)
|
||||||
|
- GetBucketLocation returns policy name used at bucket creation (#301)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Waiting for bucket to be deleted (#366)
|
||||||
|
- Authmate: changed error message for session context building (#348)
|
||||||
|
- Authmate: fixed access key parsing in `obtain-secret` command (#295)
|
||||||
|
- Distinguishing `BucketAlreadyExists` errors (#354)
|
||||||
|
- Incorrect panic if handler not found (#305)
|
||||||
|
- Authmate: use container friendly name as system name (#299, #324)
|
||||||
|
- Use UTC `Last-Modified` timestamps (#331)
|
||||||
|
- Don't return object system metadata (#307)
|
||||||
|
- Handling empty post policy (#306)
|
||||||
|
- Use `X-Amz-Verion-Id` in `CompleteMulipartUpload` (#318)
|
||||||
|
|
||||||
|
### Removed
|
||||||
|
- Drop MinIO related errors (#316)
|
||||||
|
|
||||||
|
## [0.18.0] - 2021-12-16
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Support for MultipartUpload (#186, #187)
|
||||||
|
- CORS support (#217)
|
||||||
|
- Authmate supports setting of tokens lifetime in a more convenient format (duration) (#258)
|
||||||
|
- Generation of a random key for `--no-sign-request` (#276)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Bucket name resolving mechanism from listing owner's containers to using DNS (#219)
|
||||||
|
|
||||||
|
### Removed
|
||||||
|
- Deprecated golint, replaced by revive (#272)
|
||||||
|
|
||||||
|
## 0.17.0 (24 Sep 2021)
|
||||||
|
With this release we introduce [ceph-based](https://github.com/ceph/s3-tests) S3 compatibility results.
|
||||||
|
|
||||||
|
### Added
|
||||||
|
* Versioning support (#122, #242, #263)
|
||||||
|
* Ceph S3 compatibility results (#150, #249, #266)
|
||||||
|
* Handling `X-Amz-Expected-Bucket-Owner` header (#216)
|
||||||
|
* `X-Container-Id` header for `HeadBucket` response (#220)
|
||||||
|
* Basic ACL support (#49, #213)
|
||||||
|
* Caching (#179, #206, #231, #236, #253)
|
||||||
|
* Metadata directive when copying (#191)
|
||||||
|
* Bucket name checking (189)
|
||||||
|
* Continuation token support (#112, #154, #180)
|
||||||
|
* Mapping `LocationConstraint` to `PlacementPolicy` (#89)
|
||||||
|
* Tagging support (#196)
|
||||||
|
* POST uploading support (#190)
|
||||||
|
* Delete marker support (#248)
|
||||||
|
* Expiration for access box (#255)
|
||||||
|
* AWS CLI credential generating by authmate (#241)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
* Default placement policy is now configurable (#218)
|
||||||
|
* README is split into different files (#210)
|
||||||
|
* Unified error handling (#89, #149, #184)
|
||||||
|
* Authmate issue-secret response contains container id (#163)
|
||||||
|
* Removed "github.com/nspcc-dev/neofs-node" dependency (#234)
|
||||||
|
* Removed GitHub workflow of image publishing (#243)
|
||||||
|
* Changed license to AGPLv3 (#264)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
* ListObjects results are now the same for different users (#230)
|
||||||
|
* Error response for invalid authentication header is now correct (#199)
|
||||||
|
* Saving object metadata (#198)
|
||||||
|
* Range header handling (#194)
|
||||||
|
* Correct status codes (#118, #262)
|
||||||
|
* HeadObject for "directories" (#160)
|
||||||
|
* Fetch-owner parameter support (#159)
|
||||||
|
|
||||||
|
## 0.16.0 (16 Jul 2021)
|
||||||
|
|
||||||
|
With this release we publish S3 gateway source code. It includes various S3
|
||||||
|
compatibility improvements, support of bucket management, unified secp256r1
|
||||||
|
cryptography with NEP-6 wallet support.
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
* Allowed no-sign request (#65)
|
||||||
|
* Bearer token attached to all requests (#84)
|
||||||
|
* Time format in responses (#133)
|
||||||
|
* Max-keys checked in ListObjects (#135)
|
||||||
|
* Lost metadat in the objects (#131)
|
||||||
|
* Unique bucket name check (#125)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
* Bucket management operations (#47, #72)
|
||||||
|
* Node-specific owner IDs in bearer tokens (#83)
|
||||||
|
* AWS CLI usage section in README (#77)
|
||||||
|
* List object paging (#97)
|
||||||
|
* Lifetime for the tokens in auth-mate (#108)
|
||||||
|
* Support of range in GetObject request (#96)
|
||||||
|
* Support of NEP-6 wallets instead of binary encoded keys (#92)
|
||||||
|
* Support of JSON encoded rules in auth-mate (#71)
|
||||||
|
* Support of delimiters in ListObjects (#98)
|
||||||
|
* Support of object ETag (#93)
|
||||||
|
* Support of time-based conditional CopyObject and GetObject (#94)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
* Accesskey format: now `0` used as a delimiter between container ID and object
|
||||||
|
ID instead of `_` (#164)
|
||||||
|
* Accessbox is encoded in protobuf format (#48)
|
||||||
|
* Authentication uses secp256r1 instead of ed25519 (#75)
|
||||||
|
* Improved integration with NeoFS SDK and NeoFS API Go (#78, #88)
|
||||||
|
* Optimized object put execution (#155)
|
||||||
|
|
||||||
|
### Removed
|
||||||
|
* GRPC keepalive options (#73)
|
||||||
|
|
||||||
|
## 0.15.0 (10 Jun 2021)
|
||||||
|
|
||||||
|
This release brings S3 gateway to the current state of NeoFS and fixes some
|
||||||
|
bugs, no new significant features introduced (other than moving here already
|
||||||
|
existing authmate component).
|
||||||
|
|
||||||
|
New features:
|
||||||
|
* authmate was moved into this repository and is now built along with the
|
||||||
|
gateway itself (#46)
|
||||||
|
|
||||||
|
Behavior changes:
|
||||||
|
* neofs-s3-gate was renamed to neofs-s3-gw (#50)
|
||||||
|
|
||||||
|
Improvements:
|
||||||
|
* better Makefile (#43, #45, #55)
|
||||||
|
* stricter linters (#45)
|
||||||
|
* removed non-standard errors package from dependencies (#54)
|
||||||
|
* refactoring, reusing new sdk-go component (#60, #62, #63)
|
||||||
|
* updated neofs-api-go for compatibility with current NeoFS node 0.21.0 (#60, #68)
|
||||||
|
* extended README (#67, #76)
|
||||||
|
|
||||||
|
Bugs fixed:
|
||||||
|
* wrong (as per AWS specification) access key ID generated (#64)
|
||||||
|
|
||||||
## Older versions
|
## Older versions
|
||||||
|
|
||||||
This project is a fork of [NeoFS S3 Gateway](https://github.com/nspcc-dev/neofs-s3-gw) from version v0.26.0.
|
Please refer to [Github
|
||||||
To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-s3-gw/blob/master/CHANGELOG.md.
|
releases](https://github.com/nspcc-dev/neofs-s3-gw/releases/) for older
|
||||||
|
releases.
|
||||||
|
|
||||||
[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/b2148cc3...v0.27.0
|
[0.18.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.17.0...v0.18.0
|
||||||
[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.27.0...v0.28.0
|
[0.19.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.18.0...v0.19.0
|
||||||
[0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.0...v0.28.1
|
[0.20.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.19.0...v0.20.0
|
||||||
[0.28.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.1...v0.28.2
|
[0.21.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.20.0...v0.21.0
|
||||||
[0.29.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.2...v0.29.0
|
[0.21.1]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.21.0...v0.21.1
|
||||||
[0.29.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.0...v0.29.1
|
[0.22.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.21.1...v0.22.0
|
||||||
[0.29.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.1...v0.29.2
|
[0.23.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.22.0...v0.23.0
|
||||||
[0.29.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.2...v0.29.3
|
[0.24.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.23.0...v0.24.0
|
||||||
[0.30.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.3...v0.30.0
|
[0.25.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.24.0...v0.25.0
|
||||||
[0.30.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.0...v0.30.1
|
[Unreleased]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.25.0...master
|
||||||
[0.30.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.1...v0.30.2
|
|
||||||
[0.30.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.2...v0.30.3
|
|
||||||
[0.30.4]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.3...v0.30.4
|
|
||||||
[0.30.5]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.4...v0.30.5
|
|
||||||
[0.30.6]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.5...v0.30.6
|
|
||||||
[0.30.7]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.6...v0.30.7
|
|
||||||
[0.30.8]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.7...v0.30.8
|
|
||||||
[0.30.9]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.8...v0.30.9
|
|
||||||
[0.31.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.9...v0.31.0
|
|
||||||
[0.31.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.0...v0.31.1
|
|
||||||
[0.31.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.1...v0.31.2
|
|
||||||
[0.31.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.2...v0.31.3
|
|
||||||
[0.32.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.3...v0.32.0
|
|
||||||
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.0...master
|
|
||||||
|
|
|
@ -1,3 +0,0 @@
|
||||||
.* @TrueCloudLab/storage-services-developers @TrueCloudLab/storage-services-committers
|
|
||||||
.forgejo/.* @potyarkin
|
|
||||||
Makefile @potyarkin
|
|
|
@ -3,8 +3,8 @@
|
||||||
First, thank you for contributing! We love and encourage pull requests from
|
First, thank you for contributing! We love and encourage pull requests from
|
||||||
everyone. Please follow the guidelines:
|
everyone. Please follow the guidelines:
|
||||||
|
|
||||||
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/issues) and
|
- Check the open [issues](https://github.com/TrueCloudLab/frostfs-s3-gw/issues) and
|
||||||
[pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pulls) for existing
|
[pull requests](https://github.com/TrueCloudLab/frostfs-s3-gw/pulls) for existing
|
||||||
discussions.
|
discussions.
|
||||||
|
|
||||||
- Open an issue first, to discuss a new feature or enhancement.
|
- Open an issue first, to discuss a new feature or enhancement.
|
||||||
|
@ -27,20 +27,20 @@ Start by forking the `frostfs-s3-gw` repository, make changes in a branch and th
|
||||||
send a pull request. We encourage pull requests to discuss code changes. Here
|
send a pull request. We encourage pull requests to discuss code changes. Here
|
||||||
are the steps in details:
|
are the steps in details:
|
||||||
|
|
||||||
### Set up your git repository
|
### Set up your GitHub Repository
|
||||||
Fork [FrostFS S3 Gateway
|
Fork [FrostFS S3 Gateway
|
||||||
upstream](https://git.frostfs.info/repo/fork/15) source repository
|
upstream](https://github.com/TrueCloudLab/frostfs-s3-gw/fork) source repository
|
||||||
to your own personal repository. Copy the URL of your fork (you will need it for
|
to your own personal repository. Copy the URL of your fork (you will need it for
|
||||||
the `git clone` command below).
|
the `git clone` command below).
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
$ git clone https://git.frostfs.info/<username>/frostfs-s3-gw.git
|
$ git clone https://github.com/TrueCloudLab/frostfs-s3-gw
|
||||||
```
|
```
|
||||||
|
|
||||||
### Set up git remote as ``upstream``
|
### Set up git remote as ``upstream``
|
||||||
```sh
|
```sh
|
||||||
$ cd frostfs-s3-gw
|
$ cd frostfs-s3-gw
|
||||||
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw.git
|
$ git remote add upstream https://github.com/TrueCloudLab/frostfs-s3-gw
|
||||||
$ git fetch upstream
|
$ git fetch upstream
|
||||||
$ git merge upstream/master
|
$ git merge upstream/master
|
||||||
...
|
...
|
||||||
|
@ -90,8 +90,8 @@ $ git push origin feature/123-something_awesome
|
||||||
```
|
```
|
||||||
|
|
||||||
### Create a Pull Request
|
### Create a Pull Request
|
||||||
Pull requests can be created via Forgejo. Refer to [this
|
Pull requests can be created via GitHub. Refer to [this
|
||||||
document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
|
document](https://help.github.com/articles/creating-a-pull-request/) for
|
||||||
detailed steps on how to create a pull request. After a Pull Request gets peer
|
detailed steps on how to create a pull request. After a Pull Request gets peer
|
||||||
reviewed and approved, it will be merged.
|
reviewed and approved, it will be merged.
|
||||||
|
|
||||||
|
|
88
Makefile
88
Makefile
|
@ -3,34 +3,19 @@
|
||||||
# Common variables
|
# Common variables
|
||||||
REPO ?= $(shell go list -m)
|
REPO ?= $(shell go list -m)
|
||||||
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
|
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
|
||||||
GO_VERSION ?= 1.22
|
GO_VERSION ?= 1.19
|
||||||
LINT_VERSION ?= 1.60.1
|
LINT_VERSION ?= 1.49.0
|
||||||
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
|
|
||||||
BINDIR = bin
|
BINDIR = bin
|
||||||
|
|
||||||
METRICS_DUMP_OUT ?= ./metrics-dump.json
|
|
||||||
|
|
||||||
# Binaries to build
|
# Binaries to build
|
||||||
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
|
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
|
||||||
BINS = $(addprefix $(BINDIR)/, $(CMDS))
|
BINS = $(addprefix $(BINDIR)/, $(CMDS))
|
||||||
|
|
||||||
GOFLAGS ?=
|
|
||||||
|
|
||||||
# Variables for docker
|
# Variables for docker
|
||||||
REPO_BASENAME = $(shell basename `go list -m`)
|
REPO_BASENAME = $(shell basename `go list -m`)
|
||||||
HUB_IMAGE ?= "git.frostfs.info/truecloudlab/$(REPO_BASENAME)"
|
HUB_IMAGE ?= "truecloudlab/$(REPO_BASENAME)"
|
||||||
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
|
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
|
||||||
|
|
||||||
OUTPUT_LINT_DIR ?= $(shell pwd)/bin
|
|
||||||
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
|
|
||||||
TMP_DIR := .cache
|
|
||||||
|
|
||||||
# Variables for fuzzing
|
|
||||||
FUZZ_NGFUZZ_DIR ?= ""
|
|
||||||
FUZZ_TIMEOUT ?= 30
|
|
||||||
FUZZ_FUNCTIONS ?= "all"
|
|
||||||
FUZZ_AUX ?= ""
|
|
||||||
|
|
||||||
.PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean protoc
|
.PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean protoc
|
||||||
|
|
||||||
# .deb package versioning
|
# .deb package versioning
|
||||||
|
@ -43,10 +28,9 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
|
||||||
# Make all binaries
|
# Make all binaries
|
||||||
all: $(BINS)
|
all: $(BINS)
|
||||||
|
|
||||||
$(BINS): $(BINDIR) dep
|
$(BINS): sync-tree $(BINDIR) dep
|
||||||
@echo "⇒ Build $@"
|
@echo "⇒ Build $@"
|
||||||
CGO_ENABLED=0 \
|
CGO_ENABLED=0 \
|
||||||
GOFLAGS=$(GOFLAGS) \
|
|
||||||
go build -v -trimpath \
|
go build -v -trimpath \
|
||||||
-ldflags "-X $(REPO)/internal/version.Version=$(VERSION)" \
|
-ldflags "-X $(REPO)/internal/version.Version=$(VERSION)" \
|
||||||
-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))
|
-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))
|
||||||
|
@ -55,6 +39,10 @@ $(BINDIR):
|
||||||
@echo "⇒ Ensure dir: $@"
|
@echo "⇒ Ensure dir: $@"
|
||||||
@mkdir -p $@
|
@mkdir -p $@
|
||||||
|
|
||||||
|
# Synchronize tree service
|
||||||
|
sync-tree:
|
||||||
|
@./syncTree.sh
|
||||||
|
|
||||||
# Pull go dependencies
|
# Pull go dependencies
|
||||||
dep:
|
dep:
|
||||||
@printf "⇒ Download requirements: "
|
@printf "⇒ Download requirements: "
|
||||||
|
@ -73,7 +61,7 @@ docker/%:
|
||||||
-w /src \
|
-w /src \
|
||||||
-u `stat -c "%u:%g" .` \
|
-u `stat -c "%u:%g" .` \
|
||||||
--env HOME=/src \
|
--env HOME=/src \
|
||||||
golang:$(GO_VERSION) make GOFLAGS=$(GOFLAGS) $*,\
|
golang:$(GO_VERSION) make $*,\
|
||||||
@echo "supported docker targets: all $(BINS) lint")
|
@echo "supported docker targets: all $(BINS) lint")
|
||||||
|
|
||||||
# Run tests
|
# Run tests
|
||||||
|
@ -85,34 +73,6 @@ cover:
|
||||||
@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
|
@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
|
||||||
@go tool cover -html=coverage.txt -o coverage.html
|
@go tool cover -html=coverage.txt -o coverage.html
|
||||||
|
|
||||||
# Run fuzzing
|
|
||||||
CLANG := $(shell which clang-17 2>/dev/null)
|
|
||||||
.PHONY: check-clang all
|
|
||||||
check-clang:
|
|
||||||
ifeq ($(CLANG),)
|
|
||||||
@echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh "
|
|
||||||
@exit 1
|
|
||||||
endif
|
|
||||||
|
|
||||||
.PHONY: check-ngfuzz all
|
|
||||||
check-ngfuzz:
|
|
||||||
@if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \
|
|
||||||
echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \
|
|
||||||
exit 1; \
|
|
||||||
fi
|
|
||||||
|
|
||||||
.PHONY: install-fuzzing-deps
|
|
||||||
install-fuzzing-deps: check-clang check-ngfuzz
|
|
||||||
|
|
||||||
.PHONY: fuzz
|
|
||||||
fuzz: install-fuzzing-deps
|
|
||||||
@START_PATH=$$(pwd); \
|
|
||||||
ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
|
|
||||||
cd $(FUZZ_NGFUZZ_DIR) && \
|
|
||||||
./ngfuzz -clean && \
|
|
||||||
./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
|
|
||||||
./ngfuzz -report
|
|
||||||
|
|
||||||
# Reformat code
|
# Reformat code
|
||||||
format:
|
format:
|
||||||
@echo "⇒ Processing gofmt check"
|
@echo "⇒ Processing gofmt check"
|
||||||
|
@ -124,7 +84,6 @@ image:
|
||||||
@docker build \
|
@docker build \
|
||||||
--build-arg REPO=$(REPO) \
|
--build-arg REPO=$(REPO) \
|
||||||
--build-arg VERSION=$(VERSION) \
|
--build-arg VERSION=$(VERSION) \
|
||||||
--build-arg GOFLAGS=$(GOFLAGS) \
|
|
||||||
--rm \
|
--rm \
|
||||||
-f .docker/Dockerfile \
|
-f .docker/Dockerfile \
|
||||||
-t $(HUB_IMAGE):$(HUB_TAG) .
|
-t $(HUB_IMAGE):$(HUB_TAG) .
|
||||||
|
@ -144,23 +103,9 @@ dirty-image:
|
||||||
-f .docker/Dockerfile.dirty \
|
-f .docker/Dockerfile.dirty \
|
||||||
-t $(HUB_IMAGE)-dirty:$(HUB_TAG) .
|
-t $(HUB_IMAGE)-dirty:$(HUB_TAG) .
|
||||||
|
|
||||||
# Install linters
|
|
||||||
lint-install:
|
|
||||||
@mkdir -p $(TMP_DIR)
|
|
||||||
@rm -rf $(TMP_DIR)/linters
|
|
||||||
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
|
|
||||||
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
|
|
||||||
@rm -rf $(TMP_DIR)/linters
|
|
||||||
@rmdir $(TMP_DIR) 2>/dev/null || true
|
|
||||||
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
|
|
||||||
|
|
||||||
# Run linters
|
# Run linters
|
||||||
lint:
|
lint:
|
||||||
@if [ ! -d "$(LINT_DIR)" ]; then \
|
@golangci-lint --timeout=5m run
|
||||||
echo "Run make lint-install"; \
|
|
||||||
exit 1; \
|
|
||||||
fi
|
|
||||||
$(LINT_DIR)/golangci-lint --timeout=5m run
|
|
||||||
|
|
||||||
# Run linters in Docker
|
# Run linters in Docker
|
||||||
docker/lint:
|
docker/lint:
|
||||||
|
@ -189,16 +134,10 @@ clean:
|
||||||
|
|
||||||
# Generate code from .proto files
|
# Generate code from .proto files
|
||||||
protoc:
|
protoc:
|
||||||
# Install specific version for protobuf lib
|
|
||||||
@GOBIN=$(abspath $(BINDIR)) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen
|
|
||||||
@for f in `find . -type f -name '*.proto' -not -path './vendor/*'`; do \
|
@for f in `find . -type f -name '*.proto' -not -path './vendor/*'`; do \
|
||||||
echo "⇒ Processing $$f "; \
|
echo "⇒ Processing $$f "; \
|
||||||
protoc \
|
protoc \
|
||||||
--go_out=paths=source_relative:. \
|
--go_out=paths=source_relative:. $$f; \
|
||||||
--plugin=protoc-gen-go-frostfs=$(BINDIR)/protogen \
|
|
||||||
--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
|
|
||||||
--go-grpc_opt=require_unimplemented_servers=false \
|
|
||||||
--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
|
|
||||||
done
|
done
|
||||||
rm -rf vendor
|
rm -rf vendor
|
||||||
|
|
||||||
|
@ -214,9 +153,4 @@ debpackage:
|
||||||
debclean:
|
debclean:
|
||||||
dh clean
|
dh clean
|
||||||
|
|
||||||
# Dump metrics (use METRICS_DUMP_OUT variable to override default out file './metrics-dump.json')
|
|
||||||
.PHONY: dump-metrics
|
|
||||||
dump-metrics:
|
|
||||||
@go test ./metrics -run TestDescribeAll --tags=dump_metrics --out=$(abspath $(METRICS_DUMP_OUT))
|
|
||||||
|
|
||||||
include help.mk
|
include help.mk
|
||||||
|
|
22
README.md
22
README.md
|
@ -1,5 +1,5 @@
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
|
<img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
|
||||||
</p>
|
</p>
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
|
<a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
|
||||||
|
@ -7,8 +7,6 @@
|
||||||
|
|
||||||
---
|
---
|
||||||
[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)
|
[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)
|
||||||
![Release](https://img.shields.io/badge/dynamic/json.svg?label=release&url=https://git.frostfs.info/api/v1/repos/TrueCloudLab/frostfs-s3-gw/releases&query=$[0].tag_name&color=orange)
|
|
||||||
![License](https://img.shields.io/badge/license-GPL--3.0-orange.svg)
|
|
||||||
|
|
||||||
# FrostFS S3 Gateway
|
# FrostFS S3 Gateway
|
||||||
|
|
||||||
|
@ -93,24 +91,6 @@ HTTP/1.1 200 OK
|
||||||
|
|
||||||
Also, you can configure domains using `.env` variables or `yaml` file.
|
Also, you can configure domains using `.env` variables or `yaml` file.
|
||||||
|
|
||||||
## Fuzzing
|
|
||||||
To run fuzzing tests use the following command:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ make fuzz
|
|
||||||
```
|
|
||||||
|
|
||||||
This command will install dependencies for the fuzzing process and run existing fuzzing tests.
|
|
||||||
|
|
||||||
You can also use the following arguments:
|
|
||||||
|
|
||||||
```
|
|
||||||
FUZZ_TIMEOUT - time to run each fuzzing test (default 30)
|
|
||||||
FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all")
|
|
||||||
FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug")
|
|
||||||
FUZZ_NGFUZZ_DIR - path to ngfuzz tool
|
|
||||||
````
|
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
|
|
||||||
- [Configuration](./docs/configuration.md)
|
- [Configuration](./docs/configuration.md)
|
||||||
|
|
26
SECURITY.md
26
SECURITY.md
|
@ -1,26 +0,0 @@
|
||||||
# Security Policy
|
|
||||||
|
|
||||||
|
|
||||||
## How To Report a Vulnerability
|
|
||||||
|
|
||||||
If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.
|
|
||||||
|
|
||||||
**Please do not report security vulnerabilities through public issues, discussions, or change requests.**
|
|
||||||
|
|
||||||
Instead, you can report it using one of the following ways:
|
|
||||||
|
|
||||||
* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email
|
|
||||||
|
|
||||||
Please include as much of the information listed below as you can to help us better understand and resolve the issue:
|
|
||||||
|
|
||||||
* The type of issue (e.g., buffer overflow, or cross-site scripting)
|
|
||||||
* Affected version(s)
|
|
||||||
* Impact of the issue, including how an attacker might exploit the issue
|
|
||||||
* Step-by-step instructions to reproduce the issue
|
|
||||||
* The location of the affected source code (tag/branch/commit or direct URL)
|
|
||||||
* Full paths of source file(s) related to the manifestation of the issue
|
|
||||||
* Any special configuration required to reproduce the issue
|
|
||||||
* Any log files that are related to this issue (if possible)
|
|
||||||
* Proof-of-concept or exploit code (if possible)
|
|
||||||
|
|
||||||
This information will help us triage your report more quickly.
|
|
2
VERSION
2
VERSION
|
@ -1 +1 @@
|
||||||
v0.32.0
|
v0.26.0
|
||||||
|
|
|
@ -2,213 +2,162 @@ package auth
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"crypto"
|
|
||||||
"crypto/hmac"
|
"crypto/hmac"
|
||||||
"crypto/rand"
|
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"mime/multipart"
|
"mime/multipart"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
|
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
|
||||||
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||||
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
"github.com/aws/aws-sdk-go-v2/aws"
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
"github.com/aws/aws-sdk-go-v2/credentials"
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
// authorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
|
||||||
// AuthorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
|
var authorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)
|
||||||
AuthorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)
|
|
||||||
|
|
||||||
// authorizationFieldV4aRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
|
// postPolicyCredentialRegexp -- is regexp for credentials when uploading file using POST with policy.
|
||||||
authorizationFieldV4aRegexp = regexp.MustCompile(`AWS4-ECDSA-P256-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)
|
var postPolicyCredentialRegexp = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request`)
|
||||||
|
|
||||||
// postPolicyCredentialRegexp -- is regexp for credentials when uploading file using POST with policy.
|
|
||||||
postPolicyCredentialRegexp = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request`)
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
type (
|
||||||
Center struct {
|
// Center is a user authentication interface.
|
||||||
|
Center interface {
|
||||||
|
Authenticate(request *http.Request) (*Box, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Box contains access box and additional info.
|
||||||
|
Box struct {
|
||||||
|
AccessBox *accessbox.Box
|
||||||
|
ClientTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
center struct {
|
||||||
reg *RegexpSubmatcher
|
reg *RegexpSubmatcher
|
||||||
regV4a *RegexpSubmatcher
|
|
||||||
postReg *RegexpSubmatcher
|
postReg *RegexpSubmatcher
|
||||||
cli tokens.Credentials
|
cli tokens.Credentials
|
||||||
allowedAccessKeyIDPrefixes []string // empty slice means all access key ids are allowed
|
allowedAccessKeyIDPrefixes []string // empty slice means all access key ids are allowed
|
||||||
settings CenterSettings
|
|
||||||
}
|
}
|
||||||
|
|
||||||
CenterSettings interface {
|
prs int
|
||||||
AccessBoxContainer() (cid.ID, bool)
|
|
||||||
}
|
|
||||||
|
|
||||||
//nolint:revive
|
authHeader struct {
|
||||||
AuthHeader struct {
|
|
||||||
AccessKeyID string
|
AccessKeyID string
|
||||||
Service string
|
Service string
|
||||||
Region string
|
Region string
|
||||||
Signature string
|
SignatureV4 string
|
||||||
SignedFields []string
|
SignedFields []string
|
||||||
Date string
|
Date string
|
||||||
IsPresigned bool
|
IsPresigned bool
|
||||||
Expiration time.Duration
|
Expiration time.Duration
|
||||||
Preamble string
|
|
||||||
PayloadHash string
|
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
authHeaderPartsNum = 6
|
accessKeyPartsNum = 2
|
||||||
authHeaderV4aPartsNum = 5
|
authHeaderPartsNum = 6
|
||||||
maxFormSizeMemory = 50 * 1048576 // 50 MB
|
maxFormSizeMemory = 50 * 1048576 // 50 MB
|
||||||
|
|
||||||
AmzAlgorithm = "X-Amz-Algorithm"
|
AmzAlgorithm = "X-Amz-Algorithm"
|
||||||
AmzCredential = "X-Amz-Credential"
|
AmzCredential = "X-Amz-Credential"
|
||||||
AmzSignature = "X-Amz-Signature"
|
AmzSignature = "X-Amz-Signature"
|
||||||
AmzSignedHeaders = "X-Amz-SignedHeaders"
|
AmzSignedHeaders = "X-Amz-SignedHeaders"
|
||||||
AmzRegionSet = "X-Amz-Region-Set"
|
|
||||||
AmzExpires = "X-Amz-Expires"
|
AmzExpires = "X-Amz-Expires"
|
||||||
AmzDate = "X-Amz-Date"
|
AmzDate = "X-Amz-Date"
|
||||||
AmzContentSHA256 = "X-Amz-Content-Sha256"
|
|
||||||
AuthorizationHdr = "Authorization"
|
AuthorizationHdr = "Authorization"
|
||||||
ContentTypeHdr = "Content-Type"
|
ContentTypeHdr = "Content-Type"
|
||||||
|
|
||||||
UnsignedPayload = "UNSIGNED-PAYLOAD"
|
|
||||||
StreamingUnsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
|
|
||||||
StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
|
|
||||||
StreamingContentSHA256Trailer = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
|
|
||||||
StreamingContentECDSASHA256 = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"
|
|
||||||
StreamingContentECDSASHA256Trailer = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var ContentSHA256HeaderStandardValue = map[string]struct{}{
|
// ErrNoAuthorizationHeader is returned for unauthenticated requests.
|
||||||
UnsignedPayload: {},
|
var ErrNoAuthorizationHeader = errors.New("no authorization header")
|
||||||
StreamingUnsignedPayloadTrailer: {},
|
|
||||||
StreamingContentSHA256: {},
|
func (p prs) Read(_ []byte) (n int, err error) {
|
||||||
StreamingContentSHA256Trailer: {},
|
panic("implement me")
|
||||||
StreamingContentECDSASHA256: {},
|
|
||||||
StreamingContentECDSASHA256Trailer: {},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p prs) Seek(_ int64, _ int) (int64, error) {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ io.ReadSeeker = prs(0)
|
||||||
|
|
||||||
// New creates an instance of AuthCenter.
|
// New creates an instance of AuthCenter.
|
||||||
func New(creds tokens.Credentials, prefixes []string, settings CenterSettings) *Center {
|
func New(frostFS tokens.FrostFS, key *keys.PrivateKey, prefixes []string, config *cache.Config) Center {
|
||||||
return &Center{
|
return ¢er{
|
||||||
cli: creds,
|
cli: tokens.New(frostFS, key, config),
|
||||||
reg: NewRegexpMatcher(AuthorizationFieldRegexp),
|
reg: NewRegexpMatcher(authorizationFieldRegexp),
|
||||||
regV4a: NewRegexpMatcher(authorizationFieldV4aRegexp),
|
|
||||||
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
||||||
allowedAccessKeyIDPrefixes: prefixes,
|
allowedAccessKeyIDPrefixes: prefixes,
|
||||||
settings: settings,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
func (c *center) parseAuthHeader(header string) (*authHeader, error) {
|
||||||
signaturePreambleSigV4 = "AWS4-HMAC-SHA256"
|
submatches := c.reg.GetSubmatches(header)
|
||||||
signaturePreambleSigV4A = "AWS4-ECDSA-P256-SHA256"
|
if len(submatches) != authHeaderPartsNum {
|
||||||
)
|
return nil, apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed)
|
||||||
|
|
||||||
func (c *Center) parseAuthHeader(authHeader string, headers http.Header) (*AuthHeader, error) {
|
|
||||||
preamble, _, _ := strings.Cut(authHeader, " ")
|
|
||||||
|
|
||||||
var (
|
|
||||||
submatches map[string]string
|
|
||||||
region string
|
|
||||||
)
|
|
||||||
|
|
||||||
switch preamble {
|
|
||||||
case signaturePreambleSigV4:
|
|
||||||
submatches = c.reg.GetSubmatches(authHeader)
|
|
||||||
if len(submatches) != authHeaderPartsNum {
|
|
||||||
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), authHeader)
|
|
||||||
}
|
|
||||||
region = submatches["region"]
|
|
||||||
case signaturePreambleSigV4A:
|
|
||||||
submatches = c.regV4a.GetSubmatches(authHeader)
|
|
||||||
if len(submatches) != authHeaderV4aPartsNum {
|
|
||||||
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), authHeader)
|
|
||||||
}
|
|
||||||
region = headers.Get(AmzRegionSet)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), authHeader)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &AuthHeader{
|
accessKey := strings.Split(submatches["access_key_id"], "0")
|
||||||
|
if len(accessKey) != accessKeyPartsNum {
|
||||||
|
return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)
|
||||||
|
}
|
||||||
|
|
||||||
|
signedFields := strings.Split(submatches["signed_header_fields"], ";")
|
||||||
|
|
||||||
|
return &authHeader{
|
||||||
AccessKeyID: submatches["access_key_id"],
|
AccessKeyID: submatches["access_key_id"],
|
||||||
Service: submatches["service"],
|
Service: submatches["service"],
|
||||||
Region: region,
|
Region: submatches["region"],
|
||||||
Signature: submatches["v4_signature"],
|
SignatureV4: submatches["v4_signature"],
|
||||||
SignedFields: strings.Split(submatches["signed_header_fields"], ";"),
|
SignedFields: signedFields,
|
||||||
Date: submatches["date"],
|
Date: submatches["date"],
|
||||||
Preamble: preamble,
|
|
||||||
PayloadHash: headers.Get(AmzContentSHA256),
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func IsStandardContentSHA256(key string) bool {
|
func (a *authHeader) getAddress() (oid.Address, error) {
|
||||||
_, ok := ContentSHA256HeaderStandardValue[key]
|
var addr oid.Address
|
||||||
return ok
|
if err := addr.DecodeString(strings.ReplaceAll(a.AccessKeyID, "0", "/")); err != nil {
|
||||||
|
return addr, apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)
|
||||||
|
}
|
||||||
|
return addr, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
func (c *center) Authenticate(r *http.Request) (*Box, error) {
|
||||||
var (
|
var (
|
||||||
err error
|
err error
|
||||||
authHdr *AuthHeader
|
authHdr *authHeader
|
||||||
signatureDateTimeStr string
|
signatureDateTimeStr string
|
||||||
needClientTime bool
|
needClientTime bool
|
||||||
)
|
)
|
||||||
|
|
||||||
queryValues := r.URL.Query()
|
queryValues := r.URL.Query()
|
||||||
if queryValues.Get(AmzAlgorithm) == signaturePreambleSigV4 {
|
if queryValues.Get(AmzAlgorithm) == "AWS4-HMAC-SHA256" {
|
||||||
creds := strings.Split(queryValues.Get(AmzCredential), "/")
|
creds := strings.Split(queryValues.Get(AmzCredential), "/")
|
||||||
if len(creds) != 5 || creds[4] != "aws4_request" {
|
if len(creds) != 5 || creds[4] != "aws4_request" {
|
||||||
return nil, fmt.Errorf("bad X-Amz-Credential")
|
return nil, fmt.Errorf("bad X-Amz-Credential")
|
||||||
}
|
}
|
||||||
authHdr = &AuthHeader{
|
authHdr = &authHeader{
|
||||||
AccessKeyID: creds[0],
|
AccessKeyID: creds[0],
|
||||||
Service: creds[3],
|
Service: creds[3],
|
||||||
Region: creds[2],
|
Region: creds[2],
|
||||||
Signature: queryValues.Get(AmzSignature),
|
SignatureV4: queryValues.Get(AmzSignature),
|
||||||
SignedFields: strings.Split(queryValues.Get(AmzSignedHeaders), ";"),
|
SignedFields: queryValues[AmzSignedHeaders],
|
||||||
Date: creds[1],
|
Date: creds[1],
|
||||||
IsPresigned: true,
|
IsPresigned: true,
|
||||||
Preamble: signaturePreambleSigV4,
|
|
||||||
PayloadHash: r.Header.Get(AmzContentSHA256),
|
|
||||||
}
|
}
|
||||||
authHdr.Expiration, err = time.ParseDuration(queryValues.Get(AmzExpires) + "s")
|
authHdr.Expiration, err = time.ParseDuration(queryValues.Get(AmzExpires) + "s")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%w: couldn't parse X-Amz-Expires %v", apierr.GetAPIError(apierr.ErrMalformedExpires), err)
|
return nil, fmt.Errorf("couldn't parse X-Amz-Expires: %w", err)
|
||||||
}
|
|
||||||
signatureDateTimeStr = queryValues.Get(AmzDate)
|
|
||||||
} else if queryValues.Get(AmzAlgorithm) == signaturePreambleSigV4A {
|
|
||||||
creds := strings.Split(queryValues.Get(AmzCredential), "/")
|
|
||||||
if len(creds) != 4 || creds[3] != "aws4_request" {
|
|
||||||
return nil, fmt.Errorf("bad X-Amz-Credential")
|
|
||||||
}
|
|
||||||
authHdr = &AuthHeader{
|
|
||||||
AccessKeyID: creds[0],
|
|
||||||
Service: creds[2],
|
|
||||||
Region: queryValues.Get(AmzRegionSet),
|
|
||||||
Signature: queryValues.Get(AmzSignature),
|
|
||||||
SignedFields: strings.Split(queryValues.Get(AmzSignedHeaders), ";"),
|
|
||||||
Date: creds[1],
|
|
||||||
IsPresigned: true,
|
|
||||||
Preamble: signaturePreambleSigV4A,
|
|
||||||
PayloadHash: r.Header.Get(AmzContentSHA256),
|
|
||||||
}
|
|
||||||
authHdr.Expiration, err = time.ParseDuration(queryValues.Get(AmzExpires) + "s")
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("%w: couldn't parse X-Amz-Expires %v", apierr.GetAPIError(apierr.ErrMalformedExpires), err)
|
|
||||||
}
|
}
|
||||||
signatureDateTimeStr = queryValues.Get(AmzDate)
|
signatureDateTimeStr = queryValues.Get(AmzDate)
|
||||||
} else {
|
} else {
|
||||||
|
@ -217,9 +166,9 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
||||||
if strings.HasPrefix(r.Header.Get(ContentTypeHdr), "multipart/form-data") {
|
if strings.HasPrefix(r.Header.Get(ContentTypeHdr), "multipart/form-data") {
|
||||||
return c.checkFormData(r)
|
return c.checkFormData(r)
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("%w: %v", middleware.ErrNoAuthorizationHeader, authHeaderField)
|
return nil, ErrNoAuthorizationHeader
|
||||||
}
|
}
|
||||||
authHdr, err = c.parseAuthHeader(authHeaderField[0], r.Header)
|
authHdr, err = c.parseAuthHeader(authHeaderField[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -232,38 +181,26 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
||||||
return nil, fmt.Errorf("failed to parse x-amz-date header field: %w", err)
|
return nil, fmt.Errorf("failed to parse x-amz-date header field: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = c.checkAccessKeyID(authHdr.AccessKeyID); err != nil {
|
if err := c.checkAccessKeyID(authHdr.AccessKeyID); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
cnrID, err := c.getAccessBoxContainer(authHdr.AccessKeyID)
|
addr, err := authHdr.getAddress()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
box, attrs, err := c.cli.GetBox(r.Context(), cnrID, authHdr.AccessKeyID)
|
box, err := c.cli.GetBox(r.Context(), addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("get box by access key '%s': %w", authHdr.AccessKeyID, err)
|
return nil, fmt.Errorf("get box: %w", err)
|
||||||
}
|
|
||||||
|
|
||||||
if err = checkFormatHashContentSHA256(r.Header.Get(AmzContentSHA256)); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
clonedRequest := cloneRequest(r, authHdr)
|
clonedRequest := cloneRequest(r, authHdr)
|
||||||
if err = c.checkSign(r.Context(), authHdr, box, clonedRequest, signatureDateTime); err != nil {
|
if err = c.checkSign(authHdr, box, clonedRequest, signatureDateTime); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
result := &middleware.Box{
|
result := &Box{AccessBox: box}
|
||||||
AccessBox: box,
|
|
||||||
AuthHeaders: &middleware.AuthHeader{
|
|
||||||
AccessKeyID: authHdr.AccessKeyID,
|
|
||||||
Region: authHdr.Region,
|
|
||||||
SignatureV4: authHdr.Signature,
|
|
||||||
},
|
|
||||||
Attributes: attrs,
|
|
||||||
}
|
|
||||||
if needClientTime {
|
if needClientTime {
|
||||||
result.ClientTime = signatureDateTime
|
result.ClientTime = signatureDateTime
|
||||||
}
|
}
|
||||||
|
@ -271,36 +208,7 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Center) getAccessBoxContainer(accessKeyID string) (cid.ID, error) {
|
func (c center) checkAccessKeyID(accessKeyID string) error {
|
||||||
var addr oid.Address
|
|
||||||
if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err == nil {
|
|
||||||
return addr.Container(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
cnrID, ok := c.settings.AccessBoxContainer()
|
|
||||||
if ok {
|
|
||||||
return cnrID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return cid.ID{}, fmt.Errorf("%w: unknown container for creds '%s'", apierr.GetAPIError(apierr.ErrInvalidAccessKeyID), accessKeyID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkFormatHashContentSHA256(hash string) error {
|
|
||||||
if !IsStandardContentSHA256(hash) {
|
|
||||||
hashBinary, err := hex.DecodeString(hash)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("%w: decode hash: %s: %s", apierr.GetAPIError(apierr.ErrContentSHA256Mismatch),
|
|
||||||
hash, err.Error())
|
|
||||||
}
|
|
||||||
if len(hashBinary) != sha256.Size && len(hash) != 0 {
|
|
||||||
return fmt.Errorf("%w: invalid hash size %d", apierr.GetAPIError(apierr.ErrContentSHA256Mismatch), len(hashBinary))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Center) checkAccessKeyID(accessKeyID string) error {
|
|
||||||
if len(c.allowedAccessKeyIDPrefixes) == 0 {
|
if len(c.allowedAccessKeyIDPrefixes) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -311,12 +219,12 @@ func (c Center) checkAccessKeyID(accessKeyID string) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Errorf("%w: accesskeyID prefix isn't allowed", apierr.GetAPIError(apierr.ErrAccessDenied))
|
return apiErrors.GetAPIError(apiErrors.ErrAccessDenied)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
|
func (c *center) checkFormData(r *http.Request) (*Box, error) {
|
||||||
if err := r.ParseMultipartForm(maxFormSizeMemory); err != nil {
|
if err := r.ParseMultipartForm(maxFormSizeMemory); err != nil {
|
||||||
return nil, fmt.Errorf("%w: parse multipart form with max size %d", apierr.GetAPIError(apierr.ErrInvalidArgument), maxFormSizeMemory)
|
return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidArgument)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := prepareForm(r.MultipartForm); err != nil {
|
if err := prepareForm(r.MultipartForm); err != nil {
|
||||||
|
@ -325,13 +233,12 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
|
||||||
|
|
||||||
policy := MultipartFormValue(r, "policy")
|
policy := MultipartFormValue(r, "policy")
|
||||||
if policy == "" {
|
if policy == "" {
|
||||||
return nil, fmt.Errorf("%w: missing policy", middleware.ErrNoAuthorizationHeader)
|
return nil, ErrNoAuthorizationHeader
|
||||||
}
|
}
|
||||||
|
|
||||||
creds := MultipartFormValue(r, "x-amz-credential")
|
submatches := c.postReg.GetSubmatches(MultipartFormValue(r, "x-amz-credential"))
|
||||||
submatches := c.postReg.GetSubmatches(creds)
|
|
||||||
if len(submatches) != 4 {
|
if len(submatches) != 4 {
|
||||||
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), creds)
|
return nil, apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed)
|
||||||
}
|
}
|
||||||
|
|
||||||
signatureDateTime, err := time.Parse("20060102T150405Z", MultipartFormValue(r, "x-amz-date"))
|
signatureDateTime, err := time.Parse("20060102T150405Z", MultipartFormValue(r, "x-amz-date"))
|
||||||
|
@ -339,40 +246,28 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
|
||||||
return nil, fmt.Errorf("failed to parse x-amz-date field: %w", err)
|
return nil, fmt.Errorf("failed to parse x-amz-date field: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
accessKeyID := submatches["access_key_id"]
|
var addr oid.Address
|
||||||
|
if err = addr.DecodeString(strings.ReplaceAll(submatches["access_key_id"], "0", "/")); err != nil {
|
||||||
cnrID, err := c.getAccessBoxContainer(accessKeyID)
|
return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
box, attrs, err := c.cli.GetBox(r.Context(), cnrID, accessKeyID)
|
box, err := c.cli.GetBox(r.Context(), addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("get box by accessKeyID '%s': %w", accessKeyID, err)
|
return nil, fmt.Errorf("get box: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
secret := box.Gate.SecretKey
|
secret := box.Gate.AccessKey
|
||||||
service, region := submatches["service"], submatches["region"]
|
service, region := submatches["service"], submatches["region"]
|
||||||
|
|
||||||
signature := SignStr(secret, service, region, signatureDateTime, policy)
|
signature := signStr(secret, service, region, signatureDateTime, policy)
|
||||||
reqSignature := MultipartFormValue(r, "x-amz-signature")
|
if signature != MultipartFormValue(r, "x-amz-signature") {
|
||||||
if signature != reqSignature {
|
return nil, apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch)
|
||||||
return nil, fmt.Errorf("%w: %s != %s", apierr.GetAPIError(apierr.ErrSignatureDoesNotMatch),
|
|
||||||
reqSignature, signature)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &middleware.Box{
|
return &Box{AccessBox: box}, nil
|
||||||
AccessBox: box,
|
|
||||||
AuthHeaders: &middleware.AuthHeader{
|
|
||||||
AccessKeyID: accessKeyID,
|
|
||||||
Region: region,
|
|
||||||
SignatureV4: signature,
|
|
||||||
},
|
|
||||||
Attributes: attrs,
|
|
||||||
}, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
|
func cloneRequest(r *http.Request, authHeader *authHeader) *http.Request {
|
||||||
otherRequest := r.Clone(context.TODO())
|
otherRequest := r.Clone(context.TODO())
|
||||||
otherRequest.Header = make(http.Header)
|
otherRequest.Header = make(http.Header)
|
||||||
|
|
||||||
|
@ -393,117 +288,44 @@ func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
|
||||||
return otherRequest
|
return otherRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Center) checkSign(ctx context.Context, authHeader *AuthHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
|
func (c *center) checkSign(authHeader *authHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
|
||||||
|
awsCreds := credentials.NewStaticCredentials(authHeader.AccessKeyID, box.Gate.AccessKey, "")
|
||||||
|
signer := v4.NewSigner(awsCreds)
|
||||||
|
|
||||||
var signature string
|
var signature string
|
||||||
|
if authHeader.IsPresigned {
|
||||||
switch authHeader.Preamble {
|
now := time.Now()
|
||||||
case signaturePreambleSigV4:
|
if signatureDateTime.Add(authHeader.Expiration).Before(now) {
|
||||||
creds := aws.Credentials{
|
return apiErrors.GetAPIError(apiErrors.ErrExpiredPresignRequest)
|
||||||
AccessKeyID: authHeader.AccessKeyID,
|
|
||||||
SecretAccessKey: box.Gate.SecretKey,
|
|
||||||
}
|
}
|
||||||
signer := v4.NewSigner(func(options *v4.SignerOptions) {
|
if now.Before(signatureDateTime) {
|
||||||
options.DisableURIPathEscaping = true
|
return apiErrors.GetAPIError(apiErrors.ErrBadRequest)
|
||||||
})
|
|
||||||
|
|
||||||
if authHeader.IsPresigned {
|
|
||||||
if err := checkPresignedDate(authHeader, signatureDateTime); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
signedURI, _, err := signer.PresignHTTP(ctx, creds, request, authHeader.PayloadHash, authHeader.Service, authHeader.Region, signatureDateTime)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to pre-sign temporary HTTP request: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
u, err := url.ParseRequestURI(signedURI)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
signature = u.Query().Get(AmzSignature)
|
|
||||||
} else {
|
|
||||||
if err := signer.SignHTTP(ctx, creds, request, authHeader.PayloadHash, authHeader.Service, authHeader.Region, signatureDateTime); err != nil {
|
|
||||||
return fmt.Errorf("failed to sign temporary HTTP request: %w", err)
|
|
||||||
}
|
|
||||||
signature = c.reg.GetSubmatches(request.Header.Get(AuthorizationHdr))["v4_signature"]
|
|
||||||
}
|
}
|
||||||
if authHeader.Signature != signature {
|
if _, err := signer.Presign(request, nil, authHeader.Service, authHeader.Region, authHeader.Expiration, signatureDateTime); err != nil {
|
||||||
return fmt.Errorf("%w: %s != %s: headers %v", apierr.GetAPIError(apierr.ErrSignatureDoesNotMatch),
|
return fmt.Errorf("failed to pre-sign temporary HTTP request: %w", err)
|
||||||
authHeader.Signature, signature, authHeader.SignedFields)
|
|
||||||
}
|
}
|
||||||
|
signature = request.URL.Query().Get(AmzSignature)
|
||||||
case signaturePreambleSigV4A:
|
} else {
|
||||||
signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
|
signer.DisableURIPathEscaping = true
|
||||||
options.DisableURIPathEscaping = true
|
if _, err := signer.Sign(request, nil, authHeader.Service, authHeader.Region, signatureDateTime); err != nil {
|
||||||
})
|
return fmt.Errorf("failed to sign temporary HTTP request: %w", err)
|
||||||
|
|
||||||
credAdapter := v4a.SymmetricCredentialAdaptor{
|
|
||||||
SymmetricProvider: credentials.NewStaticCredentialsProvider(authHeader.AccessKeyID, box.Gate.SecretKey, ""),
|
|
||||||
}
|
}
|
||||||
|
signature = c.reg.GetSubmatches(request.Header.Get(AuthorizationHdr))["v4_signature"]
|
||||||
|
}
|
||||||
|
|
||||||
creds, err := credAdapter.RetrievePrivateKey(request.Context())
|
if authHeader.SignatureV4 != signature {
|
||||||
if err != nil {
|
return apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch)
|
||||||
return fmt.Errorf("failed to derive assymetric key from credentials: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !authHeader.IsPresigned {
|
|
||||||
return signer.VerifySignature(creds, request, authHeader.PayloadHash, authHeader.Service,
|
|
||||||
strings.Split(authHeader.Region, ","), signatureDateTime, authHeader.Signature)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = checkPresignedDate(authHeader, signatureDateTime); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return signer.VerifyPresigned(creds, request, authHeader.PayloadHash, authHeader.Service,
|
|
||||||
strings.Split(authHeader.Region, ","), signatureDateTime, authHeader.Signature)
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid preamble: %s", authHeader.Preamble)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkPresignedDate(authHeader *AuthHeader, signatureDateTime time.Time) error {
|
func signStr(secret, service, region string, t time.Time, strToSign string) string {
|
||||||
now := time.Now()
|
|
||||||
if signatureDateTime.Add(authHeader.Expiration).Before(now) {
|
|
||||||
return fmt.Errorf("%w: expired: now %s, signature %s", apierr.GetAPIError(apierr.ErrExpiredPresignRequest),
|
|
||||||
now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
|
|
||||||
}
|
|
||||||
if now.Before(signatureDateTime) {
|
|
||||||
return fmt.Errorf("%w: signature time from the future: now %s, signature %s", apierr.GetAPIError(apierr.ErrBadRequest),
|
|
||||||
now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func SignStr(secret, service, region string, t time.Time, strToSign string) string {
|
|
||||||
creds := deriveKey(secret, service, region, t)
|
creds := deriveKey(secret, service, region, t)
|
||||||
signature := hmacSHA256(creds, []byte(strToSign))
|
signature := hmacSHA256(creds, []byte(strToSign))
|
||||||
return hex.EncodeToString(signature)
|
return hex.EncodeToString(signature)
|
||||||
}
|
}
|
||||||
|
|
||||||
func SignStrV4A(ctx context.Context, cred aws.Credentials, strToSign string) (string, error) {
|
|
||||||
credAdapter := v4a.SymmetricCredentialAdaptor{
|
|
||||||
SymmetricProvider: credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, ""),
|
|
||||||
}
|
|
||||||
|
|
||||||
creds, err := credAdapter.RetrievePrivateKey(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
hash := sha256.New()
|
|
||||||
hash.Write([]byte(strToSign))
|
|
||||||
|
|
||||||
sig, err := creds.PrivateKey.Sign(rand.Reader, hash.Sum(nil), crypto.SHA256)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return hex.EncodeToString(sig), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func deriveKey(secret, service, region string, t time.Time) []byte {
|
func deriveKey(secret, service, region string, t time.Time) []byte {
|
||||||
hmacDate := hmacSHA256([]byte("AWS4"+secret), []byte(t.UTC().Format("20060102")))
|
hmacDate := hmacSHA256([]byte("AWS4"+secret), []byte(t.UTC().Format("20060102")))
|
||||||
hmacRegion := hmacSHA256(hmacDate, []byte(region))
|
hmacRegion := hmacSHA256(hmacDate, []byte(region))
|
||||||
|
|
|
@ -1,93 +0,0 @@
|
||||||
//go:build gofuzz
|
|
||||||
// +build gofuzz
|
|
||||||
|
|
||||||
package auth
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/aws/aws-sdk-go-v2/aws"
|
|
||||||
utils "github.com/trailofbits/go-fuzz-utils"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
fuzzSuccessExitCode = 0
|
|
||||||
fuzzFailExitCode = -1
|
|
||||||
)
|
|
||||||
|
|
||||||
func InitFuzzAuthenticate() {
|
|
||||||
}
|
|
||||||
|
|
||||||
func DoFuzzAuthenticate(input []byte) int {
|
|
||||||
// FUZZER INIT
|
|
||||||
if len(input) < 100 {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
tp, err := utils.NewTypeProvider(input)
|
|
||||||
if err != nil {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
var accessKeyAddr oid.Address
|
|
||||||
err = tp.Fill(accessKeyAddr)
|
|
||||||
if err != nil {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
|
|
||||||
secretKey, err := tp.GetString()
|
|
||||||
if err != nil {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secretKey}
|
|
||||||
|
|
||||||
reqData := RequestData{
|
|
||||||
Method: "GET",
|
|
||||||
Endpoint: "http://localhost:8084",
|
|
||||||
Bucket: "my-bucket",
|
|
||||||
Object: "@obj/name",
|
|
||||||
}
|
|
||||||
presignData := PresignData{
|
|
||||||
Service: "s3",
|
|
||||||
Region: "spb",
|
|
||||||
Lifetime: 10 * time.Minute,
|
|
||||||
SignTime: time.Now().UTC(),
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := PresignRequest(context.Background(), awsCreds, reqData, presignData, zap.NewNop())
|
|
||||||
if req == nil {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
expBox := &accessbox.Box{
|
|
||||||
Gate: &accessbox.GateData{
|
|
||||||
SecretKey: secretKey,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
mock := newTokensFrostfsMock()
|
|
||||||
mock.addBox(accessKeyAddr, expBox)
|
|
||||||
|
|
||||||
c := &Center{
|
|
||||||
cli: mock,
|
|
||||||
reg: NewRegexpMatcher(AuthorizationFieldRegexp),
|
|
||||||
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _ = c.Authenticate(req)
|
|
||||||
|
|
||||||
return fuzzSuccessExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
func FuzzAuthenticate(f *testing.F) {
|
|
||||||
f.Fuzz(func(t *testing.T, data []byte) {
|
|
||||||
DoFuzzAuthenticate(data)
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,71 +1,36 @@
|
||||||
package auth
|
package auth
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"mime/multipart"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
|
|
||||||
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
|
||||||
frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
|
|
||||||
"github.com/aws/aws-sdk-go-v2/aws"
|
|
||||||
"github.com/aws/aws-sdk-go-v2/credentials"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"go.uber.org/zap/zaptest"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type centerSettingsMock struct {
|
|
||||||
accessBoxContainer *cid.ID
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *centerSettingsMock) AccessBoxContainer() (cid.ID, bool) {
|
|
||||||
if c.accessBoxContainer == nil {
|
|
||||||
return cid.ID{}, false
|
|
||||||
}
|
|
||||||
return *c.accessBoxContainer, true
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAuthHeaderParse(t *testing.T) {
|
func TestAuthHeaderParse(t *testing.T) {
|
||||||
defaultHeader := "AWS4-HMAC-SHA256 Credential=oid0cid/20210809/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=2811ccb9e242f41426738fb1f"
|
defaultHeader := "AWS4-HMAC-SHA256 Credential=oid0cid/20210809/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=2811ccb9e242f41426738fb1f"
|
||||||
|
|
||||||
center := &Center{
|
center := ¢er{
|
||||||
reg: NewRegexpMatcher(AuthorizationFieldRegexp),
|
reg: NewRegexpMatcher(authorizationFieldRegexp),
|
||||||
settings: ¢erSettingsMock{},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range []struct {
|
for _, tc := range []struct {
|
||||||
header string
|
header string
|
||||||
err error
|
err error
|
||||||
expected *AuthHeader
|
expected *authHeader
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
header: defaultHeader,
|
header: defaultHeader,
|
||||||
err: nil,
|
err: nil,
|
||||||
expected: &AuthHeader{
|
expected: &authHeader{
|
||||||
AccessKeyID: "oid0cid",
|
AccessKeyID: "oid0cid",
|
||||||
Service: "s3",
|
Service: "s3",
|
||||||
Region: "us-east-1",
|
Region: "us-east-1",
|
||||||
Signature: "2811ccb9e242f41426738fb1f",
|
SignatureV4: "2811ccb9e242f41426738fb1f",
|
||||||
SignedFields: []string{"host", "x-amz-content-sha256", "x-amz-date"},
|
SignedFields: []string{"host", "x-amz-content-sha256", "x-amz-date"},
|
||||||
Date: "20210809",
|
Date: "20210809",
|
||||||
Preamble: signaturePreambleSigV4,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -73,13 +38,55 @@ func TestAuthHeaderParse(t *testing.T) {
|
||||||
err: errors.GetAPIError(errors.ErrAuthorizationHeaderMalformed),
|
err: errors.GetAPIError(errors.ErrAuthorizationHeaderMalformed),
|
||||||
expected: nil,
|
expected: nil,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
header: strings.ReplaceAll(defaultHeader, "oid0cid", "oidcid"),
|
||||||
|
err: errors.GetAPIError(errors.ErrInvalidAccessKeyID),
|
||||||
|
expected: nil,
|
||||||
|
},
|
||||||
} {
|
} {
|
||||||
authHeader, err := center.parseAuthHeader(tc.header, nil)
|
authHeader, err := center.parseAuthHeader(tc.header)
|
||||||
require.ErrorIs(t, err, tc.err, tc.header)
|
require.Equal(t, tc.err, err, tc.header)
|
||||||
require.Equal(t, tc.expected, authHeader, tc.header)
|
require.Equal(t, tc.expected, authHeader, tc.header)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAuthHeaderGetAddress(t *testing.T) {
|
||||||
|
defaulErr := errors.GetAPIError(errors.ErrInvalidAccessKeyID)
|
||||||
|
|
||||||
|
for _, tc := range []struct {
|
||||||
|
authHeader *authHeader
|
||||||
|
err error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
authHeader: &authHeader{
|
||||||
|
AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJM0HrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
|
||||||
|
},
|
||||||
|
err: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
authHeader: &authHeader{
|
||||||
|
AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJMHrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
|
||||||
|
},
|
||||||
|
err: defaulErr,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
authHeader: &authHeader{
|
||||||
|
AccessKeyID: "oid0cid",
|
||||||
|
},
|
||||||
|
err: defaulErr,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
authHeader: &authHeader{
|
||||||
|
AccessKeyID: "oidcid",
|
||||||
|
},
|
||||||
|
err: defaulErr,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
_, err := tc.authHeader.getAddress()
|
||||||
|
require.Equal(t, tc.err, err, tc.authHeader.AccessKeyID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestSignature(t *testing.T) {
|
func TestSignature(t *testing.T) {
|
||||||
secret := "66be461c3cd429941c55daf42fad2b8153e5a2016ba89c9494d97677cc9d3872"
|
secret := "66be461c3cd429941c55daf42fad2b8153e5a2016ba89c9494d97677cc9d3872"
|
||||||
strToSign := "eyAiZXhwaXJhdGlvbiI6ICIyMDE1LTEyLTMwVDEyOjAwOjAwLjAwMFoiLAogICJjb25kaXRpb25zIjogWwogICAgeyJidWNrZXQiOiAiYWNsIn0sCiAgICBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS8iXSwKICAgIHsic3VjY2Vzc19hY3Rpb25fcmVkaXJlY3QiOiAiaHR0cDovL2xvY2FsaG9zdDo4MDg0L2FjbCJ9LAogICAgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLAogICAgeyJ4LWFtei1tZXRhLXV1aWQiOiAiMTQzNjUxMjM2NTEyNzQifSwKICAgIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLAoKICAgIHsiWC1BbXotQ3JlZGVudGlhbCI6ICI4Vmk0MVBIbjVGMXNzY2J4OUhqMXdmMUU2aERUYURpNndxOGhxTU05NllKdTA1QzVDeUVkVlFoV1E2aVZGekFpTkxXaTlFc3BiUTE5ZDRuR3pTYnZVZm10TS8yMDE1MTIyOS91cy1lYXN0LTEvczMvYXdzNF9yZXF1ZXN0In0sCiAgICB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sCiAgICB7IlgtQW16LURhdGUiOiAiMjAxNTEyMjlUMDAwMDAwWiIgfSwKICAgIHsieC1pZ25vcmUtdG1wIjogInNvbWV0aGluZyIgfQogIF0KfQ=="
|
strToSign := "eyAiZXhwaXJhdGlvbiI6ICIyMDE1LTEyLTMwVDEyOjAwOjAwLjAwMFoiLAogICJjb25kaXRpb25zIjogWwogICAgeyJidWNrZXQiOiAiYWNsIn0sCiAgICBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS8iXSwKICAgIHsic3VjY2Vzc19hY3Rpb25fcmVkaXJlY3QiOiAiaHR0cDovL2xvY2FsaG9zdDo4MDg0L2FjbCJ9LAogICAgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLAogICAgeyJ4LWFtei1tZXRhLXV1aWQiOiAiMTQzNjUxMjM2NTEyNzQifSwKICAgIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLAoKICAgIHsiWC1BbXotQ3JlZGVudGlhbCI6ICI4Vmk0MVBIbjVGMXNzY2J4OUhqMXdmMUU2aERUYURpNndxOGhxTU05NllKdTA1QzVDeUVkVlFoV1E2aVZGekFpTkxXaTlFc3BiUTE5ZDRuR3pTYnZVZm10TS8yMDE1MTIyOS91cy1lYXN0LTEvczMvYXdzNF9yZXF1ZXN0In0sCiAgICB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sCiAgICB7IlgtQW16LURhdGUiOiAiMjAxNTEyMjlUMDAwMDAwWiIgfSwKICAgIHsieC1pZ25vcmUtdG1wIjogInNvbWV0aGluZyIgfQogIF0KfQ=="
|
||||||
|
@ -89,622 +96,6 @@ func TestSignature(t *testing.T) {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
signature := SignStr(secret, "s3", "us-east-1", signTime, strToSign)
|
signature := signStr(secret, "s3", "us-east-1", signTime, strToSign)
|
||||||
require.Equal(t, "dfbe886241d9e369cf4b329ca0f15eb27306c97aa1022cc0bb5a914c4ef87634", signature)
|
require.Equal(t, "dfbe886241d9e369cf4b329ca0f15eb27306c97aa1022cc0bb5a914c4ef87634", signature)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSignatureV4A(t *testing.T) {
|
|
||||||
accessKeyID := "2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1"
|
|
||||||
secretKey := "00637f53f842573aaa06c2164c598973cd986880987111416cf71f1619def537"
|
|
||||||
|
|
||||||
signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
|
|
||||||
options.DisableURIPathEscaping = true
|
|
||||||
options.Logger = zaptest.NewLogger(t)
|
|
||||||
options.LogSigning = true
|
|
||||||
})
|
|
||||||
|
|
||||||
credAdapter := v4a.SymmetricCredentialAdaptor{
|
|
||||||
SymmetricProvider: credentials.NewStaticCredentialsProvider(accessKeyID, secretKey, ""),
|
|
||||||
}
|
|
||||||
|
|
||||||
bodyStr := `
|
|
||||||
1b;chunk-signature=3045022100b63692a1b20759bdabd342011823427a8952df75c93174d98ad043abca8052e002201695228a91ba986171b8d0ad20856d3d94ca3614d0a90a50a531ba8e52447b9b**
|
|
||||||
Testing with the {sdk-java}
|
|
||||||
0;chunk-signature=30440220455885a2d4e9f705256ca6b0a5a22f7f784780ccbd1c0a371e5db3059c91745b022073259dd44746cbd63261d628a04d25be5a32a974c077c5c2d83c8157fb323b9f****
|
|
||||||
|
|
||||||
`
|
|
||||||
body := bytes.NewBufferString(bodyStr)
|
|
||||||
|
|
||||||
req, err := http.NewRequest("PUT", "http://localhost:8084/test/tmp", body)
|
|
||||||
require.NoError(t, err)
|
|
||||||
req.Header.Set("Amz-Sdk-Invocation-Id", "ca3a3cde-7d26-fce6-ed9c-82f7a0573824")
|
|
||||||
req.Header.Set("Amz-Sdk-Request", "attempt=2; max=2")
|
|
||||||
req.Header.Set("Authorization", "AWS4-ECDSA-P256-SHA256 Credential=2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1/20240904/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-region-set, Signature=30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7")
|
|
||||||
req.Header.Set("Content-Length", "360")
|
|
||||||
req.Header.Set("Content-Type", "text/plain; charset=UTF-8")
|
|
||||||
req.Header.Set("X-Amz-Content-Sha256", "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD")
|
|
||||||
req.Header.Set("X-Amz-Date", "20240904T133253Z")
|
|
||||||
req.Header.Set("X-Amz-Decoded-Content-Length", "27")
|
|
||||||
req.Header.Set("X-Amz-Region-Set", "us-east-1")
|
|
||||||
|
|
||||||
service := "s3"
|
|
||||||
regionSet := []string{"us-east-1"}
|
|
||||||
signature := "30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7"
|
|
||||||
signingTime, err := time.Parse("20060102T150405Z", "20240904T133253Z")
|
|
||||||
require.NoError(t, err)
|
|
||||||
creds, err := credAdapter.RetrievePrivateKey(req.Context())
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = signer.VerifySignature(creds, req, "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD", service, regionSet, signingTime, signature)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSignatureV4(t *testing.T) {
|
|
||||||
signer := v4.NewSigner(func(options *v4.SignerOptions) {
|
|
||||||
options.DisableURIPathEscaping = true
|
|
||||||
options.Logger = zaptest.NewLogger(t)
|
|
||||||
options.LogSigning = true
|
|
||||||
})
|
|
||||||
|
|
||||||
creds := aws.Credentials{
|
|
||||||
AccessKeyID: "9CBEGH8T9XfLin2pg7LG8ZxBH1PnZc1yoioViKngrUnu0CbC2mcjpcw9t4Y7AS6zsF5cJGkDhXAx5hxFDKwfZzgj7",
|
|
||||||
SecretAccessKey: "8742218da7f905de24f633f44efe02f82c6d2a317ed6f99592627215d17816e3",
|
|
||||||
}
|
|
||||||
|
|
||||||
bodyStr := `tmp2
|
|
||||||
`
|
|
||||||
body := bytes.NewBufferString(bodyStr)
|
|
||||||
|
|
||||||
req, err := http.NewRequest("PUT", "http://localhost:8084/main/tmp2", body)
|
|
||||||
require.NoError(t, err)
|
|
||||||
req.Header.Set("Authorization", "AWS4-HMAC-SHA256 Credential=9CBEGH8T9XfLin2pg7LG8ZxBH1PnZc1yoioViKngrUnu0CbC2mcjpcw9t4Y7AS6zsF5cJGkDhXAx5hxFDKwfZzgj7/20241210/ru/s3/aws4_request, SignedHeaders=content-md5;host;x-amz-content-sha256;x-amz-date, Signature=945664a5bccfd37a1167ca5e718e2b883f68a7ccf7f1044768e7fe58b737b7ed")
|
|
||||||
req.Header.Set("Content-Length", "5")
|
|
||||||
req.Header.Set("User-Agent", "aws-cli/2.13.2 Python/3.11.4 Linux/6.4.5-x64v1-xanmod1 exe/x86_64.debian.11 prompt/off command/s3api.put-object")
|
|
||||||
req.Header.Set("Content-MD5", "DstU4KxdzBj5jTGltfyqgA==")
|
|
||||||
req.Header.Set("Expect", "101-continue")
|
|
||||||
req.Header.Set("X-Amz-Content-Sha256", "1f9b7417ee5445c41dbe904c3651eb0ba1c12fecff16c1bccd8df3db6e390b5f")
|
|
||||||
req.Header.Set("X-Amz-Date", "20241210T114611Z")
|
|
||||||
|
|
||||||
service := "s3"
|
|
||||||
region := "ru"
|
|
||||||
signature := "945664a5bccfd37a1167ca5e718e2b883f68a7ccf7f1044768e7fe58b737b7ed"
|
|
||||||
signingTime, err := time.Parse("20060102T150405Z", "20241210T114611Z")
|
|
||||||
require.NoError(t, err)
|
|
||||||
cloned := cloneRequest(req, &AuthHeader{SignedFields: []string{"content-md5", "host", "x-amz-content-sha256", "x-amz-date"}})
|
|
||||||
|
|
||||||
err = signer.SignHTTP(cloned.Context(), creds, cloned, "1f9b7417ee5445c41dbe904c3651eb0ba1c12fecff16c1bccd8df3db6e390b5f", service, region, signingTime)
|
|
||||||
require.NoError(t, err)
|
|
||||||
signatureComputed := NewRegexpMatcher(AuthorizationFieldRegexp).GetSubmatches(cloned.Header.Get(AuthorizationHdr))["v4_signature"]
|
|
||||||
require.Equal(t, signature, signatureComputed, "signature mismatched")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckFormatContentSHA256(t *testing.T) {
|
|
||||||
defaultErr := errors.GetAPIError(errors.ErrContentSHA256Mismatch)
|
|
||||||
|
|
||||||
for _, tc := range []struct {
|
|
||||||
name string
|
|
||||||
hash string
|
|
||||||
error error
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "invalid hash format: length and character",
|
|
||||||
hash: "invalid-hash",
|
|
||||||
error: defaultErr,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid hash format: length (63 characters)",
|
|
||||||
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f7",
|
|
||||||
error: defaultErr,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid hash format: character",
|
|
||||||
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f7s",
|
|
||||||
error: defaultErr,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid hash format: hash size",
|
|
||||||
hash: "5aadb45520dcd8726b2822a7a78bb53d794f557199d5d4abdedd2c55a4bd6ca73607605c558de3db80c8e86c3196484566163ed1327e82e8b6757d1932113cb8",
|
|
||||||
error: defaultErr,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unsigned payload",
|
|
||||||
hash: "UNSIGNED-PAYLOAD",
|
|
||||||
error: nil,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "no hash",
|
|
||||||
hash: "",
|
|
||||||
error: nil,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "correct hash format",
|
|
||||||
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
|
|
||||||
error: nil,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
err := checkFormatHashContentSHA256(tc.hash)
|
|
||||||
require.ErrorIs(t, err, tc.error)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type frostFSMock struct {
|
|
||||||
objects map[string]*object.Object
|
|
||||||
}
|
|
||||||
|
|
||||||
func newFrostFSMock() *frostFSMock {
|
|
||||||
return &frostFSMock{
|
|
||||||
objects: map[string]*object.Object{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *frostFSMock) GetCredsObject(_ context.Context, prm tokens.PrmGetCredsObject) (*object.Object, error) {
|
|
||||||
obj, ok := f.objects[prm.AccessKeyID]
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
return obj, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *frostFSMock) CreateObject(context.Context, tokens.PrmObjectCreate) (oid.ID, error) {
|
|
||||||
return oid.ID{}, fmt.Errorf("the mock method is not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAuthenticate(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
key, err := keys.NewPrivateKey()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
cfg := &cache.Config{
|
|
||||||
Size: 10,
|
|
||||||
Lifetime: 24 * time.Hour,
|
|
||||||
Logger: zaptest.NewLogger(t),
|
|
||||||
}
|
|
||||||
|
|
||||||
gateData := []*accessbox.GateData{{
|
|
||||||
BearerToken: &bearer.Token{},
|
|
||||||
GateKey: key.PublicKey(),
|
|
||||||
}}
|
|
||||||
|
|
||||||
accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"), false)
|
|
||||||
require.NoError(t, err)
|
|
||||||
data, err := accessBox.Marshal()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var obj object.Object
|
|
||||||
obj.SetPayload(data)
|
|
||||||
addr := oidtest.Address()
|
|
||||||
obj.SetContainerID(addr.Container())
|
|
||||||
obj.SetID(addr.Object())
|
|
||||||
|
|
||||||
accessKeyID := getAccessKeyID(addr)
|
|
||||||
|
|
||||||
frostfs := newFrostFSMock()
|
|
||||||
frostfs.objects[accessKeyID] = &obj
|
|
||||||
|
|
||||||
awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secret.SecretKey}
|
|
||||||
defaultSigner := v4.NewSigner()
|
|
||||||
|
|
||||||
service, region := "s3", "default"
|
|
||||||
invalidValue := "invalid-value"
|
|
||||||
|
|
||||||
bigConfig := tokens.Config{
|
|
||||||
FrostFS: frostfs,
|
|
||||||
Key: key,
|
|
||||||
CacheConfig: cfg,
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range []struct {
|
|
||||||
name string
|
|
||||||
prefixes []string
|
|
||||||
request *http.Request
|
|
||||||
err bool
|
|
||||||
errCode errors.ErrorCode
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "valid sign",
|
|
||||||
prefixes: []string{addr.Container().String()},
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "no authorization header",
|
|
||||||
request: func() *http.Request {
|
|
||||||
return httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid authorization header",
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
r.Header.Set(AuthorizationHdr, invalidValue)
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
errCode: errors.ErrAuthorizationHeaderMalformed,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid access key id format",
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
cred := aws.Credentials{AccessKeyID: addr.Object().String(), SecretAccessKey: secret.SecretKey}
|
|
||||||
err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
errCode: errors.ErrInvalidAccessKeyID,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "not allowed access key id",
|
|
||||||
prefixes: []string{addr.Object().String()},
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
errCode: errors.ErrAccessDenied,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid access key id value",
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
cred := aws.Credentials{AccessKeyID: accessKeyID[:len(accessKeyID)-4], SecretAccessKey: secret.SecretKey}
|
|
||||||
err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
errCode: errors.ErrInvalidAccessKeyID,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unknown access key id",
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
cred := aws.Credentials{AccessKeyID: addr.Object().String() + "0" + addr.Container().String(), SecretAccessKey: secret.SecretKey}
|
|
||||||
err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid signature",
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
cred := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: "secret"}
|
|
||||||
err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
errCode: errors.ErrSignatureDoesNotMatch,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid signature - AmzDate",
|
|
||||||
prefixes: []string{addr.Container().String()},
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
|
|
||||||
r.Header.Set(AmzDate, invalidValue)
|
|
||||||
require.NoError(t, err)
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid AmzContentSHA256",
|
|
||||||
prefixes: []string{addr.Container().String()},
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
|
|
||||||
r.Header.Set(AmzContentSHA256, invalidValue)
|
|
||||||
require.NoError(t, err)
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid presign",
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
r.Header.Set(AmzExpires, "60")
|
|
||||||
signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
r.URL, err = url.ParseRequestURI(signedURI)
|
|
||||||
require.NoError(t, err)
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "presign, bad X-Amz-Credential",
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
query := url.Values{
|
|
||||||
AmzAlgorithm: []string{"AWS4-HMAC-SHA256"},
|
|
||||||
AmzCredential: []string{invalidValue},
|
|
||||||
}
|
|
||||||
r.URL.RawQuery = query.Encode()
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "presign, bad X-Amz-Expires",
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
r.Header.Set(AmzExpires, invalidValue)
|
|
||||||
signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
r.URL, err = url.ParseRequestURI(signedURI)
|
|
||||||
require.NoError(t, err)
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
errCode: errors.ErrMalformedExpires,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "presign, expired",
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
r.Header.Set(AmzExpires, "60")
|
|
||||||
signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now().Add(-time.Minute))
|
|
||||||
require.NoError(t, err)
|
|
||||||
r.URL, err = url.ParseRequestURI(signedURI)
|
|
||||||
require.NoError(t, err)
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
errCode: errors.ErrExpiredPresignRequest,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "presign, signature from future",
|
|
||||||
request: func() *http.Request {
|
|
||||||
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
r.Header.Set(AmzExpires, "60")
|
|
||||||
signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now().Add(time.Minute))
|
|
||||||
require.NoError(t, err)
|
|
||||||
r.URL, err = url.ParseRequestURI(signedURI)
|
|
||||||
require.NoError(t, err)
|
|
||||||
return r
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
errCode: errors.ErrBadRequest,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
creds := tokens.New(bigConfig)
|
|
||||||
cntr := New(creds, tc.prefixes, ¢erSettingsMock{})
|
|
||||||
box, err := cntr.Authenticate(tc.request)
|
|
||||||
|
|
||||||
if tc.err {
|
|
||||||
require.Error(t, err)
|
|
||||||
if tc.errCode > 0 {
|
|
||||||
err = frosterr.UnwrapErr(err)
|
|
||||||
require.Equal(t, errors.GetAPIError(tc.errCode), err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, accessKeyID, box.AuthHeaders.AccessKeyID)
|
|
||||||
require.Equal(t, region, box.AuthHeaders.Region)
|
|
||||||
require.Equal(t, secret.SecretKey, box.AccessBox.Gate.SecretKey)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHTTPPostAuthenticate(t *testing.T) {
|
|
||||||
const (
|
|
||||||
policyBase64 = "eyJleHBpcmF0aW9uIjogIjIwMjUtMTItMDFUMTI6MDA6MDAuMDAwWiIsImNvbmRpdGlvbnMiOiBbCiBbInN0YXJ0cy13aXRoIiwgIiR4LWFtei1jcmVkZW50aWFsIiwgIiJdLAogWyJzdGFydHMtd2l0aCIsICIkeC1hbXotZGF0ZSIsICIiXQpdfQ=="
|
|
||||||
invalidValue = "invalid-value"
|
|
||||||
defaultFieldName = "file"
|
|
||||||
service = "s3"
|
|
||||||
region = "default"
|
|
||||||
)
|
|
||||||
|
|
||||||
key, err := keys.NewPrivateKey()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
cfg := &cache.Config{
|
|
||||||
Size: 10,
|
|
||||||
Lifetime: 24 * time.Hour,
|
|
||||||
Logger: zaptest.NewLogger(t),
|
|
||||||
}
|
|
||||||
|
|
||||||
gateData := []*accessbox.GateData{{
|
|
||||||
BearerToken: &bearer.Token{},
|
|
||||||
GateKey: key.PublicKey(),
|
|
||||||
}}
|
|
||||||
|
|
||||||
accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"), false)
|
|
||||||
require.NoError(t, err)
|
|
||||||
data, err := accessBox.Marshal()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var obj object.Object
|
|
||||||
obj.SetPayload(data)
|
|
||||||
addr := oidtest.Address()
|
|
||||||
obj.SetContainerID(addr.Container())
|
|
||||||
obj.SetID(addr.Object())
|
|
||||||
|
|
||||||
accessKeyID := getAccessKeyID(addr)
|
|
||||||
|
|
||||||
frostfs := newFrostFSMock()
|
|
||||||
frostfs.objects[accessKeyID] = &obj
|
|
||||||
|
|
||||||
invalidAccessKeyID := oidtest.Address().String() + "0" + oidtest.Address().Object().String()
|
|
||||||
|
|
||||||
timeToSign := time.Now()
|
|
||||||
timeToSignStr := timeToSign.Format("20060102T150405Z")
|
|
||||||
|
|
||||||
bigConfig := tokens.Config{
|
|
||||||
FrostFS: frostfs,
|
|
||||||
Key: key,
|
|
||||||
CacheConfig: cfg,
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range []struct {
|
|
||||||
name string
|
|
||||||
prefixes []string
|
|
||||||
request *http.Request
|
|
||||||
err bool
|
|
||||||
errCode errors.ErrorCode
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "HTTP POST valid",
|
|
||||||
request: func() *http.Request {
|
|
||||||
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
|
|
||||||
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
|
||||||
|
|
||||||
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
|
|
||||||
}(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "HTTP POST valid with custom field name",
|
|
||||||
request: func() *http.Request {
|
|
||||||
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
|
|
||||||
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
|
||||||
|
|
||||||
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, "files")
|
|
||||||
}(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "HTTP POST valid with field name with a capital letter",
|
|
||||||
request: func() *http.Request {
|
|
||||||
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
|
|
||||||
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
|
||||||
|
|
||||||
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, "File")
|
|
||||||
}(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "HTTP POST invalid multipart form",
|
|
||||||
request: func() *http.Request {
|
|
||||||
req := httptest.NewRequest(http.MethodPost, "/", nil)
|
|
||||||
req.Header.Set(ContentTypeHdr, "multipart/form-data")
|
|
||||||
|
|
||||||
return req
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
errCode: errors.ErrInvalidArgument,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "HTTP POST invalid signature date time",
|
|
||||||
request: func() *http.Request {
|
|
||||||
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
|
|
||||||
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
|
||||||
|
|
||||||
return getRequestWithMultipartForm(t, policyBase64, creds, invalidValue, sign, defaultFieldName)
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "HTTP POST invalid creds",
|
|
||||||
request: func() *http.Request {
|
|
||||||
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
|
||||||
|
|
||||||
return getRequestWithMultipartForm(t, policyBase64, invalidValue, timeToSignStr, sign, defaultFieldName)
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
errCode: errors.ErrAuthorizationHeaderMalformed,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "HTTP POST missing policy",
|
|
||||||
request: func() *http.Request {
|
|
||||||
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
|
|
||||||
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
|
||||||
|
|
||||||
return getRequestWithMultipartForm(t, "", creds, timeToSignStr, sign, defaultFieldName)
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "HTTP POST invalid accessKeyId",
|
|
||||||
request: func() *http.Request {
|
|
||||||
creds := getCredsStr(invalidValue, timeToSignStr, region, service)
|
|
||||||
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
|
||||||
|
|
||||||
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "HTTP POST invalid accessKeyId - a non-existent box",
|
|
||||||
request: func() *http.Request {
|
|
||||||
creds := getCredsStr(invalidAccessKeyID, timeToSignStr, region, service)
|
|
||||||
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
|
||||||
|
|
||||||
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "HTTP POST invalid signature",
|
|
||||||
request: func() *http.Request {
|
|
||||||
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
|
|
||||||
sign := SignStr(secret.SecretKey, service, region, timeToSign, invalidValue)
|
|
||||||
|
|
||||||
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
|
|
||||||
}(),
|
|
||||||
err: true,
|
|
||||||
errCode: errors.ErrSignatureDoesNotMatch,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
creds := tokens.New(bigConfig)
|
|
||||||
cntr := New(creds, tc.prefixes, ¢erSettingsMock{})
|
|
||||||
box, err := cntr.Authenticate(tc.request)
|
|
||||||
|
|
||||||
if tc.err {
|
|
||||||
require.Error(t, err)
|
|
||||||
if tc.errCode > 0 {
|
|
||||||
err = frosterr.UnwrapErr(err)
|
|
||||||
require.Equal(t, errors.GetAPIError(tc.errCode), err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, secret.SecretKey, box.AccessBox.Gate.SecretKey)
|
|
||||||
require.Equal(t, accessKeyID, box.AuthHeaders.AccessKeyID)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func getCredsStr(accessKeyID, timeToSign, region, service string) string {
|
|
||||||
return accessKeyID + "/" + timeToSign + "/" + region + "/" + service + "/aws4_request"
|
|
||||||
}
|
|
||||||
|
|
||||||
func getRequestWithMultipartForm(t *testing.T, policy, creds, date, sign, fieldName string) *http.Request {
|
|
||||||
body := &bytes.Buffer{}
|
|
||||||
writer := multipart.NewWriter(body)
|
|
||||||
defer writer.Close()
|
|
||||||
|
|
||||||
err := writer.WriteField("policy", policy)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = writer.WriteField(AmzCredential, creds)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = writer.WriteField(AmzDate, date)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = writer.WriteField(AmzSignature, sign)
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = writer.CreateFormFile(fieldName, "test.txt")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
req := httptest.NewRequest(http.MethodPost, "/", body)
|
|
||||||
req.Header.Set(ContentTypeHdr, writer.FormDataContentType())
|
|
||||||
|
|
||||||
return req
|
|
||||||
}
|
|
||||||
|
|
||||||
func getAccessKeyID(addr oid.Address) string {
|
|
||||||
return strings.ReplaceAll(addr.EncodeToString(), "/", "0")
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,102 +0,0 @@
|
||||||
package auth
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/smithy/encoding/httpbinding"
|
|
||||||
v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
|
|
||||||
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
|
|
||||||
"github.com/aws/aws-sdk-go-v2/aws"
|
|
||||||
"github.com/aws/aws-sdk-go-v2/credentials"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type RequestData struct {
|
|
||||||
Method string
|
|
||||||
Endpoint string
|
|
||||||
Bucket string
|
|
||||||
Object string
|
|
||||||
}
|
|
||||||
|
|
||||||
type PresignData struct {
|
|
||||||
Service string
|
|
||||||
Region string
|
|
||||||
Lifetime time.Duration
|
|
||||||
SignTime time.Time
|
|
||||||
Headers map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// PresignRequest forms pre-signed request to access objects without aws credentials.
|
|
||||||
func PresignRequest(ctx context.Context, creds aws.Credentials, reqData RequestData, presignData PresignData, log *zap.Logger) (*http.Request, error) {
|
|
||||||
urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, httpbinding.EscapePath(reqData.Bucket, false), httpbinding.EscapePath(reqData.Object, false))
|
|
||||||
req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create new request: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range presignData.Headers {
|
|
||||||
req.Header.Set(k, v) // maybe we should filter system header (or keep responsibility on caller)
|
|
||||||
}
|
|
||||||
req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))
|
|
||||||
req.Header.Set(AmzExpires, strconv.FormatFloat(presignData.Lifetime.Round(time.Second).Seconds(), 'f', 0, 64))
|
|
||||||
|
|
||||||
signer := v4.NewSigner(func(options *v4.SignerOptions) {
|
|
||||||
options.DisableURIPathEscaping = true
|
|
||||||
options.LogSigning = true
|
|
||||||
options.Logger = log
|
|
||||||
})
|
|
||||||
|
|
||||||
signedURI, _, err := signer.PresignHTTP(ctx, creds, req, presignData.Headers[AmzContentSHA256], presignData.Service, presignData.Region, presignData.SignTime)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("presign: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.URL, err = url.ParseRequestURI(signedURI); err != nil {
|
|
||||||
return nil, fmt.Errorf("parse signed URI: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PresignRequestV4a forms pre-signed request to access objects without aws credentials.
|
|
||||||
func PresignRequestV4a(cred aws.Credentials, reqData RequestData, presignData PresignData, log *zap.Logger) (*http.Request, error) {
|
|
||||||
urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, httpbinding.EscapePath(reqData.Bucket, false), httpbinding.EscapePath(reqData.Object, false))
|
|
||||||
req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create new request: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range presignData.Headers {
|
|
||||||
req.Header.Set(k, v) // maybe we should filter system header (or keep responsibility on caller)
|
|
||||||
}
|
|
||||||
|
|
||||||
req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))
|
|
||||||
req.Header.Set(AmzExpires, strconv.FormatFloat(presignData.Lifetime.Round(time.Second).Seconds(), 'f', 0, 64))
|
|
||||||
|
|
||||||
signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
|
|
||||||
options.DisableURIPathEscaping = true
|
|
||||||
options.LogSigning = true
|
|
||||||
options.Logger = log
|
|
||||||
})
|
|
||||||
|
|
||||||
credAdapter := v4a.SymmetricCredentialAdaptor{
|
|
||||||
SymmetricProvider: credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, ""),
|
|
||||||
}
|
|
||||||
|
|
||||||
creds, err := credAdapter.RetrievePrivateKey(req.Context())
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to derive assymetric key from credentials: %w", err)
|
|
||||||
}
|
|
||||||
presignedURL, _, err := signer.PresignHTTP(req.Context(), creds, req, presignData.Headers[AmzContentSHA256], presignData.Service, []string{presignData.Region}, presignData.SignTime)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("presign: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return http.NewRequest(reqData.Method, presignedURL, nil)
|
|
||||||
}
|
|
|
@ -1,202 +0,0 @@
|
||||||
package auth
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/aws/aws-sdk-go-v2/aws"
|
|
||||||
credentialsv2 "github.com/aws/aws-sdk-go-v2/credentials"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"go.uber.org/zap/zaptest"
|
|
||||||
)
|
|
||||||
|
|
||||||
var _ tokens.Credentials = (*credentialsMock)(nil)
|
|
||||||
|
|
||||||
type credentialsMock struct {
|
|
||||||
boxes map[string]*accessbox.Box
|
|
||||||
}
|
|
||||||
|
|
||||||
func newTokensFrostfsMock() *credentialsMock {
|
|
||||||
return &credentialsMock{
|
|
||||||
boxes: make(map[string]*accessbox.Box),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m credentialsMock) addBox(addr oid.Address, box *accessbox.Box) {
|
|
||||||
m.boxes[getAccessKeyID(addr)] = box
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m credentialsMock) GetBox(_ context.Context, _ cid.ID, accessKeyID string) (*accessbox.Box, []object.Attribute, error) {
|
|
||||||
box, ok := m.boxes[accessKeyID]
|
|
||||||
if !ok {
|
|
||||||
return nil, nil, &apistatus.ObjectNotFound{}
|
|
||||||
}
|
|
||||||
|
|
||||||
return box, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m credentialsMock) Put(context.Context, tokens.CredentialsParam) (oid.Address, error) {
|
|
||||||
return oid.Address{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m credentialsMock) Update(context.Context, tokens.CredentialsParam) (oid.Address, error) {
|
|
||||||
return oid.Address{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckSign(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
var accessKeyAddr oid.Address
|
|
||||||
err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
|
|
||||||
secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
|
|
||||||
awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secretKey}
|
|
||||||
|
|
||||||
reqData := RequestData{
|
|
||||||
Method: "GET",
|
|
||||||
Endpoint: "http://localhost:8084",
|
|
||||||
Bucket: "my-bucket",
|
|
||||||
Object: "@obj/name",
|
|
||||||
}
|
|
||||||
presignData := PresignData{
|
|
||||||
Service: "s3",
|
|
||||||
Region: "spb",
|
|
||||||
Lifetime: 10 * time.Minute,
|
|
||||||
SignTime: time.Now().UTC(),
|
|
||||||
Headers: map[string]string{
|
|
||||||
ContentTypeHdr: "text/plain",
|
|
||||||
AmzContentSHA256: UnsignedPayload,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := PresignRequest(ctx, awsCreds, reqData, presignData, zaptest.NewLogger(t))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
expBox := &accessbox.Box{
|
|
||||||
Gate: &accessbox.GateData{
|
|
||||||
SecretKey: secretKey,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
mock := newTokensFrostfsMock()
|
|
||||||
mock.addBox(accessKeyAddr, expBox)
|
|
||||||
|
|
||||||
c := &Center{
|
|
||||||
cli: mock,
|
|
||||||
reg: NewRegexpMatcher(AuthorizationFieldRegexp),
|
|
||||||
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
|
||||||
settings: ¢erSettingsMock{},
|
|
||||||
}
|
|
||||||
box, err := c.Authenticate(req)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, expBox, box.AccessBox)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckSignV4a(t *testing.T) {
|
|
||||||
var accessKeyAddr oid.Address
|
|
||||||
err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
|
|
||||||
secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
|
|
||||||
awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secretKey}
|
|
||||||
|
|
||||||
reqData := RequestData{
|
|
||||||
Method: "GET",
|
|
||||||
Endpoint: "http://localhost:8084",
|
|
||||||
Bucket: "my-bucket",
|
|
||||||
Object: "@obj/name",
|
|
||||||
}
|
|
||||||
presignData := PresignData{
|
|
||||||
Service: "s3",
|
|
||||||
Region: "spb",
|
|
||||||
Lifetime: 10 * time.Minute,
|
|
||||||
SignTime: time.Now().UTC(),
|
|
||||||
Headers: map[string]string{
|
|
||||||
ContentTypeHdr: "text/plain",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := PresignRequestV4a(awsCreds, reqData, presignData, zaptest.NewLogger(t))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
req.Header.Set(ContentTypeHdr, "text/plain")
|
|
||||||
|
|
||||||
expBox := &accessbox.Box{
|
|
||||||
Gate: &accessbox.GateData{
|
|
||||||
SecretKey: secretKey,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
mock := newTokensFrostfsMock()
|
|
||||||
mock.addBox(accessKeyAddr, expBox)
|
|
||||||
|
|
||||||
c := &Center{
|
|
||||||
cli: mock,
|
|
||||||
regV4a: NewRegexpMatcher(authorizationFieldV4aRegexp),
|
|
||||||
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
|
||||||
}
|
|
||||||
box, err := c.Authenticate(req)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, expBox, box.AccessBox)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPresignRequestV4a(t *testing.T) {
|
|
||||||
var accessKeyAddr oid.Address
|
|
||||||
err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
|
|
||||||
secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
|
|
||||||
|
|
||||||
signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
|
|
||||||
options.DisableURIPathEscaping = true
|
|
||||||
options.LogSigning = true
|
|
||||||
options.Logger = zaptest.NewLogger(t)
|
|
||||||
})
|
|
||||||
|
|
||||||
credAdapter := v4a.SymmetricCredentialAdaptor{
|
|
||||||
SymmetricProvider: credentialsv2.NewStaticCredentialsProvider(accessKeyID, secretKey, ""),
|
|
||||||
}
|
|
||||||
|
|
||||||
creds, err := credAdapter.RetrievePrivateKey(context.TODO())
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
signingTime := time.Now()
|
|
||||||
service := "s3"
|
|
||||||
regionSet := []string{"spb"}
|
|
||||||
|
|
||||||
req, err := http.NewRequest("GET", "http://localhost:8084/bucket/object", nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
req.Header.Set(AmzExpires, "600")
|
|
||||||
|
|
||||||
presignedURL, hdr, err := signer.PresignHTTP(req.Context(), creds, req, "", service, regionSet, signingTime)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
fmt.Println(presignedURL)
|
|
||||||
fmt.Println(hdr)
|
|
||||||
|
|
||||||
signature := req.URL.Query().Get(AmzSignature)
|
|
||||||
|
|
||||||
r, err := http.NewRequest("GET", presignedURL, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
query := r.URL.Query()
|
|
||||||
query.Del(AmzSignature)
|
|
||||||
r.URL.RawQuery = query.Encode()
|
|
||||||
|
|
||||||
err = signer.VerifyPresigned(creds, r, "", service, regionSet, signingTime, signature)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
|
@ -1,37 +0,0 @@
|
||||||
// This file is part of https://github.com/aws/smithy-go/blob/f0c6adfdec6e40bb8bb2920a40d016943b4ad762/encoding/httpbinding/path_replace.go
|
|
||||||
|
|
||||||
package httpbinding
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// EscapePath escapes part of a URL path in Amazon style.
|
|
||||||
func EscapePath(path string, encodeSep bool) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
for i := 0; i < len(path); i++ {
|
|
||||||
c := path[i]
|
|
||||||
if noEscape[c] || (c == '/' && !encodeSep) {
|
|
||||||
buf.WriteByte(c)
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(&buf, "%%%02X", c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
var noEscape [256]bool
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
for i := 0; i < len(noEscape); i++ {
|
|
||||||
// AWS expects every character except these to be escaped
|
|
||||||
noEscape[i] = (i >= 'A' && i <= 'Z') ||
|
|
||||||
(i >= 'a' && i <= 'z') ||
|
|
||||||
(i >= '0' && i <= '9') ||
|
|
||||||
i == '-' ||
|
|
||||||
i == '.' ||
|
|
||||||
i == '_' ||
|
|
||||||
i == '~'
|
|
||||||
}
|
|
||||||
}
|
|
87
api/auth/signer/v4/header_rules.go
Normal file
87
api/auth/signer/v4/header_rules.go
Normal file
|
@ -0,0 +1,87 @@
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// validator houses a set of rule needed for validation of a
|
||||||
|
// string value.
|
||||||
|
type rules []rule
|
||||||
|
|
||||||
|
// rule interface allows for more flexible rules and just simply
|
||||||
|
// checks whether or not a value adheres to that rule.
|
||||||
|
type rule interface {
|
||||||
|
IsValid(value string) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsValid will iterate through all rules and see if any rules
|
||||||
|
// apply to the value and supports nested rules.
|
||||||
|
func (r rules) IsValid(value string) bool {
|
||||||
|
for _, rule := range r {
|
||||||
|
if rule.IsValid(value) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// mapRule generic rule for maps.
|
||||||
|
type mapRule map[string]struct{}
|
||||||
|
|
||||||
|
// IsValid for the map rule satisfies whether it exists in the map.
|
||||||
|
func (m mapRule) IsValid(value string) bool {
|
||||||
|
_, ok := m[value]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// whitelist is a generic rule for whitelisting.
|
||||||
|
type whitelist struct {
|
||||||
|
rule
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsValid for whitelist checks if the value is within the whitelist.
|
||||||
|
func (w whitelist) IsValid(value string) bool {
|
||||||
|
return w.rule.IsValid(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// blacklist is a generic rule for blacklisting.
|
||||||
|
type blacklist struct {
|
||||||
|
rule
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsValid for whitelist checks if the value is within the whitelist.
|
||||||
|
func (b blacklist) IsValid(value string) bool {
|
||||||
|
return !b.rule.IsValid(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
type patterns []string
|
||||||
|
|
||||||
|
// IsValid for patterns checks each pattern and returns if a match has
|
||||||
|
// been found.
|
||||||
|
func (p patterns) IsValid(value string) bool {
|
||||||
|
for _, pattern := range p {
|
||||||
|
if HasPrefixFold(value, pattern) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
|
||||||
|
// under Unicode case-folding.
|
||||||
|
func HasPrefixFold(s, prefix string) bool {
|
||||||
|
return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
|
||||||
|
}
|
||||||
|
|
||||||
|
// inclusiveRules rules allow for rules to depend on one another.
|
||||||
|
type inclusiveRules []rule
|
||||||
|
|
||||||
|
// IsValid will return true if all rules are true.
|
||||||
|
func (r inclusiveRules) IsValid(value string) bool {
|
||||||
|
for _, rule := range r {
|
||||||
|
if !rule.IsValid(value) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
7
api/auth/signer/v4/options.go
Normal file
7
api/auth/signer/v4/options.go
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
package v4
|
||||||
|
|
||||||
|
// WithUnsignedPayload will enable and set the UnsignedPayload field to
|
||||||
|
// true of the signer.
|
||||||
|
func WithUnsignedPayload(v4 *Signer) {
|
||||||
|
v4.UnsignedPayload = true
|
||||||
|
}
|
14
api/auth/signer/v4/request_context_go1.7.go
Normal file
14
api/auth/signer/v4/request_context_go1.7.go
Normal file
|
@ -0,0 +1,14 @@
|
||||||
|
//go:build go1.7
|
||||||
|
// +build go1.7
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
)
|
||||||
|
|
||||||
|
func requestContext(r *http.Request) aws.Context {
|
||||||
|
return r.Context()
|
||||||
|
}
|
63
api/auth/signer/v4/stream.go
Normal file
63
api/auth/signer/v4/stream.go
Normal file
|
@ -0,0 +1,63 @@
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/hex"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
)
|
||||||
|
|
||||||
|
type credentialValueProvider interface {
|
||||||
|
Get() (credentials.Value, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StreamSigner implements signing of event stream encoded payloads.
|
||||||
|
type StreamSigner struct {
|
||||||
|
region string
|
||||||
|
service string
|
||||||
|
|
||||||
|
credentials credentialValueProvider
|
||||||
|
|
||||||
|
prevSig []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages.
|
||||||
|
func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner {
|
||||||
|
return &StreamSigner{
|
||||||
|
region: region,
|
||||||
|
service: service,
|
||||||
|
credentials: credentials,
|
||||||
|
prevSig: seedSignature,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSignature takes an event stream encoded headers and payload and returns a signature.
|
||||||
|
func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) {
|
||||||
|
credValue, err := s.credentials.Get()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date)
|
||||||
|
|
||||||
|
keyPath := buildSigningScope(s.region, s.service, date)
|
||||||
|
|
||||||
|
stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date)
|
||||||
|
|
||||||
|
signature := hmacSHA256(sigKey, []byte(stringToSign))
|
||||||
|
s.prevSig = signature
|
||||||
|
|
||||||
|
return signature, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string {
|
||||||
|
return strings.Join([]string{
|
||||||
|
"AWS4-HMAC-SHA256-PAYLOAD",
|
||||||
|
formatTime(date),
|
||||||
|
scope,
|
||||||
|
hex.EncodeToString(prevSig),
|
||||||
|
hex.EncodeToString(hashSHA256(headers)),
|
||||||
|
hex.EncodeToString(hashSHA256(payload)),
|
||||||
|
}, "\n")
|
||||||
|
}
|
25
api/auth/signer/v4/uri_path.go
Normal file
25
api/auth/signer/v4/uri_path.go
Normal file
|
@ -0,0 +1,25 @@
|
||||||
|
//go:build go1.5
|
||||||
|
// +build go1.5
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func getURIPath(u *url.URL) string {
|
||||||
|
var uri string
|
||||||
|
|
||||||
|
if len(u.Opaque) > 0 {
|
||||||
|
uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
|
||||||
|
} else {
|
||||||
|
uri = u.EscapedPath()
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(uri) == 0 {
|
||||||
|
uri = "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
return uri
|
||||||
|
}
|
856
api/auth/signer/v4/v4.go
Normal file
856
api/auth/signer/v4/v4.go
Normal file
|
@ -0,0 +1,856 @@
|
||||||
|
// Package v4 implements signing for AWS V4 signer
|
||||||
|
//
|
||||||
|
// Provides request signing for request that need to be signed with
|
||||||
|
// AWS V4 Signatures.
|
||||||
|
//
|
||||||
|
// # Standalone Signer
|
||||||
|
//
|
||||||
|
// Generally using the signer outside of the SDK should not require any additional
|
||||||
|
// logic when using Go v1.5 or higher. The signer does this by taking advantage
|
||||||
|
// of the URL.EscapedPath method. If your request URI requires additional escaping
|
||||||
|
// you many need to use the URL.Opaque to define what the raw URI should be sent
|
||||||
|
// to the service as.
|
||||||
|
//
|
||||||
|
// The signer will first check the URL.Opaque field, and use its value if set.
|
||||||
|
// The signer does require the URL.Opaque field to be set in the form of:
|
||||||
|
//
|
||||||
|
// "//<hostname>/<path>"
|
||||||
|
//
|
||||||
|
// // e.g.
|
||||||
|
// "//example.com/some/path"
|
||||||
|
//
|
||||||
|
// The leading "//" and hostname are required or the URL.Opaque escaping will
|
||||||
|
// not work correctly.
|
||||||
|
//
|
||||||
|
// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
|
||||||
|
// method and using the returned value. If you're using Go v1.4 you must set
|
||||||
|
// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
|
||||||
|
// Go v1.5 the signer will fallback to URL.Path.
|
||||||
|
//
|
||||||
|
// AWS v4 signature validation requires that the canonical string's URI path
|
||||||
|
// element must be the URI escaped form of the HTTP request's path.
|
||||||
|
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
|
||||||
|
//
|
||||||
|
// The Go HTTP client will perform escaping automatically on the request. Some
|
||||||
|
// of these escaping may cause signature validation errors because the HTTP
|
||||||
|
// request differs from the URI path or query that the signature was generated.
|
||||||
|
// https://golang.org/pkg/net/url/#URL.EscapedPath
|
||||||
|
//
|
||||||
|
// Because of this, it is recommended that when using the signer outside of the
|
||||||
|
// SDK that explicitly escaping the request prior to being signed is preferable,
|
||||||
|
// and will help prevent signature validation errors. This can be done by setting
|
||||||
|
// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
|
||||||
|
// call URL.EscapedPath() if Opaque is not set.
|
||||||
|
//
|
||||||
|
// If signing a request intended for HTTP2 server, and you're using Go 1.6.2
|
||||||
|
// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
|
||||||
|
// request URL. https://github.com/golang/go/issues/16847 points to a bug in
|
||||||
|
// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
|
||||||
|
// message. URL.Opaque generally will force Go to make requests with absolute URL.
|
||||||
|
// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
|
||||||
|
// or url.EscapedPath will ignore the RawPath escaping.
|
||||||
|
//
|
||||||
|
// Test `TestStandaloneSign` provides a complete example of using the signer
|
||||||
|
// outside of the SDK and pre-escaping the URI path.
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Header and query-parameter names used to carry the computed signature.
	authorizationHeader     = "Authorization"
	authHeaderSignatureElem = "Signature="
	signatureQueryKey       = "X-Amz-Signature"

	// Algorithm identifier and time layouts mandated by the SigV4 spec.
	authHeaderPrefix = "AWS4-HMAC-SHA256"
	timeFormat       = "20060102T150405Z"
	shortTimeFormat  = "20060102"
	awsV4Request     = "aws4_request"

	// emptyStringSHA256 is a SHA256 of an empty string.
	emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
)
|
||||||
|
|
||||||
|
// ignoredPresignHeaders lists headers that are never included in the
// canonical headers of a presigned request's signature.
var ignoredPresignHeaders = rules{
	blacklist{
		mapRule{
			authorizationHeader: struct{}{},
			"User-Agent":        struct{}{},
			"X-Amzn-Trace-Id":   struct{}{},
		},
	},
}
|
||||||
|
|
||||||
|
// drop User-Agent header to be compatible with aws sdk java v1.
// NOTE(review): User-Agent is absent from this blacklist (so it participates
// in header-based signing) but is blacklisted in ignoredPresignHeaders above;
// confirm that asymmetry is intentional.
var ignoredHeaders = rules{
	blacklist{
		mapRule{
			authorizationHeader: struct{}{},
			"X-Amzn-Trace-Id":   struct{}{},
		},
	},
}
|
||||||
|
|
||||||
|
// requiredSignedHeaders is a whitelist for build canonical headers. Any
// header matching an entry (or the X-Amz-Meta- prefix pattern) must always be
// part of the signed header set and is never hoisted to the query string.
var requiredSignedHeaders = rules{
	whitelist{
		mapRule{
			"Cache-Control":                         struct{}{},
			"Content-Disposition":                   struct{}{},
			"Content-Encoding":                      struct{}{},
			"Content-Language":                      struct{}{},
			"Content-Md5":                           struct{}{},
			"Content-Type":                          struct{}{},
			"Expires":                               struct{}{},
			"If-Match":                              struct{}{},
			"If-Modified-Since":                     struct{}{},
			"If-None-Match":                         struct{}{},
			"If-Unmodified-Since":                   struct{}{},
			"Range":                                 struct{}{},
			"X-Amz-Acl":                             struct{}{},
			"X-Amz-Copy-Source":                     struct{}{},
			"X-Amz-Copy-Source-If-Match":            struct{}{},
			"X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
			"X-Amz-Copy-Source-If-None-Match":       struct{}{},
			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
			"X-Amz-Copy-Source-Range":               struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
			"X-Amz-Grant-Full-control":                                    struct{}{},
			"X-Amz-Grant-Read":                                            struct{}{},
			"X-Amz-Grant-Read-Acp":                                        struct{}{},
			"X-Amz-Grant-Write":                                           struct{}{},
			"X-Amz-Grant-Write-Acp":                                       struct{}{},
			"X-Amz-Metadata-Directive":                                    struct{}{},
			"X-Amz-Mfa":                                                   struct{}{},
			"X-Amz-Request-Payer":                                         struct{}{},
			"X-Amz-Server-Side-Encryption":                                struct{}{},
			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":                 struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Algorithm":             struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key":                   struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":               struct{}{},
			"X-Amz-Storage-Class":                                         struct{}{},
			"X-Amz-Tagging":                                               struct{}{},
			"X-Amz-Website-Redirect-Location":                             struct{}{},
			"X-Amz-Content-Sha256":                                        struct{}{},
		},
	},
	patterns{"X-Amz-Meta-"},
}
|
||||||
|
|
||||||
|
// allowedQueryHoisting is a whitelist for build query headers: X-Amz-*
// headers that are not required signed headers may be hoisted from the HTTP
// header set into the query string when presigning.
var allowedQueryHoisting = inclusiveRules{
	blacklist{requiredSignedHeaders},
	patterns{"X-Amz-"},
}
|
||||||
|
|
||||||
|
// Signer applies AWS v4 signing to given request. Use this to sign requests
// that need to be signed with AWS V4 Signatures.
type Signer struct {
	// The authentication credentials the request will be signed against.
	// This value must be set to sign requests.
	Credentials *credentials.Credentials

	// Sets the log level the signer should use when reporting information to
	// the logger. If the logger is nil nothing will be logged. See
	// aws.LogLevelType for more information on available logging levels
	//
	// By default nothing will be logged.
	Debug aws.LogLevelType

	// The logger logging information will be written to. If there the logger
	// is nil, nothing will be logged.
	Logger aws.Logger

	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
	// request header to the request's query string. This is most commonly used
	// with pre-signed requests preventing headers from being added to the
	// request's query string.
	DisableHeaderHoisting bool

	// Disables the automatic escaping of the URI path of the request for the
	// signature's canonical string's path. For services that do not need additional
	// escaping then use this to disable the signer escaping the path.
	//
	// S3 is an example of a service that does not need additional escaping.
	//
	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
	DisableURIPathEscaping bool

	// Disables the automatic setting of the HTTP request's Body field with the
	// io.ReadSeeker passed in to the signer. This is useful if you're using a
	// custom wrapper around the body for the io.ReadSeeker and want to preserve
	// the Body value on the Request.Body.
	//
	// This does run the risk of signing a request with a body that will not be
	// sent in the request. Need to ensure that the underlying data of the Body
	// values are the same.
	DisableRequestBodyOverwrite bool

	// currentTimeFn returns the time value which represents the current time.
	// This value should only be used for testing. If it is nil the default
	// time.Now will be used.
	currentTimeFn func() time.Time

	// UnsignedPayload will prevent signing of the payload. This will only
	// work for services that have support for this.
	UnsignedPayload bool
}
|
||||||
|
|
||||||
|
// NewSigner returns a Signer pointer configured with the credentials and optional
|
||||||
|
// option values provided. If not options are provided the Signer will use its
|
||||||
|
// default configuration.
|
||||||
|
func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
|
||||||
|
v4 := &Signer{
|
||||||
|
Credentials: credentials,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, option := range options {
|
||||||
|
option(v4)
|
||||||
|
}
|
||||||
|
|
||||||
|
return v4
|
||||||
|
}
|
||||||
|
|
||||||
|
// signingCtx carries all per-request state needed to produce a SigV4
// signature: the inputs (request, body, time, scope) and every intermediate
// value derived during build().
type signingCtx struct {
	ServiceName      string
	Region           string
	Request          *http.Request
	Body             io.ReadSeeker
	Query            url.Values
	Time             time.Time
	ExpireTime       time.Duration
	SignedHeaderVals http.Header

	DisableURIPathEscaping bool

	credValues      credentials.Value
	isPresign       bool
	unsignedPayload bool

	// Derived values, populated in dependency order by build().
	bodyDigest       string
	signedHeaders    string
	canonicalHeaders string
	canonicalString  string
	credentialString string
	stringToSign     string
	signature        string
}
|
||||||
|
|
||||||
|
// Sign signs AWS v4 requests with the provided body, service name, region the
// request is made to, and time the request is signed at. The signTime allows
// you to specify that a request is signed for the future, and cannot be
// used until then.
//
// Returns a list of HTTP headers that were included in the signature or an
// error if signing the request failed. Generally for signed requests this value
// is not needed as the full request context will be captured by the http.Request
// value. It is included for reference though.
//
// Sign will set the request's Body to be the `body` parameter passed in. If
// the body is not already an io.ReadCloser, it will be wrapped within one. If
// a `nil` body parameter passed to Sign, the request's Body field will be
// also set to nil. Its important to note that this functionality will not
// change the request's ContentLength of the request.
//
// Sign differs from Presign in that it will sign the request using HTTP
// header values. This type of signing is intended for http.Request values that
// will not be shared, or are shared in a way the header values on the request
// will not be lost.
//
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
// generated. To bypass the signer computing the hash you can set the
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
// only compute the hash if the request header value is empty.
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
	// exp = 0 and isPresign = false select header-based signing.
	return v4.signWithBody(r, body, service, region, 0, false, signTime)
}
|
||||||
|
|
||||||
|
// Presign signs AWS v4 requests with the provided body, service name, region
// the request is made to, and time the request is signed at. The signTime
// allows you to specify that a request is signed for the future, and cannot
// be used until then.
//
// Returns a list of HTTP headers that were included in the signature or an
// error if signing the request failed. For presigned requests these headers
// and their values must be included on the HTTP request when it is made. This
// is helpful to know what header values need to be shared with the party the
// presigned request will be distributed to.
//
// Presign differs from Sign in that it will sign the request using query string
// instead of header values. This allows you to share the Presigned Request's
// URL with third parties, or distribute it throughout your system with minimal
// dependencies.
//
// Presign also takes an exp value which is the duration the
// signed request will be valid after the signing time. This is allows you to
// set when the request will expire.
//
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
// generated. To bypass the signer computing the hash you can set the
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
// only compute the hash if the request header value is empty.
//
// Presigning a S3 request will not compute the body's SHA256 hash by default.
// This is done due to the general use case for S3 presigned URLs is to share
// PUT/GET capabilities. If you would like to include the body's SHA256 in the
// presigned request's signature you can set the "X-Amz-Content-Sha256"
// HTTP header and that will be included in the request's signature.
func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
	// isPresign = true selects query-string signing with the given expiry.
	return v4.signWithBody(r, body, service, region, exp, true, signTime)
}
|
||||||
|
|
||||||
|
// signWithBody is the shared implementation behind Sign and Presign. It
// builds a signingCtx for the request, resolves credentials, runs build to
// attach either the Authorization header or the presign query parameters,
// and returns the headers that were included in the signature.
func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
	currentTimeFn := v4.currentTimeFn
	if currentTimeFn == nil {
		currentTimeFn = time.Now
	}

	ctx := &signingCtx{
		Request:                r,
		Body:                   body,
		Query:                  r.URL.Query(),
		Time:                   signTime,
		ExpireTime:             exp,
		isPresign:              isPresign,
		ServiceName:            service,
		Region:                 region,
		DisableURIPathEscaping: v4.DisableURIPathEscaping,
		unsignedPayload:        v4.UnsignedPayload,
	}

	// Sort multi-valued query parameters so the canonical query string is
	// deterministic.
	for key := range ctx.Query {
		sort.Strings(ctx.Query[key])
	}

	// If the request was already signed, strip the previous signature and
	// re-sign with the current time.
	if ctx.isRequestSigned() {
		ctx.Time = currentTimeFn()
		ctx.handlePresignRemoval()
	}

	var err error
	ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r))
	if err != nil {
		return http.Header{}, err
	}

	ctx.sanitizeHostForHeader()
	ctx.assignAmzQueryValues()
	if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
		return nil, err
	}

	// If the request is not presigned the body should be attached to it. This
	// prevents the confusion of wanting to send a signed request without
	// the body the request was signed for attached.
	if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
		var reader io.ReadCloser
		if body != nil {
			var ok bool
			if reader, ok = body.(io.ReadCloser); !ok {
				reader = io.NopCloser(body)
			}
		}
		r.Body = reader
	}

	if v4.Debug.Matches(aws.LogDebugWithSigning) {
		v4.logSigningInfo(ctx)
	}

	return ctx.SignedHeaderVals, nil
}
|
||||||
|
|
||||||
|
// sanitizeHostForHeader normalizes the request's host via the SDK helper so
// the Host value that gets signed matches what the HTTP client will send.
func (ctx *signingCtx) sanitizeHostForHeader() {
	request.SanitizeHostForHeader(ctx.Request)
}
|
||||||
|
|
||||||
|
// handlePresignRemoval strips a prior presign signature from the query so a
// previously-signed request can be re-signed. No-op for header-signed
// requests.
func (ctx *signingCtx) handlePresignRemoval() {
	if !ctx.isPresign {
		return
	}

	// The previous signing of this request is now invalid and needs to be
	// removed before the request is re-signed.
	ctx.removePresign()

	// Update the request's query string to ensure the values stays in
	// sync in the case retrieving the new credentials fails.
	ctx.Request.URL.RawQuery = ctx.Query.Encode()
}
|
||||||
|
|
||||||
|
// assignAmzQueryValues seeds the algorithm and session-token values: into the
// query string for presigned requests, or the X-Amz-Security-Token header for
// header-signed requests.
func (ctx *signingCtx) assignAmzQueryValues() {
	if ctx.isPresign {
		ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
		if ctx.credValues.SessionToken != "" {
			ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
		} else {
			// Clear any stale token left over from a previous signing.
			ctx.Query.Del("X-Amz-Security-Token")
		}

		return
	}

	if ctx.credValues.SessionToken != "" {
		ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
	}
}
|
||||||
|
|
||||||
|
// SignRequestHandler is a named request handler the SDK will use to sign
// service client request with using the V4 signature.
var SignRequestHandler = request.NamedHandler{
	Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
}
|
||||||
|
|
||||||
|
// SignSDKRequest signs an AWS request with the V4 signature. This
// request handler should only be used with the SDK's built in service client's
// API operation requests.
//
// This function should not be used on its own, but in conjunction with
// an AWS service client's API operation call. To sign a standalone request
// not created by a service client's API operation method use the "Sign" or
// "Presign" functions of the "Signer" type.
//
// If the credentials of the request's config are set to
// credentials.AnonymousCredentials the request will not be signed.
func SignSDKRequest(req *request.Request) {
	SignSDKRequestWithCurrentTime(req, time.Now)
}
|
||||||
|
|
||||||
|
// BuildNamedHandler will build a generic handler for signing. The returned
// handler signs with the current time and applies the provided Signer
// options on each invocation.
func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
	return request.NamedHandler{
		Name: name,
		Fn: func(req *request.Request) {
			SignSDKRequestWithCurrentTime(req, time.Now, opts...)
		},
	}
}
|
||||||
|
|
||||||
|
// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
// function passed in. Behaves the same as SignSDKRequest with the exception
// the request is signed with the value returned by the current time function.
func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
	// If the request does not need to be signed ignore the signing of the
	// request if the AnonymousCredentials object is used.
	if req.Config.Credentials == credentials.AnonymousCredentials {
		return
	}

	// Fall back from the operation's signing region to the client config's
	// region when no explicit signing region is set.
	region := req.ClientInfo.SigningRegion
	if region == "" {
		region = aws.StringValue(req.Config.Region)
	}

	// Likewise fall back to the service name when no signing name is set.
	name := req.ClientInfo.SigningName
	if name == "" {
		name = req.ClientInfo.ServiceName
	}

	v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
		v4.Debug = req.Config.LogLevel.Value()
		v4.Logger = req.Config.Logger
		v4.DisableHeaderHoisting = req.NotHoist
		v4.currentTimeFn = curTimeFn
		if name == "s3" {
			// S3 service should not have any escaping applied
			v4.DisableURIPathEscaping = true
		}
		// Prevents setting the HTTPRequest's Body. Since the Body could be
		// wrapped in a custom io.Closer that we do not want to be stompped
		// on top of by the signer.
		v4.DisableRequestBodyOverwrite = true
	})

	// Caller-supplied options are applied last so they can override defaults.
	for _, opt := range opts {
		opt(v4)
	}

	curTime := curTimeFn()
	// A non-zero ExpireTime selects presigning (query-string signature).
	signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
		name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
	)
	if err != nil {
		req.Error = err
		req.SignedHeaderVals = nil
		return
	}

	req.SignedHeaderVals = signedHeaders
	req.LastSignedAt = curTime
}
|
||||||
|
|
||||||
|
// logSignInfoMsg is the debug template showing the canonical string and the
// string to sign; logSignedURLMsg is appended for presigned requests.
const logSignInfoMsg = `DEBUG: Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
|
||||||
|
|
||||||
|
// logSigningInfo writes the canonical string, string to sign, and (for
// presigned requests) the signed URL to the Signer's logger.
func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
	signedURLMsg := ""
	if ctx.isPresign {
		signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
	}
	msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
	v4.Logger.Log(msg)
}
|
||||||
|
|
||||||
|
// build derives every signing value in dependency order and attaches the
// final signature to the request: the query string for presigned requests,
// or the Authorization header otherwise.
func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
	ctx.buildTime()             // no depends
	ctx.buildCredentialString() // no depends

	if err := ctx.buildBodyDigest(); err != nil {
		return err
	}

	unsignedHeaders := ctx.Request.Header
	if ctx.isPresign {
		if !disableHeaderHoisting {
			// Move hoistable X-Amz-* headers into the query string; the
			// remainder stay as headers to be canonicalized below.
			var urlValues url.Values
			urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
			for k := range urlValues {
				ctx.Query[k] = urlValues[k]
			}
		}
	}

	if ctx.isPresign {
		ctx.buildCanonicalHeaders(ignoredPresignHeaders, unsignedHeaders)
	} else {
		ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
	}
	ctx.buildCanonicalString() // depends on canon headers / signed headers
	ctx.buildStringToSign()    // depends on canon string
	ctx.buildSignature()       // depends on string to sign

	if ctx.isPresign {
		ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature
	} else {
		parts := []string{
			authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
			"SignedHeaders=" + ctx.signedHeaders,
			authHeaderSignatureElem + ctx.signature,
		}
		ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", "))
	}

	return nil
}
|
||||||
|
|
||||||
|
// GetSignedRequestSignature attempts to extract the signature of the request.
|
||||||
|
// Returning an error if the request is unsigned, or unable to extract the
|
||||||
|
// signature.
|
||||||
|
func GetSignedRequestSignature(r *http.Request) ([]byte, error) {
|
||||||
|
if auth := r.Header.Get(authorizationHeader); len(auth) != 0 {
|
||||||
|
ps := strings.Split(auth, ", ")
|
||||||
|
for _, p := range ps {
|
||||||
|
if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 {
|
||||||
|
sig := p[len(authHeaderSignatureElem):]
|
||||||
|
if len(sig) == 0 {
|
||||||
|
return nil, fmt.Errorf("invalid request signature authorization header")
|
||||||
|
}
|
||||||
|
return hex.DecodeString(sig)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 {
|
||||||
|
return hex.DecodeString(sig)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("request not signed")
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildTime stamps the signing time onto the request: X-Amz-Date plus
// X-Amz-Expires (in whole seconds) query parameters for presigned requests,
// or the X-Amz-Date header otherwise.
func (ctx *signingCtx) buildTime() {
	if ctx.isPresign {
		duration := int64(ctx.ExpireTime / time.Second)
		ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time))
		ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
	} else {
		ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time))
	}
}
|
||||||
|
|
||||||
|
// buildCredentialString computes the credential scope
// ("date/region/service/aws4_request") and, when presigning, publishes it in
// the X-Amz-Credential query parameter.
func (ctx *signingCtx) buildCredentialString() {
	ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time)

	if ctx.isPresign {
		ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
	}
}
|
||||||
|
|
||||||
|
// buildQuery partitions header into values matching rule r (to be hoisted
// into the query string) and the remaining headers, which are returned
// unchanged for normal header signing.
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
	query := url.Values{}
	unsignedHeaders := http.Header{}
	for k, h := range header {
		if r.IsValid(k) {
			query[k] = h
		} else {
			unsignedHeaders[k] = h
		}
	}

	return query, unsignedHeaders
}
|
||||||
|
// buildCanonicalHeaders computes the sorted, lower-cased signed-header list
// and the canonical headers string ("key:v1,v2" per line) required by the
// SigV4 canonical request. Headers rejected by rule r are excluded; the host
// header is always included.
func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
	var headers []string
	headers = append(headers, "host")
	for k, v := range header {
		if !r.IsValid(k) {
			continue // ignored header
		}
		if ctx.SignedHeaderVals == nil {
			ctx.SignedHeaderVals = make(http.Header)
		}

		lowerCaseKey := strings.ToLower(k)
		if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
			// include additional values
			ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
			continue
		}

		headers = append(headers, lowerCaseKey)
		ctx.SignedHeaderVals[lowerCaseKey] = v
	}
	sort.Strings(headers)

	ctx.signedHeaders = strings.Join(headers, ";")

	if ctx.isPresign {
		ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
	}

	headerValues := make([]string, len(headers))
	for i, k := range headers {
		if k == "host" {
			// Prefer the explicit Request.Host override over the URL's host.
			if ctx.Request.Host != "" {
				headerValues[i] = "host:" + ctx.Request.Host
			} else {
				headerValues[i] = "host:" + ctx.Request.URL.Host
			}
		} else {
			headerValues[i] = k + ":" +
				strings.Join(ctx.SignedHeaderVals[k], ",")
		}
	}
	// SigV4 requires sequential spaces in header values to be collapsed.
	stripExcessSpaces(headerValues)
	ctx.canonicalHeaders = strings.Join(headerValues, "\n")
}
|
||||||
|
|
||||||
|
// buildCanonicalString assembles the SigV4 canonical request: method, URI,
// query string, canonical headers, signed-header list, and body digest,
// newline separated.
func (ctx *signingCtx) buildCanonicalString() {
	// url.Values encodes spaces as "+"; AWS requires "%20" in the canonical
	// query string.
	ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)

	uri := getURIPath(ctx.Request.URL)

	if !ctx.DisableURIPathEscaping {
		uri = rest.EscapePath(uri, false)
	}

	ctx.canonicalString = strings.Join([]string{
		ctx.Request.Method,
		uri,
		ctx.Request.URL.RawQuery,
		ctx.canonicalHeaders + "\n",
		ctx.signedHeaders,
		ctx.bodyDigest,
	}, "\n")
}
|
||||||
|
|
||||||
|
// buildStringToSign composes the SigV4 string to sign: the algorithm name,
// request timestamp, credential scope, and the hex-encoded SHA256 of the
// canonical request.
func (ctx *signingCtx) buildStringToSign() {
	ctx.stringToSign = strings.Join([]string{
		authHeaderPrefix,
		formatTime(ctx.Time),
		ctx.credentialString,
		hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))),
	}, "\n")
}
|
||||||
|
|
||||||
|
// buildSignature derives the per-date/region/service signing key and stores
// the hex-encoded HMAC-SHA256 of the string to sign.
func (ctx *signingCtx) buildSignature() {
	creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time)
	signature := hmacSHA256(creds, []byte(ctx.stringToSign))
	ctx.signature = hex.EncodeToString(signature)
}
|
||||||
|
|
||||||
|
// buildBodyDigest determines the payload hash for the canonical request. A
// caller-provided X-Amz-Content-Sha256 header takes precedence; otherwise
// the digest is "UNSIGNED-PAYLOAD" (unsigned payloads and S3 presigns), the
// SHA256 of the empty string (nil body), or the SHA256 of the seekable body.
// The computed hash is also set as the X-Amz-Content-Sha256 header for
// services that require it (s3, glacier).
func (ctx *signingCtx) buildBodyDigest() error {
	hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
	if hash == "" {
		includeSHA256Header := ctx.unsignedPayload ||
			ctx.ServiceName == "s3" ||
			ctx.ServiceName == "glacier"

		s3Presign := ctx.isPresign && ctx.ServiceName == "s3"

		if ctx.unsignedPayload || s3Presign {
			hash = "UNSIGNED-PAYLOAD"
			// S3 presigned URLs must not carry the header.
			includeSHA256Header = !s3Presign
		} else if ctx.Body == nil {
			hash = emptyStringSHA256
		} else {
			if !aws.IsReaderSeekable(ctx.Body) {
				return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
			}
			hashBytes, err := makeSha256Reader(ctx.Body)
			if err != nil {
				return err
			}
			hash = hex.EncodeToString(hashBytes)
		}

		if includeSHA256Header {
			ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
		}
	}
	ctx.bodyDigest = hash

	return nil
}
|
||||||
|
|
||||||
|
// isRequestSigned returns if the request is currently signed or presigned.
|
||||||
|
func (ctx *signingCtx) isRequestSigned() bool {
|
||||||
|
if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if ctx.Request.Header.Get("Authorization") != "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// removePresign removes signing flags for both signed and presigned
// requests, clearing every presign-related query parameter so the request
// can be re-signed cleanly.
func (ctx *signingCtx) removePresign() {
	ctx.Query.Del("X-Amz-Algorithm")
	ctx.Query.Del("X-Amz-Signature")
	ctx.Query.Del("X-Amz-Security-Token")
	ctx.Query.Del("X-Amz-Date")
	ctx.Query.Del("X-Amz-Expires")
	ctx.Query.Del("X-Amz-Credential")
	ctx.Query.Del("X-Amz-SignedHeaders")
}
|
||||||
|
|
||||||
|
func hmacSHA256(key []byte, data []byte) []byte {
|
||||||
|
hash := hmac.New(sha256.New, key)
|
||||||
|
hash.Write(data)
|
||||||
|
return hash.Sum(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func hashSHA256(data []byte) []byte {
|
||||||
|
hash := sha256.New()
|
||||||
|
hash.Write(data)
|
||||||
|
return hash.Sum(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// makeSha256Reader computes the SHA256 of the reader's remaining contents
// and seeks back to the original offset afterwards. If rewinding fails the
// error is surfaced through the named err result via the deferred Seek.
func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) {
	hash := sha256.New()
	start, err := reader.Seek(0, io.SeekCurrent)
	if err != nil {
		return nil, err
	}
	defer func() {
		// ensure error is return if unable to seek back to start of payload.
		_, err = reader.Seek(start, io.SeekStart)
	}()

	// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
	// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
	size, err := aws.SeekerLen(reader)
	if err != nil {
		_, _ = io.Copy(hash, reader)
	} else {
		_, _ = io.CopyN(hash, reader, size)
	}

	return hash.Sum(nil), nil
}
|
||||||
|
|
||||||
|
const doubleSpace = "  "

// stripExcessSpaces rewrites each string in vals, in place, so it has no
// leading or trailing ASCII spaces and no runs of consecutive spaces. Only
// the space character (0x20) is affected; tabs and other whitespace are
// preserved, matching SigV4 canonical-header normalization.
func stripExcessSpaces(vals []string) {
	for i, val := range vals {
		trimmed := strings.Trim(val, " ")

		// Fast path: nothing left to collapse.
		if !strings.Contains(trimmed, doubleSpace) {
			vals[i] = trimmed
			continue
		}

		var out strings.Builder
		out.Grow(len(trimmed))
		lastWasSpace := false
		for j := 0; j < len(trimmed); j++ {
			c := trimmed[j]
			if c == ' ' {
				// Emit only the first space of a run.
				if !lastWasSpace {
					out.WriteByte(c)
				}
				lastWasSpace = true
				continue
			}
			out.WriteByte(c)
			lastWasSpace = false
		}
		vals[i] = out.String()
	}
}
|
||||||
|
|
||||||
|
func buildSigningScope(region, service string, dt time.Time) string {
|
||||||
|
return strings.Join([]string{
|
||||||
|
formatShortTime(dt),
|
||||||
|
region,
|
||||||
|
service,
|
||||||
|
awsV4Request,
|
||||||
|
}, "/")
|
||||||
|
}
|
||||||
|
|
||||||
|
func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte {
|
||||||
|
hmacDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt)))
|
||||||
|
hmacRegion := hmacSHA256(hmacDate, []byte(region))
|
||||||
|
hmacService := hmacSHA256(hmacRegion, []byte(service))
|
||||||
|
signingKey := hmacSHA256(hmacService, []byte(awsV4Request))
|
||||||
|
return signingKey
|
||||||
|
}
|
||||||
|
|
||||||
|
func formatShortTime(dt time.Time) string {
|
||||||
|
return dt.UTC().Format(shortTimeFormat)
|
||||||
|
}
|
||||||
|
|
||||||
|
func formatTime(dt time.Time) string {
|
||||||
|
return dt.UTC().Format(timeFormat)
|
||||||
|
}
|
|
@ -1,144 +0,0 @@
|
||||||
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/credentials.go
|
|
||||||
// with changes:
|
|
||||||
// * use `time.Now()` instead of `sdk.NowTime()`
|
|
||||||
|
|
||||||
package v4a
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go-v2/aws"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Credentials is Context, ECDSA, and Optional Session Token that can be used
// to sign requests using SigV4a.
type Credentials struct {
	// Context is the credential scope context; the adaptor below stores the
	// source access key ID here so derived keys can be matched to it.
	Context string
	// PrivateKey is the ECDSA key used to produce SigV4a signatures.
	PrivateKey *ecdsa.PrivateKey
	// SessionToken is the optional STS session token.
	SessionToken string

	// Time the credentials will expire. Expires is only meaningful when
	// CanExpire is true.
	CanExpire bool
	Expires   time.Time
}
|
|
||||||
|
|
||||||
// Expired returns if the credentials have expired.
|
|
||||||
func (v Credentials) Expired() bool {
|
|
||||||
if v.CanExpire {
|
|
||||||
return !v.Expires.After(time.Now())
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasKeys returns if the credentials keys are set.
|
|
||||||
func (v Credentials) HasKeys() bool {
|
|
||||||
return len(v.Context) > 0 && v.PrivateKey != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SymmetricCredentialAdaptor wraps a SigV4 AccessKey/SecretKey provider and adapts the credentials
// to a ECDSA PrivateKey for signing with SiV4a.
type SymmetricCredentialAdaptor struct {
	// SymmetricProvider supplies the underlying access key / secret key pair.
	SymmetricProvider aws.CredentialsProvider

	// asymmetric caches a *Credentials derived from the symmetric pair;
	// it may hold a typed nil to mark the cache invalidated.
	asymmetric atomic.Value
	// m serializes derivation/invalidation of the cached asymmetric creds.
	m sync.Mutex
}
|
|
||||||
|
|
||||||
// Retrieve retrieves symmetric credentials from the underlying provider.
// As a side effect it invalidates the cached asymmetric credentials when the
// access key ID has rotated since they were derived.
func (s *SymmetricCredentialAdaptor) Retrieve(ctx context.Context) (aws.Credentials, error) {
	symCreds, err := s.retrieveFromSymmetricProvider(ctx)
	if err != nil {
		return aws.Credentials{}, err
	}

	// Fast path: no cached asymmetric credentials, nothing to reconcile.
	if asymCreds := s.getCreds(); asymCreds == nil {
		return symCreds, nil
	}

	s.m.Lock()
	defer s.m.Unlock()

	// Re-check under the lock; another goroutine may have cleared the cache.
	asymCreds := s.getCreds()
	if asymCreds == nil {
		return symCreds, nil
	}

	// if the context does not match the access key id clear it
	if asymCreds.Context != symCreds.AccessKeyID {
		s.asymmetric.Store((*Credentials)(nil))
	}

	return symCreds, nil
}
|
|
||||||
|
|
||||||
// RetrievePrivateKey returns credentials suitable for SigV4a signing
|
|
||||||
func (s *SymmetricCredentialAdaptor) RetrievePrivateKey(ctx context.Context) (Credentials, error) {
|
|
||||||
if asymCreds := s.getCreds(); asymCreds != nil {
|
|
||||||
return *asymCreds, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
s.m.Lock()
|
|
||||||
defer s.m.Unlock()
|
|
||||||
|
|
||||||
if asymCreds := s.getCreds(); asymCreds != nil {
|
|
||||||
return *asymCreds, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
symmetricCreds, err := s.retrieveFromSymmetricProvider(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return Credentials{}, fmt.Errorf("failed to retrieve symmetric credentials: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
privateKey, err := deriveKeyFromAccessKeyPair(symmetricCreds.AccessKeyID, symmetricCreds.SecretAccessKey)
|
|
||||||
if err != nil {
|
|
||||||
return Credentials{}, fmt.Errorf("failed to derive assymetric key from credentials")
|
|
||||||
}
|
|
||||||
|
|
||||||
creds := Credentials{
|
|
||||||
Context: symmetricCreds.AccessKeyID,
|
|
||||||
PrivateKey: privateKey,
|
|
||||||
SessionToken: symmetricCreds.SessionToken,
|
|
||||||
CanExpire: symmetricCreds.CanExpire,
|
|
||||||
Expires: symmetricCreds.Expires,
|
|
||||||
}
|
|
||||||
|
|
||||||
s.asymmetric.Store(&creds)
|
|
||||||
|
|
||||||
return creds, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *SymmetricCredentialAdaptor) getCreds() *Credentials {
|
|
||||||
v := s.asymmetric.Load()
|
|
||||||
|
|
||||||
if v == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
c := v.(*Credentials)
|
|
||||||
if c != nil && c.HasKeys() && !c.Expired() {
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *SymmetricCredentialAdaptor) retrieveFromSymmetricProvider(ctx context.Context) (aws.Credentials, error) {
|
|
||||||
credentials, err := s.SymmetricProvider.Retrieve(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return aws.Credentials{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return credentials, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CredentialsProvider is the interface for a provider to retrieve credentials
// to sign requests with. Implementations return SigV4a-capable (ECDSA)
// credentials.
type CredentialsProvider interface {
	RetrievePrivateKey(context.Context) (Credentials, error)
}
|
|
|
@ -1,79 +0,0 @@
|
||||||
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/credentials_test.go
|
|
||||||
|
|
||||||
package v4a
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go-v2/aws"
|
|
||||||
)
|
|
||||||
|
|
||||||
// rotatingCredsProvider is a test double whose credentials embed count, so
// bumping count simulates key rotation; closing fail makes every subsequent
// Retrieve return an error.
type rotatingCredsProvider struct {
	count int
	fail  chan struct{}
}
|
|
||||||
|
|
||||||
func (r *rotatingCredsProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
|
|
||||||
select {
|
|
||||||
case <-r.fail:
|
|
||||||
return aws.Credentials{}, fmt.Errorf("rotatingCredsProvider error")
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
credentials := aws.Credentials{
|
|
||||||
AccessKeyID: fmt.Sprintf("ACCESS_KEY_ID_%d", r.count),
|
|
||||||
SecretAccessKey: fmt.Sprintf("SECRET_ACCESS_KEY_%d", r.count),
|
|
||||||
SessionToken: fmt.Sprintf("SESSION_TOKEN_%d", r.count),
|
|
||||||
}
|
|
||||||
return credentials, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestSymmetricCredentialAdaptor exercises the adaptor's cache lifecycle:
// derive-on-demand, cache retention, invalidation on key rotation, and error
// propagation from the underlying provider.
func TestSymmetricCredentialAdaptor(t *testing.T) {
	provider := &rotatingCredsProvider{
		count: 0,
		fail:  make(chan struct{}),
	}

	adaptor := &SymmetricCredentialAdaptor{SymmetricProvider: provider}

	if symCreds, err := adaptor.Retrieve(context.Background()); err != nil {
		t.Fatalf("expect no error, got %v", err)
	} else if !symCreds.HasKeys() {
		t.Fatalf("expect symmetric credentials to have keys")
	}

	// Plain Retrieve must not populate the asymmetric cache.
	if load := adaptor.asymmetric.Load(); load != nil {
		t.Errorf("expect asymmetric credentials to be nil")
	}

	if asymCreds, err := adaptor.RetrievePrivateKey(context.Background()); err != nil {
		t.Fatalf("expect no error, got %v", err)
	} else if !asymCreds.HasKeys() {
		t.Fatalf("expect asymmetric credentials to have keys")
	}

	if _, err := adaptor.Retrieve(context.Background()); err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	// Same access key: the derived credentials stay cached.
	if load := adaptor.asymmetric.Load(); load.(*Credentials) == nil {
		t.Errorf("expect asymmetric credentials to be not nil")
	}

	// Simulate key rotation: the next Retrieve must invalidate the cache.
	provider.count++

	if _, err := adaptor.Retrieve(context.Background()); err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	if load := adaptor.asymmetric.Load(); load.(*Credentials) != nil {
		t.Errorf("expect asymmetric credentials to be nil")
	}

	close(provider.fail) // All requests to the original provider will now fail from this point-on.
	_, err := adaptor.Retrieve(context.Background())
	if err == nil {
		t.Error("expect error, got nil")
	}
}
|
|
|
@ -1,32 +0,0 @@
|
||||||
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/compare.go
|
|
||||||
|
|
||||||
package crypto
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison
// if the two byte slices assuming they represent a big-endian number.
//
//	error if len(x) != len(y)
//	-1 if x < y
//	0 if x == y
//	+1 if x > y
//
// The loop intentionally inspects every byte with branch-free arithmetic so
// the running time depends only on the length, not the data. Do not
// "simplify" it with an early exit.
func ConstantTimeByteCompare(x, y []byte) (int, error) {
	if len(x) != len(y) {
		return 0, fmt.Errorf("slice lengths do not match")
	}

	xLarger, yLarger := 0, 0

	for i := 0; i < len(x); i++ {
		xByte, yByte := int(x[i]), int(y[i])

		// Shifting the difference right by 8 leaves the sign bit:
		// x is 1 iff xByte > yByte, y is 1 iff yByte > xByte.
		x := ((yByte - xByte) >> 8) & 1
		y := ((xByte - yByte) >> 8) & 1

		// Latch the first (most significant) byte position that differs.
		xLarger |= x &^ yLarger
		yLarger |= y &^ xLarger
	}

	return xLarger - yLarger, nil
}
|
|
|
@ -1,62 +0,0 @@
|
||||||
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/compare_test.go
|
|
||||||
|
|
||||||
package crypto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"math/big"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestConstantTimeByteCompare(t *testing.T) {
|
|
||||||
cases := []struct {
|
|
||||||
x, y []byte
|
|
||||||
r int
|
|
||||||
expectErr bool
|
|
||||||
}{
|
|
||||||
{x: []byte{}, y: []byte{}, r: 0},
|
|
||||||
{x: []byte{40}, y: []byte{30}, r: 1},
|
|
||||||
{x: []byte{30}, y: []byte{40}, r: -1},
|
|
||||||
{x: []byte{60, 40, 30, 10, 20}, y: []byte{50, 30, 20, 0, 10}, r: 1},
|
|
||||||
{x: []byte{50, 30, 20, 0, 10}, y: []byte{60, 40, 30, 10, 20}, r: -1},
|
|
||||||
{x: nil, y: []byte{}, r: 0},
|
|
||||||
{x: []byte{}, y: nil, r: 0},
|
|
||||||
{x: []byte{}, y: []byte{10}, expectErr: true},
|
|
||||||
{x: []byte{10}, y: []byte{}, expectErr: true},
|
|
||||||
{x: []byte{10, 20}, y: []byte{10}, expectErr: true},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range cases {
|
|
||||||
compare, err := ConstantTimeByteCompare(tt.x, tt.y)
|
|
||||||
if (err != nil) != tt.expectErr {
|
|
||||||
t.Fatalf("expectErr=%v, got %v", tt.expectErr, err)
|
|
||||||
}
|
|
||||||
if e, a := tt.r, compare; e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkConstantTimeCompare(b *testing.B) {
|
|
||||||
x, y := big.NewInt(1023), big.NewInt(1024)
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
ConstantTimeByteCompare(x.Bytes(), y.Bytes())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// BenchmarkCompare is the variable-time bytes.Compare baseline for
// BenchmarkConstantTimeCompare.
func BenchmarkCompare(b *testing.B) {
	left, right := big.NewInt(1023).Bytes(), big.NewInt(1024).Bytes()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		bytes.Compare(left, right)
	}
}
|
|
||||||
|
|
||||||
// mustBigInt parses s as a base-16 big.Int, panicking on malformed input.
// Intended for test fixtures only.
func mustBigInt(s string) *big.Int {
	v, ok := new(big.Int).SetString(s, 16)
	if !ok {
		panic("can't parse as big.Int")
	}
	return v
}
|
|
|
@ -1,115 +0,0 @@
|
||||||
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/ecc.go
|
|
||||||
|
|
||||||
package crypto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/elliptic"
|
|
||||||
"crypto/hmac"
|
|
||||||
"encoding/asn1"
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"hash"
|
|
||||||
"math"
|
|
||||||
"math/big"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ecdsaSignature is the ASN.1 structure of an ECDSA signature: the SEQUENCE
// of the two signature integers, as consumed by asn1.Unmarshal in
// VerifySignature.
type ecdsaSignature struct {
	R, S *big.Int
}
|
|
||||||
|
|
||||||
// ECDSAKey takes the given elliptic curve, and private key (d) byte slice
|
|
||||||
// and returns the private ECDSA key.
|
|
||||||
func ECDSAKey(curve elliptic.Curve, d []byte) *ecdsa.PrivateKey {
|
|
||||||
return ECDSAKeyFromPoint(curve, (&big.Int{}).SetBytes(d))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ECDSAKeyFromPoint takes the given elliptic curve and point and returns the
|
|
||||||
// private and public keypair
|
|
||||||
func ECDSAKeyFromPoint(curve elliptic.Curve, d *big.Int) *ecdsa.PrivateKey {
|
|
||||||
pX, pY := curve.ScalarBaseMult(d.Bytes())
|
|
||||||
|
|
||||||
privKey := &ecdsa.PrivateKey{
|
|
||||||
PublicKey: ecdsa.PublicKey{
|
|
||||||
Curve: curve,
|
|
||||||
X: pX,
|
|
||||||
Y: pY,
|
|
||||||
},
|
|
||||||
D: d,
|
|
||||||
}
|
|
||||||
|
|
||||||
return privKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// ECDSAPublicKey takes the provide curve and (x, y) coordinates and returns
|
|
||||||
// *ecdsa.PublicKey. Returns an error if the given points are not on the curve.
|
|
||||||
func ECDSAPublicKey(curve elliptic.Curve, x, y []byte) (*ecdsa.PublicKey, error) {
|
|
||||||
xPoint := (&big.Int{}).SetBytes(x)
|
|
||||||
yPoint := (&big.Int{}).SetBytes(y)
|
|
||||||
|
|
||||||
if !curve.IsOnCurve(xPoint, yPoint) {
|
|
||||||
return nil, fmt.Errorf("point(%v, %v) is not on the given curve", xPoint.String(), yPoint.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
return &ecdsa.PublicKey{
|
|
||||||
Curve: curve,
|
|
||||||
X: xPoint,
|
|
||||||
Y: yPoint,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifySignature takes the provided public key, hash, and asn1 encoded signature and returns
|
|
||||||
// whether the given signature is valid.
|
|
||||||
func VerifySignature(key *ecdsa.PublicKey, hash []byte, signature []byte) (bool, error) {
|
|
||||||
var ecdsaSignature ecdsaSignature
|
|
||||||
|
|
||||||
_, err := asn1.Unmarshal(signature, &ecdsaSignature)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return ecdsa.Verify(key, hash, ecdsaSignature.R, ecdsaSignature.S), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// HMACKeyDerivation provides an implementation of a NIST-800-108 of a KDF (Key Derivation Function) in Counter Mode.
// For the purposes of this implantation HMAC is used as the PRF (Pseudorandom function), where the value of
// `r` is defined as a 4 byte counter.
//
// bitLen is the requested output length in bits; the trailing output[:bitLen/8]
// truncation drops any partial byte, so callers are expected to pass a
// multiple of 8.
func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, context []byte) ([]byte, error) {
	// verify that we won't overflow the counter
	n := int64(math.Ceil((float64(bitLen) / 8) / float64(hash().Size())))
	if n > 0x7FFFFFFF {
		return nil, fmt.Errorf("unable to derive key of size %d using 32-bit counter", bitLen)
	}

	// verify the requested bit length is not larger than the length encoding size
	if int64(bitLen) > 0x7FFFFFFF {
		return nil, fmt.Errorf("bitLen is greater than 32-bits")
	}

	// Fixed input string: label || 0x00 || context || [L]_4 (big-endian bit
	// length), per the NIST counter-mode construction.
	fixedInput := bytes.NewBuffer(nil)
	fixedInput.Write(label)
	fixedInput.WriteByte(0x00)
	fixedInput.Write(context)
	if err := binary.Write(fixedInput, binary.BigEndian, int32(bitLen)); err != nil {
		return nil, fmt.Errorf("failed to write bit length to fixed input string: %v", err)
	}

	var output []byte

	h := hmac.New(hash, key)

	// K(i) = PRF(key, [i]_4 || fixedInput), concatenated for i = 1..n.
	for i := int64(1); i <= n; i++ {
		h.Reset()
		if err := binary.Write(h, binary.BigEndian, int32(i)); err != nil {
			return nil, err
		}
		_, err := h.Write(fixedInput.Bytes())
		if err != nil {
			return nil, err
		}
		output = append(output, h.Sum(nil)...)
	}

	return output[:bitLen/8], nil
}
|
|
|
@ -1,279 +0,0 @@
|
||||||
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/ecc_test.go
|
|
||||||
|
|
||||||
package crypto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/elliptic"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/sha256"
|
|
||||||
"io"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestECDSAPublicKeyDerivation_P256 checks that ECDSAKey derives the known
// P-256 public point (x, y) from the fixed private scalar d.
func TestECDSAPublicKeyDerivation_P256(t *testing.T) {
	d := []byte{
		0xc9, 0x80, 0x68, 0x98, 0xa0, 0x33, 0x49, 0x16, 0xc8, 0x60, 0x74, 0x88, 0x80, 0xa5, 0x41, 0xf0,
		0x93, 0xb5, 0x79, 0xa9, 0xb1, 0xf3, 0x29, 0x34, 0xd8, 0x6c, 0x36, 0x3c, 0x39, 0x80, 0x03, 0x57,
	}

	x := []byte{
		0xd0, 0x72, 0x0d, 0xc6, 0x91, 0xaa, 0x80, 0x09, 0x6b, 0xa3, 0x2f, 0xed, 0x1c, 0xb9, 0x7c, 0x2b,
		0x62, 0x06, 0x90, 0xd0, 0x6d, 0xe0, 0x31, 0x7b, 0x86, 0x18, 0xd5, 0xce, 0x65, 0xeb, 0x72, 0x8f,
	}

	y := []byte{
		0x96, 0x81, 0xb5, 0x17, 0xb1, 0xcd, 0xa1, 0x7d, 0x0d, 0x83, 0xd3, 0x35, 0xd9, 0xc4, 0xa8, 0xa9,
		0xa9, 0xb0, 0xb1, 0xb3, 0xc7, 0x10, 0x6d, 0x8f, 0x3c, 0x72, 0xbc, 0x50, 0x93, 0xdc, 0x27, 0x5f,
	}

	testKeyDerivation(t, elliptic.P256(), d, x, y)
}
|
|
||||||
|
|
||||||
// TestECDSAPublicKeyDerivation_P384 checks that ECDSAKey derives the known
// P-384 public point (x, y) from the fixed private scalar d.
func TestECDSAPublicKeyDerivation_P384(t *testing.T) {
	d := []byte{
		0x53, 0x94, 0xf7, 0x97, 0x3e, 0xa8, 0x68, 0xc5, 0x2b, 0xf3, 0xff, 0x8d, 0x8c, 0xee, 0xb4, 0xdb,
		0x90, 0xa6, 0x83, 0x65, 0x3b, 0x12, 0x48, 0x5d, 0x5f, 0x62, 0x7c, 0x3c, 0xe5, 0xab, 0xd8, 0x97,
		0x8f, 0xc9, 0x67, 0x3d, 0x14, 0xa7, 0x1d, 0x92, 0x57, 0x47, 0x93, 0x16, 0x62, 0x49, 0x3c, 0x37,
	}

	x := []byte{
		0xfd, 0x3c, 0x84, 0xe5, 0x68, 0x9b, 0xed, 0x27, 0x0e, 0x60, 0x1b, 0x3d, 0x80, 0xf9, 0x0d, 0x67,
		0xa9, 0xae, 0x45, 0x1c, 0xce, 0x89, 0x0f, 0x53, 0xe5, 0x83, 0x22, 0x9a, 0xd0, 0xe2, 0xee, 0x64,
		0x56, 0x11, 0xfa, 0x99, 0x36, 0xdf, 0xa4, 0x53, 0x06, 0xec, 0x18, 0x06, 0x67, 0x74, 0xaa, 0x24,
	}

	y := []byte{
		0xb8, 0x3c, 0xa4, 0x12, 0x6c, 0xfc, 0x4c, 0x4d, 0x1d, 0x18, 0xa4, 0xb6, 0xc2, 0x1c, 0x7f, 0x69,
		0x9d, 0x51, 0x23, 0xdd, 0x9c, 0x24, 0xf6, 0x6f, 0x83, 0x38, 0x46, 0xee, 0xb5, 0x82, 0x96, 0x19,
		0x6b, 0x42, 0xec, 0x06, 0x42, 0x5d, 0xb5, 0xb7, 0x0a, 0x4b, 0x81, 0xb7, 0xfc, 0xf7, 0x05, 0xa0,
	}

	testKeyDerivation(t, elliptic.P384(), d, x, y)
}
|
|
||||||
|
|
||||||
// TestECDSAKnownSigningValue_P256 signs and verifies a fixed message using a
// fixed P-256 private scalar.
func TestECDSAKnownSigningValue_P256(t *testing.T) {
	d := []byte{
		0x51, 0x9b, 0x42, 0x3d, 0x71, 0x5f, 0x8b, 0x58, 0x1f, 0x4f, 0xa8, 0xee, 0x59, 0xf4, 0x77, 0x1a,
		0x5b, 0x44, 0xc8, 0x13, 0x0b, 0x4e, 0x3e, 0xac, 0xca, 0x54, 0xa5, 0x6d, 0xda, 0x72, 0xb4, 0x64,
	}

	testKnownSigningValue(t, elliptic.P256(), d)
}
|
|
||||||
|
|
||||||
// TestECDSAKnownSigningValue_P384 signs and verifies a fixed message using a
// fixed P-384 private scalar.
func TestECDSAKnownSigningValue_P384(t *testing.T) {
	d := []byte{
		0x53, 0x94, 0xf7, 0x97, 0x3e, 0xa8, 0x68, 0xc5, 0x2b, 0xf3, 0xff, 0x8d, 0x8c, 0xee, 0xb4, 0xdb,
		0x90, 0xa6, 0x83, 0x65, 0x3b, 0x12, 0x48, 0x5d, 0x5f, 0x62, 0x7c, 0x3c, 0xe5, 0xab, 0xd8, 0x97,
		0x8f, 0xc9, 0x67, 0x3d, 0x14, 0xa7, 0x1d, 0x92, 0x57, 0x47, 0x93, 0x16, 0x62, 0x49, 0x3c, 0x37,
	}

	testKnownSigningValue(t, elliptic.P384(), d)
}
|
|
||||||
|
|
||||||
func testKeyDerivation(t *testing.T, curve elliptic.Curve, d, expectedX, expectedY []byte) {
|
|
||||||
privKey := ECDSAKey(curve, d)
|
|
||||||
|
|
||||||
if e, a := d, privKey.D.Bytes(); bytes.Compare(e, a) != 0 {
|
|
||||||
t.Errorf("expected % x, got % x", e, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
if e, a := expectedX, privKey.X.Bytes(); bytes.Compare(e, a) != 0 {
|
|
||||||
t.Errorf("expected % x, got % x", e, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
if e, a := expectedY, privKey.Y.Bytes(); bytes.Compare(e, a) != 0 {
|
|
||||||
t.Errorf("expected % x, got % x", e, a)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// testKnownSigningValue derives a key from the fixed scalar d, signs the
// SHA-256 digest of a fixed message, and checks the signature round-trips
// through VerifySignature.
func testKnownSigningValue(t *testing.T, curve elliptic.Curve, d []byte) {
	signingKey := ECDSAKey(curve, d)

	message := []byte{
		0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
		0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
		0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
		0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
		0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
		0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
		0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
		0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
	}

	sha256Hash := sha256.New()
	_, err := io.Copy(sha256Hash, bytes.NewReader(message))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	msgHash := sha256Hash.Sum(nil)
	msgSignature, err := signingKey.Sign(rand.Reader, msgHash, crypto.SHA256)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	verified, err := VerifySignature(&signingKey.PublicKey, msgHash, msgSignature)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	if !verified {
		t.Fatalf("failed to verify message msgSignature")
	}
}
|
|
||||||
|
|
||||||
// TestECDSAInvalidSignature_P256 checks a corrupted signature fails on P-256.
func TestECDSAInvalidSignature_P256(t *testing.T) {
	testInvalidSignature(t, elliptic.P256())
}

// TestECDSAInvalidSignature_P384 checks a corrupted signature fails on P-384.
func TestECDSAInvalidSignature_P384(t *testing.T) {
	testInvalidSignature(t, elliptic.P384())
}

// TestECDSAGenKeySignature_P256 signs/verifies with a fresh P-256 key.
func TestECDSAGenKeySignature_P256(t *testing.T) {
	testGenKeySignature(t, elliptic.P256())
}

// TestECDSAGenKeySignature_P384 signs/verifies with a fresh P-384 key.
func TestECDSAGenKeySignature_P384(t *testing.T) {
	testGenKeySignature(t, elliptic.P384())
}
|
|
||||||
|
|
||||||
// testInvalidSignature signs a fixed message with a freshly generated key,
// corrupts one byte of the ASN.1 signature, and asserts verification fails.
func testInvalidSignature(t *testing.T, curve elliptic.Curve) {
	privateKey, err := ecdsa.GenerateKey(curve, rand.Reader)
	if err != nil {
		t.Fatalf("failed to generate key: %v", err)
	}

	message := []byte{
		0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
		0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
		0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
		0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
		0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
		0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
		0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
		0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
	}

	sha256Hash := sha256.New()
	_, err = io.Copy(sha256Hash, bytes.NewReader(message))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	msgHash := sha256Hash.Sum(nil)
	msgSignature, err := privateKey.Sign(rand.Reader, msgHash, crypto.SHA256)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	// Corrupt byte 15 of the signature: zero it, or set it to 0x0a if it is
	// already zero, so the value always changes.
	byteToFlip := 15
	switch msgSignature[byteToFlip] {
	case 0:
		msgSignature[byteToFlip] = 0x0a
	default:
		msgSignature[byteToFlip] &^= msgSignature[byteToFlip]
	}

	verified, err := VerifySignature(&privateKey.PublicKey, msgHash, msgSignature)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	if verified {
		t.Fatalf("expected message verification to fail")
	}
}
|
|
||||||
|
|
||||||
func testGenKeySignature(t *testing.T, curve elliptic.Curve) {
|
|
||||||
privateKey, err := ecdsa.GenerateKey(curve, rand.Reader)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to generate key: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
message := []byte{
|
|
||||||
0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
|
|
||||||
0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
|
|
||||||
0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
|
|
||||||
0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
|
|
||||||
0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
|
|
||||||
0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
|
|
||||||
0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
|
|
||||||
0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
|
|
||||||
}
|
|
||||||
|
|
||||||
sha256Hash := sha256.New()
|
|
||||||
_, err = io.Copy(sha256Hash, bytes.NewReader(message))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
msgHash := sha256Hash.Sum(nil)
|
|
||||||
msgSignature, err := privateKey.Sign(rand.Reader, msgHash, crypto.SHA256)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
verified, err := VerifySignature(&privateKey.PublicKey, msgHash, msgSignature)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !verified {
|
|
||||||
t.Fatalf("expected message verification to fail")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestECDSASignatureFormat verifies a fixed ASN.1 DER signature against a
// fixed P-256 public key and fixed message digest, pinning the expected
// signature wire format.
func TestECDSASignatureFormat(t *testing.T) {
	asn1Signature := []byte{
		0x30, 0x45, 0x02, 0x21, 0x00, 0xd7, 0xc5, 0xb9, 0x9e, 0x0b, 0xb1, 0x1a, 0x1f, 0x32, 0xda, 0x66, 0xe0, 0xff,
		0x59, 0xb7, 0x8a, 0x5e, 0xb3, 0x94, 0x9c, 0x23, 0xb3, 0xfc, 0x1f, 0x18, 0xcc, 0xf6, 0x61, 0x67, 0x8b, 0xf1,
		0xc1, 0x02, 0x20, 0x26, 0x4d, 0x8b, 0x7c, 0xaa, 0x52, 0x4c, 0xc0, 0x2e, 0x5f, 0xf6, 0x7e, 0x24, 0x82, 0xe5,
		0xfb, 0xcb, 0xc7, 0x9b, 0x83, 0x0d, 0x19, 0x7e, 0x7a, 0x40, 0x37, 0x87, 0xdd, 0x1c, 0x93, 0x13, 0xc4,
	}

	x := []byte{
		0x1c, 0xcb, 0xe9, 0x1c, 0x07, 0x5f, 0xc7, 0xf4, 0xf0, 0x33, 0xbf, 0xa2, 0x48, 0xdb, 0x8f, 0xcc,
		0xd3, 0x56, 0x5d, 0xe9, 0x4b, 0xbf, 0xb1, 0x2f, 0x3c, 0x59, 0xff, 0x46, 0xc2, 0x71, 0xbf, 0x83,
	}

	y := []byte{
		0xce, 0x40, 0x14, 0xc6, 0x88, 0x11, 0xf9, 0xa2, 0x1a, 0x1f, 0xdb, 0x2c, 0x0e, 0x61, 0x13, 0xe0,
		0x6d, 0xb7, 0xca, 0x93, 0xb7, 0x40, 0x4e, 0x78, 0xdc, 0x7c, 0xcd, 0x5c, 0xa8, 0x9a, 0x4c, 0xa9,
	}

	publicKey, err := ECDSAPublicKey(elliptic.P256(), x, y)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	message := []byte{
		0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
		0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
		0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
		0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
		0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
		0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
		0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
		0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
	}

	hash := sha256.New()
	_, err = io.Copy(hash, bytes.NewReader(message))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	msgHash := hash.Sum(nil)

	verifySignature, err := VerifySignature(publicKey, msgHash, asn1Signature)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	if !verifySignature {
		t.Fatalf("failed to verify signature")
	}
}
|
|
|
@ -1,38 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/const.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
// SigV4 protocol constants shared by the signer implementations.
const (
	// EmptyStringSHA256 is the hex encoded sha256 value of an empty string
	EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`

	// UnsignedPayload indicates that the request payload body is unsigned
	UnsignedPayload = "UNSIGNED-PAYLOAD"

	// AmzAlgorithmKey indicates the signing algorithm
	AmzAlgorithmKey = "X-Amz-Algorithm"

	// AmzSecurityTokenKey indicates the security token to be used with temporary credentials
	AmzSecurityTokenKey = "X-Amz-Security-Token"

	// AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'
	AmzDateKey = "X-Amz-Date"

	// AmzCredentialKey is the access key ID and credential scope
	AmzCredentialKey = "X-Amz-Credential"

	// AmzSignedHeadersKey is the set of headers signed for the request
	AmzSignedHeadersKey = "X-Amz-SignedHeaders"

	// AmzSignatureKey is the query parameter to store the SigV4 signature
	AmzSignatureKey = "X-Amz-Signature"

	// TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
	TimeFormat = "20060102T150405Z"

	// ShortTimeFormat is the shortened time format used in the credential scope
	ShortTimeFormat = "20060102"

	// ContentSHAKey is the SHA256 of request body
	ContentSHAKey = "X-Amz-Content-Sha256"
)
|
|
|
@ -1,90 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/header_rules.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Rules houses a set of Rule needed for validation of a
// string value.
type Rules []Rule

// Rule interface allows for more flexible rules and just simply
// checks whether or not a value adheres to that Rule.
type Rule interface {
	IsValid(value string) bool
}

// IsValid will iterate through all rules and report whether ANY rule
// matches the value (logical OR over nested rules).
func (r Rules) IsValid(value string) bool {
	for _, rule := range r {
		if rule.IsValid(value) {
			return true
		}
	}
	return false
}

// MapRule is a generic Rule backed by set membership in a map.
type MapRule map[string]struct{}

// IsValid for the map Rule is satisfied when the value exists in the map.
func (m MapRule) IsValid(value string) bool {
	_, ok := m[value]
	return ok
}

// AllowList is a generic Rule for whitelisting.
type AllowList struct {
	Rule
}

// IsValid for AllowList checks if the value is within the AllowList.
func (w AllowList) IsValid(value string) bool {
	return w.Rule.IsValid(value)
}

// DenyList is a generic Rule for blacklisting.
type DenyList struct {
	Rule
}

// IsValid for DenyList is satisfied when the value is NOT matched by the
// embedded Rule.
func (b DenyList) IsValid(value string) bool {
	return !b.Rule.IsValid(value)
}

// Patterns is a list of string prefixes to match against.
type Patterns []string

// IsValid for Patterns checks each pattern (case-insensitive prefix match
// via HasPrefixFold) and reports whether any matched.
func (p Patterns) IsValid(value string) bool {
	for _, pattern := range p {
		if HasPrefixFold(value, pattern) {
			return true
		}
	}
	return false
}

// InclusiveRules allows rules to depend on one another.
type InclusiveRules []Rule

// IsValid will return true only if ALL rules are true (logical AND).
func (r InclusiveRules) IsValid(value string) bool {
	for _, rule := range r {
		if !rule.IsValid(value) {
			return false
		}
	}
	return true
}
|
|
||||||
|
|
||||||
// HasPrefixFold reports whether the string s begins with prefix,
// interpreted as UTF-8 strings, under Unicode case-folding.
func HasPrefixFold(s, prefix string) bool {
	if len(prefix) > len(s) {
		return false
	}
	return strings.EqualFold(s[:len(prefix)], prefix)
}
|
|
|
@ -1,83 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/header.go
|
|
||||||
// with changes:
|
|
||||||
// * drop User-Agent header from ignored
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
// IgnoredPresignedHeaders is the deny-list of headers excluded when
// signing presigned requests (User-Agent included).
var IgnoredPresignedHeaders = Rules{
	DenyList{
		MapRule{
			"Authorization":   struct{}{},
			"User-Agent":      struct{}{},
			"X-Amzn-Trace-Id": struct{}{},
		},
	},
}

// IgnoredHeaders is the deny-list of headers excluded during signing.
// User-Agent is deliberately NOT ignored here, to be compatible with
// aws sdk java v1.
var IgnoredHeaders = Rules{
	DenyList{
		MapRule{
			"Authorization": struct{}{},
			//"User-Agent": struct{}{},
			"X-Amzn-Trace-Id": struct{}{},
		},
	},
}
|
|
||||||
|
|
||||||
// RequiredSignedHeaders is a whitelist for Build canonical headers:
// exact header names plus any header starting with "X-Amz-Meta-".
var RequiredSignedHeaders = Rules{
	AllowList{
		MapRule{
			"Cache-Control":                         struct{}{},
			"Content-Disposition":                   struct{}{},
			"Content-Encoding":                      struct{}{},
			"Content-Language":                      struct{}{},
			"Content-Md5":                           struct{}{},
			"Content-Type":                          struct{}{},
			"Expires":                               struct{}{},
			"If-Match":                              struct{}{},
			"If-Modified-Since":                     struct{}{},
			"If-None-Match":                         struct{}{},
			"If-Unmodified-Since":                   struct{}{},
			"Range":                                 struct{}{},
			"X-Amz-Acl":                             struct{}{},
			"X-Amz-Copy-Source":                     struct{}{},
			"X-Amz-Copy-Source-If-Match":            struct{}{},
			"X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
			"X-Amz-Copy-Source-If-None-Match":       struct{}{},
			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
			"X-Amz-Copy-Source-Range":               struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
			"X-Amz-Grant-Full-control":                                    struct{}{},
			"X-Amz-Grant-Read":                                            struct{}{},
			"X-Amz-Grant-Read-Acp":                                        struct{}{},
			"X-Amz-Grant-Write":                                           struct{}{},
			"X-Amz-Grant-Write-Acp":                                       struct{}{},
			"X-Amz-Metadata-Directive":                                    struct{}{},
			"X-Amz-Mfa":                                                   struct{}{},
			"X-Amz-Request-Payer":                                         struct{}{},
			"X-Amz-Server-Side-Encryption":                                struct{}{},
			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":                 struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Algorithm":             struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key":                   struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":               struct{}{},
			"X-Amz-Storage-Class":                                         struct{}{},
			"X-Amz-Website-Redirect-Location":                             struct{}{},
			"X-Amz-Content-Sha256":                                        struct{}{},
			"X-Amz-Tagging":                                               struct{}{},
		},
	},
	Patterns{"X-Amz-Meta-"},
}

// AllowedQueryHoisting is a whitelist for Build query headers: any
// "X-Amz-"-prefixed header that is NOT in RequiredSignedHeaders may be
// hoisted from the headers into the query string.
var AllowedQueryHoisting = InclusiveRules{
	DenyList{RequiredSignedHeaders},
	Patterns{"X-Amz-"},
}
|
|
|
@ -1,15 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/hmac.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/sha256"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HMACSHA256 computes the HMAC-SHA256 of data keyed with key.
func HMACSHA256(key []byte, data []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(data) // hash.Hash.Write is documented to never return an error
	return mac.Sum(nil)
}
|
|
|
@ -1,77 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/host.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SanitizeHostForHeader removes default port from host and updates request.Host
|
|
||||||
func SanitizeHostForHeader(r *http.Request) {
|
|
||||||
host := getHost(r)
|
|
||||||
port := portOnly(host)
|
|
||||||
if port != "" && isDefaultPort(r.URL.Scheme, port) {
|
|
||||||
r.Host = stripPort(host)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// getHost returns the host for the request: r.Host when non-empty
// (it takes precedence), otherwise the host component of the URL.
func getHost(r *http.Request) string {
	if r.Host != "" {
		return r.Host
	}

	return r.URL.Host
}
|
|
||||||
|
|
||||||
// stripPort returns hostport without any port number.
//
// An IPv6 literal with a port is returned without its square brackets;
// zone identifiers are preserved. Behavior matches the Go 1.8 standard
// library's net/url helper.
func stripPort(hostport string) string {
	if !strings.Contains(hostport, ":") {
		return hostport
	}
	if end := strings.IndexByte(hostport, ']'); end >= 0 {
		return strings.TrimPrefix(hostport[:end], "[")
	}
	host, _, _ := strings.Cut(hostport, ":")
	return host
}
|
|
||||||
|
|
||||||
// portOnly returns the port part of hostport without the leading colon,
// or "" when hostport carries no port. Behavior matches the Go 1.8
// standard library's net/url helper.
func portOnly(hostport string) string {
	if strings.IndexByte(hostport, ':') < 0 {
		return ""
	}
	if i := strings.Index(hostport, "]:"); i >= 0 {
		// Bracketed IPv6 literal followed by a port.
		return hostport[i+2:]
	}
	if strings.IndexByte(hostport, ']') >= 0 {
		// Bracketed IPv6 literal with no port.
		return ""
	}
	_, port, _ := strings.Cut(hostport, ":")
	return port
}
|
|
||||||
|
|
||||||
// isDefaultPort reports whether port is the scheme's standard port
// (80 for HTTP, 443 for HTTPS). An empty port counts as default.
func isDefaultPort(scheme, port string) bool {
	if port == "" {
		return true
	}

	switch strings.ToLower(scheme) {
	case "http":
		return port == "80"
	case "https":
		return port == "443"
	}

	return false
}
|
|
|
@ -1,38 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/time.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing.
|
|
||||||
type SigningTime struct {
|
|
||||||
time.Time
|
|
||||||
timeFormat string
|
|
||||||
shortTimeFormat string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSigningTime creates a new SigningTime given a time.Time
|
|
||||||
func NewSigningTime(t time.Time) SigningTime {
|
|
||||||
return SigningTime{
|
|
||||||
Time: t,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TimeFormat provides a time formatted in the X-Amz-Date format.
|
|
||||||
func (m *SigningTime) TimeFormat() string {
|
|
||||||
return m.format(&m.timeFormat, TimeFormat)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShortTimeFormat provides a time formatted of 20060102.
|
|
||||||
func (m *SigningTime) ShortTimeFormat() string {
|
|
||||||
return m.format(&m.shortTimeFormat, ShortTimeFormat)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *SigningTime) format(target *string, format string) string {
|
|
||||||
if len(*target) > 0 {
|
|
||||||
return *target
|
|
||||||
}
|
|
||||||
v := m.Time.Format(format)
|
|
||||||
*target = v
|
|
||||||
return v
|
|
||||||
}
|
|
|
@ -1,66 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/util.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// doubleSpace is the two-character run that signals redundant spacing.
const doubleSpace = "  "

// StripExcessSpaces trims leading and trailing spaces from str and
// collapses every internal run of spaces down to a single space. Only
// the ASCII space character is affected; tabs and other whitespace pass
// through untouched.
func StripExcessSpaces(str string) string {
	trimmed := strings.Trim(str, " ")
	if !strings.Contains(trimmed, doubleSpace) {
		// Fast path: nothing to collapse.
		return trimmed
	}

	var b strings.Builder
	b.Grow(len(trimmed))
	inRun := false
	for i := 0; i < len(trimmed); i++ {
		c := trimmed[i]
		if c == ' ' {
			if !inRun {
				b.WriteByte(' ')
			}
			inRun = true
			continue
		}
		inRun = false
		b.WriteByte(c)
	}
	return b.String()
}
|
|
||||||
|
|
||||||
// GetURIPath returns the escaped URI path component from the provided URL.
// An empty path normalizes to "/".
func GetURIPath(u *url.URL) string {
	var uri string

	if len(u.Opaque) > 0 {
		// Rebuild the path from the opaque data by dropping the first
		// three "/"-separated segments.
		// NOTE(review): assumes Opaque has the form "//host/path..." —
		// confirm against how callers construct these URLs.
		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
	} else {
		uri = u.EscapedPath()
	}

	if len(uri) == 0 {
		uri = "/"
	}

	return uri
}
|
|
|
@ -1,77 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/util_test.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestStripExcessHeaders exercises StripExcessSpaces (despite its name)
// over a table of inputs with leading, trailing, and repeated spaces.
// NOTE(review): the exact counts of repeated spaces in the inputs were
// garbled in transit; any run of two or more spaces collapses identically.
func TestStripExcessHeaders(t *testing.T) {
	vals := []string{
		"",
		"123",
		"1 2 3",
		"1 2 3 ",
		"  1 2 3",
		"1  2 3",
		"1  23",
		"1  2  3",
		"1  2  ",
		" 1  2  ",
		"12   3",
		"12   3   1",
		"12        3       1",
		"12     3       1abc123",
	}

	expected := []string{
		"",
		"123",
		"1 2 3",
		"1 2 3",
		"1 2 3",
		"1 2 3",
		"1 23",
		"1 2 3",
		"1 2",
		"1 2",
		"12 3",
		"12 3 1",
		"12 3 1",
		"12 3 1abc123",
	}

	for i := 0; i < len(vals); i++ {
		r := StripExcessSpaces(vals[i])
		if e, a := expected[i], r; e != a {
			t.Errorf("%d, expect %v, got %v", i, e, a)
		}
	}
}
|
|
||||||
|
|
||||||
// stripExcessSpaceCases are representative inputs for the benchmark below.
// NOTE(review): the exact counts of repeated spaces were garbled in
// transit; the benchmark's behavior does not depend on them.
var stripExcessSpaceCases = []string{
	`AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`,
	`123   321   123   321`,
	`   123   321   123   321   `,
	`   123    321    123          321   `,
	"123",
	"1 2 3",
	"  1 2 3",
	"1  2 3",
	"1  23",
	"1  2  3",
	"1  2  ",
	" 1  2  ",
	"12   3",
	"12   3   1",
	"12        3       1",
	"12     3       1abc123",
}

// BenchmarkStripExcessSpaces measures StripExcessSpaces across the
// representative inputs above.
func BenchmarkStripExcessSpaces(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, v := range stripExcessSpaceCases {
			StripExcessSpaces(v)
		}
	}
}
|
|
|
@ -1,98 +0,0 @@
|
||||||
// This file is adopting https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go for sigv4a.
|
|
||||||
|
|
||||||
package v4a
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
signerCrypto "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/crypto"
|
|
||||||
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/v4"
|
|
||||||
)
|
|
||||||
|
|
||||||
// EventStreamSigner is an AWS EventStream protocol signer.
type EventStreamSigner interface {
	GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error)
}

// StreamSignerOptions is the configuration options for StreamSigner.
type StreamSignerOptions struct{}

// StreamSigner implements Signature Version 4 (SigV4) signing of event
// stream encoded payloads, chaining each frame's signature off the
// previous one.
type StreamSigner struct {
	options StreamSignerOptions

	// credentials hold the ECDSA key material used for verification.
	credentials Credentials
	service     string

	// prevSignature seeds the signature chain (initially the request's
	// seed signature); updated after each verified frame.
	prevSignature []byte
}
|
|
||||||
|
|
||||||
// NewStreamSigner returns a new AWS EventStream protocol signer.
|
|
||||||
func NewStreamSigner(credentials Credentials, service string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner {
|
|
||||||
o := StreamSignerOptions{}
|
|
||||||
|
|
||||||
for _, fn := range optFns {
|
|
||||||
fn(&o)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &StreamSigner{
|
|
||||||
options: o,
|
|
||||||
credentials: credentials,
|
|
||||||
service: service,
|
|
||||||
prevSignature: seedSignature,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifySignature checks the ECDSA signature of a single event-stream
// frame against the chained string-to-sign. On success the frame's
// signature becomes the previous signature for the next frame.
func (s *StreamSigner) VerifySignature(headers, payload []byte, signingTime time.Time, signature []byte, optFns ...func(*StreamSignerOptions)) error {
	options := s.options

	for _, fn := range optFns {
		fn(&options)
	}

	prevSignature := s.prevSignature

	st := v4Internal.NewSigningTime(signingTime)

	scope := buildCredentialScope(st, s.service)

	stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st)

	// Verify against the public half of the derived ECDSA key.
	ok, err := signerCrypto.VerifySignature(&s.credentials.PrivateKey.PublicKey, makeHash(sha256.New(), []byte(stringToSign)), signature)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("v4a: invalid signature")
	}

	// Chain: the next frame signs over this frame's signature.
	s.prevSignature = signature

	return nil
}
|
|
||||||
|
|
||||||
// buildEventStreamStringToSign assembles the newline-joined string-to-sign
// for an event-stream frame under the AWS4-ECDSA-P256-SHA256-PAYLOAD scheme.
func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
	hash := sha256.New()
	// NOTE(review): the same hash instance is passed to makeHash twice;
	// assumes makeHash resets it before hashing — confirm in makeHash.
	return strings.Join([]string{
		"AWS4-ECDSA-P256-SHA256-PAYLOAD",
		signingTime.TimeFormat(),
		credentialScope,
		hex.EncodeToString(previousSignature),
		hex.EncodeToString(makeHash(hash, headers)),
		hex.EncodeToString(makeHash(hash, payload)),
	}, "\n")
}

// buildCredentialScope returns "<yyyymmdd>/<service>/aws4_request".
// Unlike sigv4, the sigv4a scope carries no region segment.
func buildCredentialScope(st v4Internal.SigningTime, service string) string {
	return strings.Join([]string{
		st.Format(shortTimeFormat),
		service,
		"aws4_request",
	}, "/")
}
|
|
|
@ -1,591 +0,0 @@
|
||||||
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/v4a.go
|
|
||||||
// with changes:
|
|
||||||
// * adding exported VerifySignature methods
|
|
||||||
// * using different ignore headers for sing/presign requests
|
|
||||||
// * don't duplicate content-length as signed header
|
|
||||||
// * use copy of smithy-go encoding/httpbinding package
|
|
||||||
// * use zap.Logger instead of smithy-go/logging
|
|
||||||
|
|
||||||
package v4a
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/elliptic"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"hash"
|
|
||||||
"math/big"
|
|
||||||
"net/http"
|
|
||||||
"net/textproto"
|
|
||||||
"net/url"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/smithy/encoding/httpbinding"
|
|
||||||
signerCrypto "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/crypto"
|
|
||||||
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/v4"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// AmzRegionSetKey represents the region set header used for sigv4a.
	AmzRegionSetKey     = "X-Amz-Region-Set"
	amzAlgorithmKey     = v4Internal.AmzAlgorithmKey
	amzSecurityTokenKey = v4Internal.AmzSecurityTokenKey
	amzDateKey          = v4Internal.AmzDateKey
	amzCredentialKey    = v4Internal.AmzCredentialKey
	amzSignedHeadersKey = v4Internal.AmzSignedHeadersKey
	authorizationHeader = "Authorization"

	// signingAlgorithm is the sigv4a algorithm identifier.
	signingAlgorithm = "AWS4-ECDSA-P256-SHA256"

	timeFormat      = "20060102T150405Z"
	shortTimeFormat = "20060102"

	// EmptyStringSHA256 is a hex encoded SHA-256 hash of an empty string.
	EmptyStringSHA256 = v4Internal.EmptyStringSHA256

	// Version of signing v4a.
	Version = "SigV4A"
)

var (
	// p256 and nMinusTwoP256 are populated by init below.
	p256          elliptic.Curve
	nMinusTwoP256 *big.Int

	// one is the constant 1, used to shift derived key candidates.
	one = new(big.Int).SetInt64(1)
)
|
|
||||||
|
|
||||||
func init() {
	// Ensure the elliptic curve parameters are initialized on package
	// import rather than on first usage.
	p256 = elliptic.P256()

	// Precompute N-2, used to bound derived key candidates in
	// deriveKeyFromAccessKeyPair.
	nMinusTwoP256 = new(big.Int).SetBytes(p256.Params().N.Bytes())
	nMinusTwoP256 = nMinusTwoP256.Sub(nMinusTwoP256, new(big.Int).SetInt64(2))
}
|
|
||||||
|
|
||||||
// SignerOptions is the SigV4a signing options for constructing a Signer.
type SignerOptions struct {
	Logger     *zap.Logger
	LogSigning bool

	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
	// request header to the request's query string. This is most commonly used
	// with pre-signed requests preventing headers from being added to the
	// request's query string.
	DisableHeaderHoisting bool

	// Disables the automatic escaping of the URI path of the request for the
	// signature's canonical string's path. For services that do not need additional
	// escaping then use this to disable the signer escaping the path.
	//
	// S3 is an example of a service that does not need additional escaping.
	//
	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
	DisableURIPathEscaping bool
}

// Signer is a SigV4a HTTP signing implementation.
type Signer struct {
	options SignerOptions
}
|
|
||||||
|
|
||||||
// NewSigner constructs a SigV4a Signer.
|
|
||||||
func NewSigner(optFns ...func(*SignerOptions)) *Signer {
|
|
||||||
options := SignerOptions{}
|
|
||||||
|
|
||||||
for _, fn := range optFns {
|
|
||||||
fn(&options)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Signer{options: options}
|
|
||||||
}
|
|
||||||
|
|
||||||
// deriveKeyFromAccessKeyPair derives a NIST P-256 PrivateKey from the given
// IAM AccessKey and SecretKey pair.
//
// Based on FIPS.186-4 Appendix B.4.2 (rejection sampling with a
// single-byte external counter).
func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, error) {
	params := p256.Params()
	bitLen := params.BitSize // Testing random candidates does not require an additional 64 bits
	counter := 0x01

	buffer := make([]byte, 1+len(accessKey)) // 1 byte counter + len(accessKey)
	kdfContext := bytes.NewBuffer(buffer)

	// Fixed KDF input key: "AWS4A" prefix plus the secret key.
	inputKey := append([]byte("AWS4A"), []byte(secretKey)...)

	d := new(big.Int)
	for {
		// KDF context is accessKey || counter; Reset discards the
		// previous iteration's contents.
		kdfContext.Reset()
		kdfContext.WriteString(accessKey)
		kdfContext.WriteByte(byte(counter))

		key, err := signerCrypto.HMACKeyDerivation(sha256.New, bitLen, inputKey, []byte(signingAlgorithm), kdfContext.Bytes())
		if err != nil {
			return nil, err
		}

		// Check the key before calling SetBytes to see if it is in fact a valid candidate.
		// This ensures the byte slice is the correct length (32-bytes) to compare in constant-time.
		cmp, err := signerCrypto.ConstantTimeByteCompare(key, nMinusTwoP256.Bytes())
		if err != nil {
			return nil, err
		}
		if cmp == -1 {
			d.SetBytes(key)
			break
		}

		// Candidate out of range: bump the counter and retry.
		counter++
		if counter > 0xFF {
			return nil, fmt.Errorf("exhausted single byte external counter")
		}
	}
	// Shift the accepted candidate into the valid scalar range.
	d = d.Add(d, one)

	priv := new(ecdsa.PrivateKey)
	priv.PublicKey.Curve = p256
	priv.D = d
	priv.PublicKey.X, priv.PublicKey.Y = p256.ScalarBaseMult(d.Bytes())

	return priv, nil
}
|
|
||||||
|
|
||||||
// httpSigner carries the per-request state needed to build a sigv4a
// canonical request and signature.
type httpSigner struct {
	Request     *http.Request
	ServiceName string
	RegionSet   []string
	Time        time.Time
	Credentials Credentials
	IsPreSign   bool // true when building a presigned URL instead of signed headers

	Logger *zap.Logger
	Debug  bool

	// PayloadHash is the hex encoded SHA-256 hash of the request payload.
	// If len(PayloadHash) == 0 the signer will attempt to send the request
	// as an unsigned payload. Note: Unsigned payloads only work for a subset of services.
	PayloadHash string

	DisableHeaderHoisting  bool
	DisableURIPathEscaping bool
}
|
|
||||||
|
|
||||||
// SignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and signs using SigV4a.
// The passed in request will be modified in place.
func (s *Signer) SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) error {
	// Copy the signer's options so per-call mutators don't leak.
	options := s.options
	for _, fn := range optFns {
		fn(&options)
	}

	signer := &httpSigner{
		Request:                r,
		PayloadHash:            payloadHash,
		ServiceName:            service,
		RegionSet:              regionSet,
		Credentials:            credentials,
		Time:                   signingTime.UTC(),
		DisableHeaderHoisting:  options.DisableHeaderHoisting,
		DisableURIPathEscaping: options.DisableURIPathEscaping,
		Logger:                 options.Logger,
	}

	// Build mutates r (headers/query) in place and returns the signed form.
	signedRequest, err := signer.Build()
	if err != nil {
		return err
	}

	logHTTPSigningInfo(ctx, options, signedRequest)

	return nil
}
|
|
||||||
|
|
||||||
// VerifySignature checks a sigv4a signature on a regular (header-signed) request.
func (s *Signer) VerifySignature(credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, signature string, optFns ...func(*SignerOptions)) error {
	return s.verifySignature(credentials, r, payloadHash, service, regionSet, signingTime, signature, false, optFns...)
}

// VerifyPresigned checks a sigv4a signature on a presigned (query-signed) request.
func (s *Signer) VerifyPresigned(credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, signature string, optFns ...func(*SignerOptions)) error {
	return s.verifySignature(credentials, r, payloadHash, service, regionSet, signingTime, signature, true, optFns...)
}
|
|
||||||
|
|
||||||
// verifySignature rebuilds the string-to-sign for r and checks the provided
// hex-encoded signature against the ECDSA public key carried by credentials.
func (s *Signer) verifySignature(credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, signature string, isPresigned bool, optFns ...func(*SignerOptions)) error {
	options := s.options
	for _, fn := range optFns {
		fn(&options)
	}

	signer := &httpSigner{
		Request:                r,
		PayloadHash:            payloadHash,
		ServiceName:            service,
		RegionSet:              regionSet,
		Credentials:            credentials,
		Time:                   signingTime.UTC(),
		IsPreSign:              isPresigned,
		DisableHeaderHoisting:  options.DisableHeaderHoisting,
		DisableURIPathEscaping: options.DisableURIPathEscaping,
	}

	// Re-run the signing computation to recover StringToSign.
	signedReq, err := signer.Build()
	if err != nil {
		return err
	}

	logHTTPSigningInfo(context.TODO(), options, signedReq)

	signatureRaw, err := hex.DecodeString(signature)
	if err != nil {
		return fmt.Errorf("decode hex signature: %w", err)
	}

	ok, err := signerCrypto.VerifySignature(&credentials.PrivateKey.PublicKey, makeHash(sha256.New(), []byte(signedReq.StringToSign)), signatureRaw)
	if err != nil {
		return err
	}

	if !ok {
		return fmt.Errorf("v4a: invalid signature")
	}

	return nil
}
|
|
||||||
|
|
||||||
// PresignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and presigns using SigV4a.
// Returns the presigned URL along with the headers that were signed with the request.
//
// PresignHTTP will not set the expires time of the presigned request
// automatically. To specify the expire duration for a request add the
// "X-Amz-Expires" query parameter on the request with the value as the
// duration in seconds the presigned URL should be considered valid for. This
// parameter is not used by all AWS services, and is most notably used by
// Amazon S3 APIs.
func (s *Signer) PresignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) (signedURI string, signedHeaders http.Header, err error) {
	options := s.options
	for _, fn := range optFns {
		fn(&options)
	}

	signer := &httpSigner{
		Request:                r,
		PayloadHash:            payloadHash,
		ServiceName:            service,
		RegionSet:              regionSet,
		Credentials:            credentials,
		Time:                   signingTime.UTC(),
		IsPreSign:              true,
		DisableHeaderHoisting:  options.DisableHeaderHoisting,
		DisableURIPathEscaping: options.DisableURIPathEscaping,
	}

	signedRequest, err := signer.Build()
	if err != nil {
		return "", nil, err
	}

	logHTTPSigningInfo(ctx, options, signedRequest)

	signedHeaders = make(http.Header)

	// For the signed headers we canonicalize the header keys in the returned map.
	// This avoids situations where the standard library can double headers such as the Host
	// header: the standard library will set Host even if it is present in lower-case form.
	for k, v := range signedRequest.SignedHeaders {
		key := textproto.CanonicalMIMEHeaderKey(k)
		signedHeaders[key] = append(signedHeaders[key], v...)
	}

	return signedRequest.Request.URL.String(), signedHeaders, nil
}
|
|
||||||
|
|
||||||
func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) {
|
|
||||||
amzDate := s.Time.Format(timeFormat)
|
|
||||||
|
|
||||||
if s.IsPreSign {
|
|
||||||
query.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ","))
|
|
||||||
query.Set(amzDateKey, amzDate)
|
|
||||||
query.Set(amzAlgorithmKey, signingAlgorithm)
|
|
||||||
if len(s.Credentials.SessionToken) > 0 {
|
|
||||||
query.Set(amzSecurityTokenKey, s.Credentials.SessionToken)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
headers.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ","))
|
|
||||||
headers.Set(amzDateKey, amzDate)
|
|
||||||
if len(s.Credentials.SessionToken) > 0 {
|
|
||||||
headers.Set(amzSecurityTokenKey, s.Credentials.SessionToken)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build assembles the SigV4a canonical request from the signer's state and
// produces the final signature, placed either in the X-Amz-Signature query
// parameter (presigning) or in the Authorization header (header signing).
// The statement order below mirrors the canonical-request construction rules
// and must not be rearranged.
func (s *httpSigner) Build() (signedRequest, error) {
	req := s.Request

	query := req.URL.Query()
	headers := req.Header

	// Inject required date/region/algorithm/token fields into the query
	// (presign) or headers (header signing).
	s.setRequiredSigningFields(headers, query)

	// Sort Each Query Key's Values so the canonical query string is stable.
	for key := range query {
		sort.Strings(query[key])
	}

	v4Internal.SanitizeHostForHeader(req)

	credentialScope := s.buildCredentialScope()
	credentialStr := s.Credentials.Context + "/" + credentialScope
	if s.IsPreSign {
		query.Set(amzCredentialKey, credentialStr)
	}

	unsignedHeaders := headers
	// For presigned URLs, hoist the allowed headers into the query string
	// unless the caller disabled hoisting.
	if s.IsPreSign && !s.DisableHeaderHoisting {
		urlValues := url.Values{}
		urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, unsignedHeaders)
		for k := range urlValues {
			query[k] = urlValues[k]
		}
	}

	// Prefer the explicit req.Host override when present.
	host := req.URL.Host
	if len(req.Host) > 0 {
		host = req.Host
	}

	var (
		signedHeaders      http.Header
		signedHeadersStr   string
		canonicalHeaderStr string
	)

	// Presigned requests ignore a larger header set than header-signed ones.
	if s.IsPreSign {
		signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredPresignedHeaders, unsignedHeaders, s.Request.ContentLength)
	} else {
		signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
	}

	if s.IsPreSign {
		query.Set(amzSignedHeadersKey, signedHeadersStr)
	}

	// SigV4 requires spaces encoded as %20, not the '+' that Encode emits.
	rawQuery := strings.Replace(query.Encode(), "+", "%20", -1)

	canonicalURI := v4Internal.GetURIPath(req.URL)
	if !s.DisableURIPathEscaping {
		canonicalURI = httpbinding.EscapePath(canonicalURI, false)
	}

	canonicalString := s.buildCanonicalString(
		req.Method,
		canonicalURI,
		rawQuery,
		signedHeadersStr,
		canonicalHeaderStr,
	)

	strToSign := s.buildStringToSign(credentialScope, canonicalString)
	signingSignature, err := s.buildSignature(strToSign)
	if err != nil {
		return signedRequest{}, err
	}

	if s.IsPreSign {
		// Append the signature directly; rawQuery is already encoded.
		rawQuery += "&X-Amz-Signature=" + signingSignature
	} else {
		// Replace any existing Authorization value rather than appending.
		headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature))
	}

	req.URL.RawQuery = rawQuery

	return signedRequest{
		Request:         req,
		SignedHeaders:   signedHeaders,
		CanonicalString: canonicalString,
		StringToSign:    strToSign,
		PreSigned:       s.IsPreSign,
	}, nil
}
|
|
||||||
|
|
||||||
func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string {
|
|
||||||
const credential = "Credential="
|
|
||||||
const signedHeaders = "SignedHeaders="
|
|
||||||
const signature = "Signature="
|
|
||||||
const commaSpace = ", "
|
|
||||||
|
|
||||||
var parts strings.Builder
|
|
||||||
parts.Grow(len(signingAlgorithm) + 1 +
|
|
||||||
len(credential) + len(credentialStr) + len(commaSpace) +
|
|
||||||
len(signedHeaders) + len(signedHeadersStr) + len(commaSpace) +
|
|
||||||
len(signature) + len(signingSignature),
|
|
||||||
)
|
|
||||||
parts.WriteString(signingAlgorithm)
|
|
||||||
parts.WriteRune(' ')
|
|
||||||
parts.WriteString(credential)
|
|
||||||
parts.WriteString(credentialStr)
|
|
||||||
parts.WriteString(commaSpace)
|
|
||||||
parts.WriteString(signedHeaders)
|
|
||||||
parts.WriteString(signedHeadersStr)
|
|
||||||
parts.WriteString(commaSpace)
|
|
||||||
parts.WriteString(signature)
|
|
||||||
parts.WriteString(signingSignature)
|
|
||||||
return parts.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *httpSigner) buildCredentialScope() string {
|
|
||||||
return strings.Join([]string{
|
|
||||||
s.Time.Format(shortTimeFormat),
|
|
||||||
s.ServiceName,
|
|
||||||
"aws4_request",
|
|
||||||
}, "/")
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
|
|
||||||
query := url.Values{}
|
|
||||||
unsignedHeaders := http.Header{}
|
|
||||||
for k, h := range header {
|
|
||||||
if r.IsValid(k) {
|
|
||||||
query[k] = h
|
|
||||||
} else {
|
|
||||||
unsignedHeaders[k] = h
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return query, unsignedHeaders
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildCanonicalHeaders lowercases, filters (per rule), sorts, and joins the
// request headers into the canonical form required for signing. It returns
// the map of signed headers, the ';'-joined signed-header-name list, and the
// canonical header string ("name:value\n" per header, values comma-joined).
// The length parameter is currently unused (see the commented-out
// content-length handling below, kept to match the upstream source).
func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
	signed = make(http.Header)

	var headers []string
	// The host header is always signed, sourced from the host argument rather
	// than the header map.
	const hostHeader = "host"
	headers = append(headers, hostHeader)
	signed[hostHeader] = append(signed[hostHeader], host)

	//const contentLengthHeader = "content-length"
	//if length > 0 {
	//	headers = append(headers, contentLengthHeader)
	//	signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
	//}

	for k, v := range header {
		if !rule.IsValid(k) {
			continue // ignored header
		}

		lowerCaseKey := strings.ToLower(k)
		if _, ok := signed[lowerCaseKey]; ok {
			// include additional values
			signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
			continue
		}

		headers = append(headers, lowerCaseKey)
		signed[lowerCaseKey] = v
	}
	// Canonical form requires header names in sorted order.
	sort.Strings(headers)

	signedHeaders = strings.Join(headers, ";")

	var canonicalHeaders strings.Builder
	n := len(headers)
	const colon = ':'
	for i := 0; i < n; i++ {
		if headers[i] == hostHeader {
			canonicalHeaders.WriteString(hostHeader)
			canonicalHeaders.WriteRune(colon)
			canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host))
		} else {
			canonicalHeaders.WriteString(headers[i])
			canonicalHeaders.WriteRune(colon)
			// Trim out leading, trailing, and dedup inner spaces from signed header values.
			values := signed[headers[i]]
			for j, v := range values {
				cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
				canonicalHeaders.WriteString(cleanedValue)
				if j < len(values)-1 {
					canonicalHeaders.WriteRune(',')
				}
			}
		}
		canonicalHeaders.WriteRune('\n')
	}
	canonicalHeadersStr = canonicalHeaders.String()

	return signed, signedHeaders, canonicalHeadersStr
}
|
|
||||||
|
|
||||||
func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string {
|
|
||||||
return strings.Join([]string{
|
|
||||||
method,
|
|
||||||
uri,
|
|
||||||
query,
|
|
||||||
canonicalHeaders,
|
|
||||||
signedHeaders,
|
|
||||||
s.PayloadHash,
|
|
||||||
}, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string {
|
|
||||||
return strings.Join([]string{
|
|
||||||
signingAlgorithm,
|
|
||||||
s.Time.Format(timeFormat),
|
|
||||||
credentialScope,
|
|
||||||
hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))),
|
|
||||||
}, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeHash(hash hash.Hash, b []byte) []byte {
|
|
||||||
hash.Reset()
|
|
||||||
hash.Write(b)
|
|
||||||
return hash.Sum(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildSignature hashes the string-to-sign with SHA-256 and signs the digest
// with the credential's ECDSA private key, returning the hex-encoded
// signature. rand.Reader supplies the nonce, so signatures are not
// deterministic across calls.
func (s *httpSigner) buildSignature(strToSign string) (string, error) {
	sig, err := s.Credentials.PrivateKey.Sign(rand.Reader, makeHash(sha256.New(), []byte(strToSign)), crypto.SHA256)
	if err != nil {
		return "", err
	}
	return hex.EncodeToString(sig), nil
}
|
|
||||||
|
|
||||||
// logSignInfoMsg is the format template used to dump the canonical request
// and string-to-sign when request-signature logging is enabled.
const logSignInfoMsg = `Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`

// logSignedURLMsg is appended to the signing log output for presigned
// requests only.
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
|
|
||||||
|
|
||||||
func logHTTPSigningInfo(_ context.Context, options SignerOptions, r signedRequest) {
|
|
||||||
if !options.LogSigning {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
signedURLMsg := ""
|
|
||||||
if r.PreSigned {
|
|
||||||
signedURLMsg = fmt.Sprintf(logSignedURLMsg, r.Request.URL.String())
|
|
||||||
}
|
|
||||||
if options.Logger != nil {
|
|
||||||
options.Logger.Debug(fmt.Sprintf(logSignInfoMsg, r.CanonicalString, r.StringToSign, signedURLMsg))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// signedRequest carries the outcome of httpSigner.Build: the (mutated)
// request plus the intermediate signing artifacts, retained for logging and
// for returning signed headers to callers.
type signedRequest struct {
	Request         *http.Request // the request with signature applied (header or query)
	SignedHeaders   http.Header   // headers included in the signature
	CanonicalString string        // the canonical request string
	StringToSign    string        // the final string that was signed
	PreSigned       bool          // true when the signature was placed in the query string
}
|
|
|
@ -1,425 +0,0 @@
|
||||||
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/v4a_test.go
|
|
||||||
// with changes:
|
|
||||||
// * use zap.Logger instead of smithy-go/logging
|
|
||||||
|
|
||||||
package v4a
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"math/big"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/crypto"
|
|
||||||
"github.com/aws/aws-sdk-go-v2/aws"
|
|
||||||
"go.uber.org/zap/zaptest"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
accessKey = "AKISORANDOMAASORANDOM"
|
|
||||||
secretKey = "q+jcrXGc+0zWN6uzclKVhvMmUsIfRPa4rlRandom"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestDeriveECDSAKeyPairFromSecret(t *testing.T) {
|
|
||||||
privateKey, err := deriveKeyFromAccessKeyPair(accessKey, secretKey)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedX := func() *big.Int {
|
|
||||||
t.Helper()
|
|
||||||
b, ok := new(big.Int).SetString("15D242CEEBF8D8169FD6A8B5A746C41140414C3B07579038DA06AF89190FFFCB", 16)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("failed to parse big integer")
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}()
|
|
||||||
expectedY := func() *big.Int {
|
|
||||||
t.Helper()
|
|
||||||
b, ok := new(big.Int).SetString("515242CEDD82E94799482E4C0514B505AFCCF2C0C98D6A553BF539F424C5EC0", 16)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("failed to parse big integer")
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}()
|
|
||||||
|
|
||||||
if privateKey.X.Cmp(expectedX) != 0 {
|
|
||||||
t.Errorf("expected % X, got % X", expectedX, privateKey.X)
|
|
||||||
}
|
|
||||||
if privateKey.Y.Cmp(expectedY) != 0 {
|
|
||||||
t.Errorf("expected % X, got % X", expectedY, privateKey.Y)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSignHTTP(t *testing.T) {
|
|
||||||
req := buildRequest("dynamodb", "us-east-1")
|
|
||||||
|
|
||||||
signer, credProvider := buildSigner(t, true)
|
|
||||||
|
|
||||||
key, err := credProvider.RetrievePrivateKey(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expect no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = signer.SignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedDate := "19700101T000000Z"
|
|
||||||
expectedAlg := "AWS4-ECDSA-P256-SHA256"
|
|
||||||
expectedCredential := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
|
|
||||||
expectedSignedHeaders := "content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-region-set;x-amz-security-token;x-amz-target"
|
|
||||||
expectedStrToSignHash := "4ba7d0482cf4d5450cefdc067a00de1a4a715e444856fa3e1d85c35fb34d9730"
|
|
||||||
|
|
||||||
q := req.Header
|
|
||||||
|
|
||||||
validateAuthorization(t, q.Get("Authorization"), expectedAlg, expectedCredential, expectedSignedHeaders, expectedStrToSignHash)
|
|
||||||
|
|
||||||
if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSignHTTP_NoSessionToken(t *testing.T) {
|
|
||||||
req := buildRequest("dynamodb", "us-east-1")
|
|
||||||
|
|
||||||
signer, credProvider := buildSigner(t, false)
|
|
||||||
|
|
||||||
key, err := credProvider.RetrievePrivateKey(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expect no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = signer.SignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedAlg := "AWS4-ECDSA-P256-SHA256"
|
|
||||||
expectedCredential := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
|
|
||||||
expectedSignedHeaders := "content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-region-set;x-amz-target"
|
|
||||||
expectedStrToSignHash := "1aeefb422ae6aa0de7aec829da813e55cff35553cac212dffd5f9474c71e47ee"
|
|
||||||
|
|
||||||
q := req.Header
|
|
||||||
|
|
||||||
validateAuthorization(t, q.Get("Authorization"), expectedAlg, expectedCredential, expectedSignedHeaders, expectedStrToSignHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPresignHTTP(t *testing.T) {
|
|
||||||
req := buildRequest("dynamodb", "us-east-1")
|
|
||||||
|
|
||||||
signer, credProvider := buildSigner(t, false)
|
|
||||||
|
|
||||||
key, err := credProvider.RetrievePrivateKey(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expect no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
query := req.URL.Query()
|
|
||||||
query.Set("X-Amz-Expires", "18000")
|
|
||||||
req.URL.RawQuery = query.Encode()
|
|
||||||
|
|
||||||
signedURL, _, err := signer.PresignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedDate := "19700101T000000Z"
|
|
||||||
expectedAlg := "AWS4-ECDSA-P256-SHA256"
|
|
||||||
expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
|
|
||||||
expectedCredential := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
|
|
||||||
expectedStrToSignHash := "d7ffbd2fab644384c056957e6ac38de4ae68246764b5f5df171b3824153b6397"
|
|
||||||
expectedTarget := "prefix.Operation"
|
|
||||||
|
|
||||||
signedReq, err := url.Parse(signedURL)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
q := signedReq.Query()
|
|
||||||
|
|
||||||
validateSignature(t, expectedStrToSignHash, q.Get("X-Amz-Signature"))
|
|
||||||
|
|
||||||
if e, a := expectedAlg, q.Get("X-Amz-Algorithm"); e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
if e, a := expectedCredential, q.Get("X-Amz-Credential"); e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
|
|
||||||
t.Errorf("expect %v to be empty", a)
|
|
||||||
}
|
|
||||||
if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
if e, a := "us-east-1", q.Get("X-Amz-Region-Set"); e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPresignHTTP_BodyWithArrayRequest(t *testing.T) {
|
|
||||||
req := buildRequest("dynamodb", "us-east-1")
|
|
||||||
req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
|
|
||||||
|
|
||||||
signer, credProvider := buildSigner(t, true)
|
|
||||||
|
|
||||||
key, err := credProvider.RetrievePrivateKey(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expect no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
query := req.URL.Query()
|
|
||||||
query.Set("X-Amz-Expires", "300")
|
|
||||||
req.URL.RawQuery = query.Encode()
|
|
||||||
|
|
||||||
signedURI, _, err := signer.PresignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
signedReq, err := url.Parse(signedURI)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedAlg := "AWS4-ECDSA-P256-SHA256"
|
|
||||||
expectedDate := "19700101T000000Z"
|
|
||||||
expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
|
|
||||||
expectedStrToSignHash := "acff64fd3689be96259d4112c3742ff79f4da0d813bc58a285dc1c4449760bec"
|
|
||||||
expectedCred := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
|
|
||||||
expectedTarget := "prefix.Operation"
|
|
||||||
|
|
||||||
q := signedReq.Query()
|
|
||||||
|
|
||||||
validateSignature(t, expectedStrToSignHash, q.Get("X-Amz-Signature"))
|
|
||||||
|
|
||||||
if e, a := expectedAlg, q.Get("X-Amz-Algorithm"); e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
if e, a := expectedCred, q.Get("X-Amz-Credential"); e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
|
|
||||||
t.Errorf("expect %v to be empty, was not", a)
|
|
||||||
}
|
|
||||||
if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
if e, a := "us-east-1", q.Get("X-Amz-Region-Set"); e != a {
|
|
||||||
t.Errorf("expect %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func TestSign_buildCanonicalHeaders(t *testing.T) {
|
|
||||||
serviceName := "mockAPI"
|
|
||||||
region := "mock-region"
|
|
||||||
endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
|
|
||||||
|
|
||||||
req, err := http.NewRequest("POST", endpoint, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create request, %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
req.Header.Set("FooInnerSpace", " inner space ")
|
|
||||||
req.Header.Set("FooLeadingSpace", " leading-space")
|
|
||||||
req.Header.Add("FooMultipleSpace", "no-space")
|
|
||||||
req.Header.Add("FooMultipleSpace", "\ttab-space")
|
|
||||||
req.Header.Add("FooMultipleSpace", "trailing-space ")
|
|
||||||
req.Header.Set("FooNoSpace", "no-space")
|
|
||||||
req.Header.Set("FooTabSpace", "\ttab-space\t")
|
|
||||||
req.Header.Set("FooTrailingSpace", "trailing-space ")
|
|
||||||
req.Header.Set("FooWrappedSpace", " wrapped-space ")
|
|
||||||
|
|
||||||
credProvider := &SymmetricCredentialAdaptor{
|
|
||||||
SymmetricProvider: staticCredentialsProvider{
|
|
||||||
Value: aws.Credentials{
|
|
||||||
AccessKeyID: accessKey,
|
|
||||||
SecretAccessKey: secretKey,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
key, err := credProvider.RetrievePrivateKey(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expect no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := &httpSigner{
|
|
||||||
Request: req,
|
|
||||||
ServiceName: serviceName,
|
|
||||||
RegionSet: []string{region},
|
|
||||||
Credentials: key,
|
|
||||||
Time: time.Date(2021, 10, 20, 12, 42, 0, 0, time.UTC),
|
|
||||||
}
|
|
||||||
|
|
||||||
build, err := ctx.Build()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
expectCanonicalString := strings.Join([]string{
|
|
||||||
`POST`,
|
|
||||||
`/`,
|
|
||||||
``,
|
|
||||||
`fooinnerspace:inner space`,
|
|
||||||
`fooleadingspace:leading-space`,
|
|
||||||
`foomultiplespace:no-space,tab-space,trailing-space`,
|
|
||||||
`foonospace:no-space`,
|
|
||||||
`footabspace:tab-space`,
|
|
||||||
`footrailingspace:trailing-space`,
|
|
||||||
`foowrappedspace:wrapped-space`,
|
|
||||||
`host:mockAPI.mock-region.amazonaws.com`,
|
|
||||||
`x-amz-date:20211020T124200Z`,
|
|
||||||
`x-amz-region-set:mock-region`,
|
|
||||||
``,
|
|
||||||
`fooinnerspace;fooleadingspace;foomultiplespace;foonospace;footabspace;footrailingspace;foowrappedspace;host;x-amz-date;x-amz-region-set`,
|
|
||||||
``,
|
|
||||||
}, "\n")
|
|
||||||
if diff := cmpDiff(expectCanonicalString, build.CanonicalString); diff != "" {
|
|
||||||
t.Errorf("expect match, got\n%s", diff)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateAuthorization(t *testing.T, authorization, expectedAlg, expectedCredential, expectedSignedHeaders, expectedStrToSignHash string) {
|
|
||||||
t.Helper()
|
|
||||||
split := strings.SplitN(authorization, " ", 2)
|
|
||||||
|
|
||||||
if len(split) != 2 {
|
|
||||||
t.Fatal("unexpected authorization header format")
|
|
||||||
}
|
|
||||||
|
|
||||||
if e, a := split[0], expectedAlg; e != a {
|
|
||||||
t.Errorf("expected %v, got %v", e, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
keyValues := strings.Split(split[1], ", ")
|
|
||||||
seen := make(map[string]string)
|
|
||||||
|
|
||||||
for _, kv := range keyValues {
|
|
||||||
idx := strings.Index(kv, "=")
|
|
||||||
if idx == -1 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
key, value := kv[:idx], kv[idx+1:]
|
|
||||||
seen[key] = value
|
|
||||||
}
|
|
||||||
|
|
||||||
if a, ok := seen["Credential"]; ok {
|
|
||||||
if expectedCredential != a {
|
|
||||||
t.Errorf("expected credential %v, got %v", expectedCredential, a)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
t.Errorf("Credential not found in authorization string")
|
|
||||||
}
|
|
||||||
|
|
||||||
if a, ok := seen["SignedHeaders"]; ok {
|
|
||||||
if expectedSignedHeaders != a {
|
|
||||||
t.Errorf("expected signed headers %v, got %v", expectedSignedHeaders, a)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
t.Errorf("SignedHeaders not found in authorization string")
|
|
||||||
}
|
|
||||||
|
|
||||||
if a, ok := seen["Signature"]; ok {
|
|
||||||
validateSignature(t, expectedStrToSignHash, a)
|
|
||||||
} else {
|
|
||||||
t.Errorf("signature not found in authorization string")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateSignature(t *testing.T, expectedHash, signature string) {
|
|
||||||
t.Helper()
|
|
||||||
pair, err := deriveKeyFromAccessKeyPair(accessKey, secretKey)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
hash, _ := hex.DecodeString(expectedHash)
|
|
||||||
sig, _ := hex.DecodeString(signature)
|
|
||||||
|
|
||||||
ok, err := crypto.VerifySignature(&pair.PublicKey, hash, sig)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
if !ok {
|
|
||||||
t.Errorf("failed to verify signing singature")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildRequest constructs the fixture request used by the signer tests:
// a POST to a synthetic AWS-style endpoint with an opaque URL containing
// reserved characters and a mix of canonical/non-canonical header spellings.
func buildRequest(serviceName, region string) *http.Request {
	endpoint := fmt.Sprintf("https://%s.%s.amazonaws.com", serviceName, region)
	req, _ := http.NewRequest("POST", endpoint, nil)
	req.URL.Opaque = "//example.org/bucket/key-._~,!@%23$%25^&*()"

	req.Header.Set("X-Amz-Target", "prefix.Operation")
	req.Header.Set("Content-Type", "application/x-amz-json-1.0")
	req.Header.Set("Content-Length", strconv.Itoa(1024))
	req.Header.Set("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")

	// Two spellings that canonicalize to the same MIME key, so the header
	// ends up with two values.
	req.Header.Add("X-Amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
	req.Header.Add("X-amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")

	return req
}
|
|
||||||
|
|
||||||
func buildSigner(t *testing.T, withToken bool) (*Signer, CredentialsProvider) {
|
|
||||||
creds := aws.Credentials{
|
|
||||||
AccessKeyID: accessKey,
|
|
||||||
SecretAccessKey: secretKey,
|
|
||||||
}
|
|
||||||
|
|
||||||
if withToken {
|
|
||||||
creds.SessionToken = "TOKEN"
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewSigner(func(options *SignerOptions) {
|
|
||||||
options.Logger = zaptest.NewLogger(t)
|
|
||||||
}), &SymmetricCredentialAdaptor{
|
|
||||||
SymmetricProvider: staticCredentialsProvider{
|
|
||||||
Value: creds,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type staticCredentialsProvider struct {
|
|
||||||
Value aws.Credentials
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s staticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) {
|
|
||||||
v := s.Value
|
|
||||||
if v.AccessKeyID == "" || v.SecretAccessKey == "" {
|
|
||||||
return aws.Credentials{
|
|
||||||
Source: "Source Name",
|
|
||||||
}, fmt.Errorf("static credentials are empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(v.Source) == 0 {
|
|
||||||
v.Source = "Source Name"
|
|
||||||
}
|
|
||||||
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// cmpDiff returns an empty string when e and a are deeply equal, otherwise a
// short "e != a" description of the mismatch.
func cmpDiff(e, a interface{}) string {
	if reflect.DeepEqual(e, a) {
		return ""
	}
	return fmt.Sprintf("%v != %v", e, a)
}
|
|
|
@ -1,117 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/cache.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go-v2/aws"
|
|
||||||
)
|
|
||||||
|
|
||||||
// lookupKey builds the derived-key cache key in the form "region/service".
//
// Fix: the previous Grow reserved len(region)+len(service)+3 bytes although
// only a single '/' separator is ever written; reserve exactly +1.
func lookupKey(service, region string) string {
	var s strings.Builder
	s.Grow(len(region) + len(service) + 1)
	s.WriteString(region)
	s.WriteRune('/')
	s.WriteString(service)
	return s.String()
}
|
|
||||||
|
|
||||||
// derivedKey is a cached SigV4 signing key together with the inputs that
// determine its validity (access key and signing date).
type derivedKey struct {
	AccessKey  string    // access key ID the credential was derived for
	Date       time.Time // signing date; keys are only valid for the same day
	Credential []byte    // the derived signing key
}

// derivedKeyCache caches derived signing keys per "region/service" lookup
// key, guarded by an RWMutex for concurrent use.
type derivedKeyCache struct {
	values map[string]derivedKey
	mutex  sync.RWMutex
}
|
|
||||||
|
|
||||||
func newDerivedKeyCache() derivedKeyCache {
|
|
||||||
return derivedKeyCache{
|
|
||||||
values: make(map[string]derivedKey),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns the derived signing key for the given credentials, service,
// region, and signing time, deriving and caching it on a miss. It uses a
// double-checked locking pattern: an optimistic read under RLock, then a
// re-check under the write lock before deriving, so concurrent callers do
// not derive the same key twice.
func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte {
	key := lookupKey(service, region)
	s.mutex.RLock()
	if cred, ok := s.get(key, credentials, signingTime.Time); ok {
		s.mutex.RUnlock()
		return cred
	}
	s.mutex.RUnlock()

	s.mutex.Lock()
	// Re-check under the write lock: another goroutine may have populated
	// the entry between the RUnlock above and acquiring the write lock.
	if cred, ok := s.get(key, credentials, signingTime.Time); ok {
		s.mutex.Unlock()
		return cred
	}
	cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime)
	entry := derivedKey{
		AccessKey:  credentials.AccessKeyID,
		Date:       signingTime.Time,
		Credential: cred,
	}
	s.values[key] = entry
	s.mutex.Unlock()

	return cred
}
|
|
||||||
|
|
||||||
func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) {
|
|
||||||
cacheEntry, ok := s.retrieveFromCache(key)
|
|
||||||
if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) {
|
|
||||||
return cacheEntry.Credential, true
|
|
||||||
}
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) {
|
|
||||||
if v, ok := s.values[key]; ok {
|
|
||||||
return v, true
|
|
||||||
}
|
|
||||||
return derivedKey{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// SigningKeyDeriver derives a signing key from a set of credentials
type SigningKeyDeriver struct {
	cache derivedKeyCache // memoizes derived keys per region/service and day
}
|
|
||||||
|
|
||||||
// NewSigningKeyDeriver returns a new SigningKeyDeriver
|
|
||||||
func NewSigningKeyDeriver() *SigningKeyDeriver {
|
|
||||||
return &SigningKeyDeriver{
|
|
||||||
cache: newDerivedKeyCache(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing.
// Results are cached; see derivedKeyCache.Get.
func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte {
	return k.cache.Get(credential, service, region, signingTime)
}
|
|
||||||
|
|
||||||
// deriveKey computes the SigV4 signing key by chaining HMAC-SHA256 over the
// secret, date, region, service, and the literal "aws4_request", in that
// order, as mandated by the SigV4 key-derivation scheme.
func deriveKey(secret, service, region string, t SigningTime) []byte {
	hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat()))
	hmacRegion := HMACSHA256(hmacDate, []byte(region))
	hmacService := HMACSHA256(hmacRegion, []byte(service))
	return HMACSHA256(hmacService, []byte("aws4_request"))
}
|
|
||||||
|
|
||||||
func isSameDay(x, y time.Time) bool {
|
|
||||||
xYear, xMonth, xDay := x.Date()
|
|
||||||
yYear, yMonth, yDay := y.Date()
|
|
||||||
|
|
||||||
if xYear != yYear {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if xMonth != yMonth {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return xDay == yDay
|
|
||||||
}
|
|
|
@ -1,42 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/const.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
// Signature Version 4 (SigV4) Constants
const (
	// EmptyStringSHA256 is the hex encoded sha256 value of an empty string
	EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`

	// UnsignedPayload indicates that the request payload body is unsigned
	UnsignedPayload = "UNSIGNED-PAYLOAD"

	// AmzAlgorithmKey indicates the signing algorithm
	AmzAlgorithmKey = "X-Amz-Algorithm"

	// AmzSecurityTokenKey indicates the security token to be used with temporary credentials
	AmzSecurityTokenKey = "X-Amz-Security-Token"

	// AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'
	AmzDateKey = "X-Amz-Date"

	// AmzCredentialKey is the access key ID and credential scope
	AmzCredentialKey = "X-Amz-Credential"

	// AmzSignedHeadersKey is the set of headers signed for the request
	AmzSignedHeadersKey = "X-Amz-SignedHeaders"

	// AmzSignatureKey is the query parameter to store the SigV4 signature
	AmzSignatureKey = "X-Amz-Signature"

	// TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
	TimeFormat = "20060102T150405Z"

	// ShortTimeFormat is the shorten time format used in the credential scope
	ShortTimeFormat = "20060102"

	// ContentSHAKey is the SHA256 of request body
	ContentSHAKey = "X-Amz-Content-Sha256"

	// StreamingEventsPayload indicates that the request payload body is a signed event stream.
	StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS"
)
|
|
|
@ -1,90 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/header_rules.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Rules houses a set of Rule needed for validation of a
// string value
type Rules []Rule

// Rule interface allows for more flexible rules and just simply
// checks whether or not a value adheres to that Rule
type Rule interface {
	IsValid(value string) bool
}

// IsValid reports whether any rule in the set matches the value.
// An empty rule set matches nothing.
func (r Rules) IsValid(value string) bool {
	for i := range r {
		if r[i].IsValid(value) {
			return true
		}
	}
	return false
}
|
|
||||||
|
|
||||||
// MapRule generic Rule for maps
type MapRule map[string]struct{}

// IsValid reports whether value is a key of the map (exact,
// case-sensitive match).
func (m MapRule) IsValid(value string) bool {
	if _, found := m[value]; found {
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
// AllowList is a generic Rule for include listing; it simply delegates
// to the embedded Rule.
type AllowList struct {
	Rule
}

// IsValid for AllowList checks if the value is within the AllowList
func (w AllowList) IsValid(value string) bool {
	return w.Rule.IsValid(value)
}
|
|
||||||
|
|
||||||
// ExcludeList is a generic Rule for exclude listing
type ExcludeList struct {
	Rule
}

// IsValid for ExcludeList inverts the embedded Rule: the value is valid
// only when the embedded Rule does NOT match it.
func (b ExcludeList) IsValid(value string) bool {
	return !b.Rule.IsValid(value)
}
|
|
||||||
|
|
||||||
// Patterns is a list of strings to match against
|
|
||||||
type Patterns []string
|
|
||||||
|
|
||||||
// IsValid for Patterns checks each pattern and returns if a match has
|
|
||||||
// been found
|
|
||||||
func (p Patterns) IsValid(value string) bool {
|
|
||||||
for _, pattern := range p {
|
|
||||||
if HasPrefixFold(value, pattern) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// InclusiveRules rules allow for rules to depend on one another
|
|
||||||
type InclusiveRules []Rule
|
|
||||||
|
|
||||||
// IsValid will return true if all rules are true
|
|
||||||
func (r InclusiveRules) IsValid(value string) bool {
|
|
||||||
for _, rule := range r {
|
|
||||||
if !rule.IsValid(value) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
// under Unicode case-folding.
func HasPrefixFold(s, prefix string) bool {
	if len(s) < len(prefix) {
		return false
	}
	return strings.EqualFold(s[:len(prefix)], prefix)
}
|
|
|
@ -1,88 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/header.go
|
|
||||||
// with changes:
|
|
||||||
// * drop User-Agent header from ignored
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
// IgnoredPresignedHeaders is a list of headers that are ignored during signing
// of presigned requests (ExcludeList: a header is valid only if NOT listed).
var IgnoredPresignedHeaders = Rules{
	ExcludeList{
		MapRule{
			"Authorization":   struct{}{},
			"User-Agent":      struct{}{},
			"X-Amzn-Trace-Id": struct{}{},
			"Expect":          struct{}{},
		},
	},
}

// IgnoredHeaders is a list of headers that are ignored during signing
// drop User-Agent header to be compatible with aws sdk java v1.
var IgnoredHeaders = Rules{
	ExcludeList{
		MapRule{
			"Authorization": struct{}{},
			// "User-Agent" intentionally NOT excluded here (kept signed);
			// see the comment above about aws sdk java v1 compatibility.
			//"User-Agent":   struct{}{},
			"X-Amzn-Trace-Id": struct{}{},
			"Expect":          struct{}{},
		},
	},
}

// RequiredSignedHeaders is a allow list for Build canonical headers.
var RequiredSignedHeaders = Rules{
	AllowList{
		MapRule{
			"Cache-Control":                         struct{}{},
			"Content-Disposition":                   struct{}{},
			"Content-Encoding":                      struct{}{},
			"Content-Language":                      struct{}{},
			"Content-Md5":                           struct{}{},
			"Content-Type":                          struct{}{},
			"Expires":                               struct{}{},
			"If-Match":                              struct{}{},
			"If-Modified-Since":                     struct{}{},
			"If-None-Match":                         struct{}{},
			"If-Unmodified-Since":                   struct{}{},
			"Range":                                 struct{}{},
			"X-Amz-Acl":                             struct{}{},
			"X-Amz-Copy-Source":                     struct{}{},
			"X-Amz-Copy-Source-If-Match":            struct{}{},
			"X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
			"X-Amz-Copy-Source-If-None-Match":       struct{}{},
			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
			"X-Amz-Copy-Source-Range":               struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
			"X-Amz-Expected-Bucket-Owner":                     struct{}{},
			"X-Amz-Grant-Full-control":                        struct{}{},
			"X-Amz-Grant-Read":                                struct{}{},
			"X-Amz-Grant-Read-Acp":                            struct{}{},
			"X-Amz-Grant-Write":                               struct{}{},
			"X-Amz-Grant-Write-Acp":                           struct{}{},
			"X-Amz-Metadata-Directive":                        struct{}{},
			"X-Amz-Mfa":                                       struct{}{},
			"X-Amz-Request-Payer":                             struct{}{},
			"X-Amz-Server-Side-Encryption":                    struct{}{},
			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":     struct{}{},
			"X-Amz-Server-Side-Encryption-Context":            struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key":       struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
			"X-Amz-Storage-Class":                             struct{}{},
			"X-Amz-Website-Redirect-Location":                 struct{}{},
			"X-Amz-Content-Sha256":                            struct{}{},
			"X-Amz-Tagging":                                   struct{}{},
		},
	},
	Patterns{"X-Amz-Object-Lock-"},
	Patterns{"X-Amz-Meta-"},
}

// AllowedQueryHoisting is a allowed list for Build query headers. The boolean value
// represents whether or not it is a pattern.
// A header may be hoisted into the query string only if it is NOT a
// required-signed header AND begins with "X-Amz-".
var AllowedQueryHoisting = InclusiveRules{
	ExcludeList{RequiredSignedHeaders},
	Patterns{"X-Amz-"},
}
|
|
|
@ -1,65 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/header_test.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import "testing"
|
|
||||||
|
|
||||||
// TestAllowedQueryHoisting verifies which headers may be hoisted into the
// query string: only non-required-signed headers with the "X-Amz-" prefix.
func TestAllowedQueryHoisting(t *testing.T) {
	cases := map[string]struct {
		Header      string
		ExpectHoist bool
	}{
		"object-lock": {
			Header:      "X-Amz-Object-Lock-Mode",
			ExpectHoist: false,
		},
		"s3 metadata": {
			Header:      "X-Amz-Meta-SomeName",
			ExpectHoist: false,
		},
		"another header": {
			Header:      "X-Amz-SomeOtherHeader",
			ExpectHoist: true,
		},
		"non X-AMZ header": {
			Header:      "X-SomeOtherHeader",
			ExpectHoist: false,
		},
	}

	for name, c := range cases {
		t.Run(name, func(t *testing.T) {
			if e, a := c.ExpectHoist, AllowedQueryHoisting.IsValid(c.Header); e != a {
				t.Errorf("expect hoist %v, was %v", e, a)
			}
		})
	}
}
|
|
||||||
|
|
||||||
// TestIgnoredHeaders verifies which headers are dropped from signing.
// Note the inverted comparison below: IgnoredHeaders is an ExcludeList,
// so IsValid returns true when a header should be SIGNED (not ignored).
// Hence "ignored" corresponds to IsValid == false, and the failure
// condition is e == a rather than e != a.
func TestIgnoredHeaders(t *testing.T) {
	cases := map[string]struct {
		Header        string
		ExpectIgnored bool
	}{
		"expect": {
			Header:        "Expect",
			ExpectIgnored: true,
		},
		"authorization": {
			Header:        "Authorization",
			ExpectIgnored: true,
		},
		"X-AMZ header": {
			Header:        "X-Amz-Content-Sha256",
			ExpectIgnored: false,
		},
	}

	for name, c := range cases {
		t.Run(name, func(t *testing.T) {
			if e, a := c.ExpectIgnored, IgnoredHeaders.IsValid(c.Header); e == a {
				t.Errorf("expect ignored %v, was %v", e, a)
			}
		})
	}
}
|
|
|
@ -1,15 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/hmac.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/sha256"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HMACSHA256 computes a HMAC-SHA256 of data given the provided key.
func HMACSHA256(key []byte, data []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(data) // hash.Hash.Write is documented to never return an error
	return mac.Sum(nil)
}
|
|
|
@ -1,77 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/host.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SanitizeHostForHeader removes default port from host and updates request.Host
|
|
||||||
func SanitizeHostForHeader(r *http.Request) {
|
|
||||||
host := getHost(r)
|
|
||||||
port := portOnly(host)
|
|
||||||
if port != "" && isDefaultPort(r.URL.Scheme, port) {
|
|
||||||
r.Host = stripPort(host)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns host from request
|
|
||||||
func getHost(r *http.Request) string {
|
|
||||||
if r.Host != "" {
|
|
||||||
return r.Host
|
|
||||||
}
|
|
||||||
|
|
||||||
return r.URL.Host
|
|
||||||
}
|
|
||||||
|
|
||||||
// stripPort returns hostport without any port number.
//
// If hostport is an IPv6 literal with a port number, the IPv6 literal is
// returned without the square brackets. IPv6 literals may include a zone
// identifier.
//
// Copied from the Go 1.8 standard library (net/url).
func stripPort(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	if colon == -1 {
		return hostport
	}
	if bracket := strings.IndexByte(hostport, ']'); bracket != -1 {
		return strings.TrimPrefix(hostport[:bracket], "[")
	}
	return hostport[:colon]
}
|
|
||||||
|
|
||||||
// portOnly returns the port part of hostport, without the leading colon.
// If hostport doesn't contain a port, an empty string is returned.
//
// Copied from the Go 1.8 standard library (net/url).
func portOnly(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	switch {
	case colon == -1:
		return ""
	case strings.Contains(hostport, "]:"):
		// IPv6 literal with a port: everything after "]:".
		return hostport[strings.Index(hostport, "]:")+2:]
	case strings.Contains(hostport, "]"):
		// IPv6 literal without a port.
		return ""
	default:
		return hostport[colon+1:]
	}
}
|
|
||||||
|
|
||||||
// isDefaultPort reports whether port is the standard port for scheme
// (80 for HTTP, 443 for HTTPS, case-insensitive). An empty port is
// treated as default.
func isDefaultPort(scheme, port string) bool {
	if port == "" {
		return true
	}
	switch strings.ToLower(scheme) {
	case "http":
		return port == "80"
	case "https":
		return port == "443"
	}
	return false
}
|
|
|
@ -1,15 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/scope.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import "strings"
|
|
||||||
|
|
||||||
// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope
|
|
||||||
func BuildCredentialScope(signingTime SigningTime, region, service string) string {
|
|
||||||
return strings.Join([]string{
|
|
||||||
signingTime.ShortTimeFormat(),
|
|
||||||
region,
|
|
||||||
service,
|
|
||||||
"aws4_request",
|
|
||||||
}, "/")
|
|
||||||
}
|
|
|
@ -1,38 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/time.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing.
type SigningTime struct {
	time.Time
	// Lazily-populated caches of the two formatted representations;
	// empty until first use.
	timeFormat      string
	shortTimeFormat string
}

// NewSigningTime creates a new SigningTime given a time.Time
func NewSigningTime(t time.Time) SigningTime {
	return SigningTime{
		Time: t,
	}
}

// TimeFormat provides a time formatted in the X-Amz-Date format.
func (m *SigningTime) TimeFormat() string {
	return m.format(&m.timeFormat, TimeFormat)
}

// ShortTimeFormat provides a time formatted of 20060102.
func (m *SigningTime) ShortTimeFormat() string {
	return m.format(&m.shortTimeFormat, ShortTimeFormat)
}

// format returns *target if already populated, otherwise formats the time
// with the given layout and memoizes the result into *target.
// Not safe for concurrent use on a shared *SigningTime.
func (m *SigningTime) format(target *string, format string) string {
	if len(*target) > 0 {
		return *target
	}
	v := m.Time.Format(format)
	*target = v
	return v
}
|
|
|
@ -1,82 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/util.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
const doubleSpace = " "
|
|
||||||
|
|
||||||
// StripExcessSpaces will rewrite the passed in slice's string values to not
|
|
||||||
// contain multiple side-by-side spaces.
|
|
||||||
func StripExcessSpaces(str string) string {
|
|
||||||
var j, k, l, m, spaces int
|
|
||||||
// Trim trailing spaces
|
|
||||||
for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
|
|
||||||
}
|
|
||||||
|
|
||||||
// Trim leading spaces
|
|
||||||
for k = 0; k < j && str[k] == ' '; k++ {
|
|
||||||
}
|
|
||||||
str = str[k : j+1]
|
|
||||||
|
|
||||||
// Strip multiple spaces.
|
|
||||||
j = strings.Index(str, doubleSpace)
|
|
||||||
if j < 0 {
|
|
||||||
return str
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := []byte(str)
|
|
||||||
for k, m, l = j, j, len(buf); k < l; k++ {
|
|
||||||
if buf[k] == ' ' {
|
|
||||||
if spaces == 0 {
|
|
||||||
// First space.
|
|
||||||
buf[m] = buf[k]
|
|
||||||
m++
|
|
||||||
}
|
|
||||||
spaces++
|
|
||||||
} else {
|
|
||||||
// End of multiple spaces.
|
|
||||||
spaces = 0
|
|
||||||
buf[m] = buf[k]
|
|
||||||
m++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return string(buf[:m])
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetURIPath returns the escaped URI component from the provided URL.
// When u.Opaque is set it is preferred over EscapedPath (callers use this
// to supply a pre-escaped path); the query string and "//host" prefix are
// stripped from the opaque form. An empty path is normalized to "/".
func GetURIPath(u *url.URL) string {
	var uriPath string

	if len(u.Opaque) > 0 {
		const schemeSep, pathSep, queryStart = "//", "/", "?"

		opaque := u.Opaque
		// Cut off the query string if present.
		if idx := strings.Index(opaque, queryStart); idx >= 0 {
			opaque = opaque[:idx]
		}

		// Cutout the scheme separator if present.
		if strings.Index(opaque, schemeSep) == 0 {
			opaque = opaque[len(schemeSep):]
		}

		// capture URI path starting with first path separator.
		if idx := strings.Index(opaque, pathSep); idx >= 0 {
			uriPath = opaque[idx:]
		}
	} else {
		uriPath = u.EscapedPath()
	}

	if len(uriPath) == 0 {
		uriPath = "/"
	}

	return uriPath
}
|
|
|
@ -1,160 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/util_test.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
// lazyURLParse defers parsing of v until the returned function is invoked,
// letting table tests share one getURL signature.
func lazyURLParse(v string) func() (*url.URL, error) {
	return func() (*url.URL, error) {
		return url.Parse(v)
	}
}

// TestGetURIPath exercises GetURIPath for both EscapedPath-based and
// Opaque-based URLs.
func TestGetURIPath(t *testing.T) {
	cases := map[string]struct {
		getURL func() (*url.URL, error)
		expect string
	}{
		// Cases
		"with scheme": {
			getURL: lazyURLParse("https://localhost:9000"),
			expect: "/",
		},
		"no port, with scheme": {
			getURL: lazyURLParse("https://localhost"),
			expect: "/",
		},
		"without scheme": {
			getURL: lazyURLParse("localhost:9000"),
			expect: "/",
		},
		"without scheme, with path": {
			getURL: lazyURLParse("localhost:9000/abc123"),
			expect: "/abc123",
		},
		"without scheme, with separator": {
			getURL: lazyURLParse("//localhost:9000"),
			expect: "/",
		},
		"no port, without scheme, with separator": {
			getURL: lazyURLParse("//localhost"),
			expect: "/",
		},
		"without scheme, with separator, with path": {
			getURL: lazyURLParse("//localhost:9000/abc123"),
			expect: "/abc123",
		},
		"no port, without scheme, with separator, with path": {
			getURL: lazyURLParse("//localhost/abc123"),
			expect: "/abc123",
		},
		"opaque with query string": {
			getURL: lazyURLParse("localhost:9000/abc123?efg=456"),
			expect: "/abc123",
		},
		"failing test": {
			getURL: func() (*url.URL, error) {
				endpoint := "https://service.region.amazonaws.com"
				req, _ := http.NewRequest("POST", endpoint, nil)
				u := req.URL

				u.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"

				query := u.Query()
				query.Set("some-query-key", "value")
				u.RawQuery = query.Encode()

				return u, nil
			},
			expect: "/bucket/key-._~,!@#$%^&*()",
		},
	}

	for name, c := range cases {
		t.Run(name, func(t *testing.T) {
			u, err := c.getURL()
			if err != nil {
				t.Fatalf("failed to get URL, %v", err)
			}

			actual := GetURIPath(u)
			if e, a := c.expect, actual; e != a {
				t.Errorf("expect %v path, got %v", e, a)
			}
		})
	}
}

// TestStripExcessHeaders checks StripExcessSpaces against paired
// input/expected fixtures.
// NOTE(review): interior multi-space runs in these string literals appear
// to have been collapsed during extraction; verify the fixtures against
// the upstream aws-sdk-go-v2 util_test.go before relying on them.
func TestStripExcessHeaders(t *testing.T) {
	vals := []string{
		"",
		"123",
		"1 2 3",
		"1 2 3 ",
		" 1 2 3",
		"1 2 3",
		"1 23",
		"1 2 3",
		"1 2 ",
		" 1 2 ",
		"12 3",
		"12 3 1",
		"12 3 1",
		"12 3 1abc123",
	}

	expected := []string{
		"",
		"123",
		"1 2 3",
		"1 2 3",
		"1 2 3",
		"1 2 3",
		"1 23",
		"1 2 3",
		"1 2",
		"1 2",
		"12 3",
		"12 3 1",
		"12 3 1",
		"12 3 1abc123",
	}

	for i := 0; i < len(vals); i++ {
		r := StripExcessSpaces(vals[i])
		if e, a := expected[i], r; e != a {
			t.Errorf("%d, expect %v, got %v", i, e, a)
		}
	}
}

// stripExcessSpaceCases feeds BenchmarkStripExcessSpaces.
// NOTE(review): same extraction-collapsed-whitespace caveat as above.
var stripExcessSpaceCases = []string{
	`AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`,
	`123 321 123 321`,
	` 123 321 123 321 `,
	` 123 321 123 321 `,
	"123",
	"1 2 3",
	" 1 2 3",
	"1 2 3",
	"1 23",
	"1 2 3",
	"1 2 ",
	" 1 2 ",
	"12 3",
	"12 3 1",
	"12 3 1",
	"12 3 1abc123",
}

// BenchmarkStripExcessSpaces measures StripExcessSpaces over the fixture set.
func BenchmarkStripExcessSpaces(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, v := range stripExcessSpaceCases {
			StripExcessSpaces(v)
		}
	}
}
|
|
|
@ -1,89 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/internal/v4"
|
|
||||||
"github.com/aws/aws-sdk-go-v2/aws"
|
|
||||||
)
|
|
||||||
|
|
||||||
// EventStreamSigner is an AWS EventStream protocol signer.
type EventStreamSigner interface {
	GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error)
}

// StreamSignerOptions is the configuration options for StreamSigner.
// Currently empty; kept for forward-compatible option plumbing.
type StreamSignerOptions struct{}

// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads.
type StreamSigner struct {
	options StreamSignerOptions

	// Signing inputs fixed at construction time.
	credentials aws.Credentials
	service     string
	region      string

	// prevSignature chains each event's signature to the previous one;
	// seeded with the request's seed signature.
	prevSignature []byte

	signingKeyDeriver *v4Internal.SigningKeyDeriver
}
|
|
||||||
|
|
||||||
// NewStreamSigner returns a new AWS EventStream protocol signer.
|
|
||||||
func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner {
|
|
||||||
o := StreamSignerOptions{}
|
|
||||||
|
|
||||||
for _, fn := range optFns {
|
|
||||||
fn(&o)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &StreamSigner{
|
|
||||||
options: o,
|
|
||||||
credentials: credentials,
|
|
||||||
service: service,
|
|
||||||
region: region,
|
|
||||||
signingKeyDeriver: v4Internal.NewSigningKeyDeriver(),
|
|
||||||
prevSignature: seedSignature,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetSignature signs the provided header and payload bytes.
// Each call chains off the previous signature (prevSignature) and then
// advances it, so calls must be made in event order. ctx is accepted for
// interface compliance but is not consulted here.
// Not safe for concurrent use: prevSignature is mutated without locking.
func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) {
	options := s.options

	for _, fn := range optFns {
		fn(&options)
	}

	prevSignature := s.prevSignature

	st := v4Internal.NewSigningTime(signingTime)

	// Derive (or fetch from cache) the SigV4 signing key for this scope.
	sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)

	scope := v4Internal.BuildCredentialScope(st, s.region, s.service)

	stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st)

	signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign))
	// Advance the chain so the next event signs over this signature.
	s.prevSignature = signature

	return signature, nil
}
|
|
||||||
|
|
||||||
// buildEventStreamStringToSign assembles the newline-joined string-to-sign
// for one event: algorithm, timestamp, scope, hex of the previous
// signature, and hex SHA-256 digests of the headers and payload.
// NOTE(review): the same hash instance is passed to makeHash twice —
// presumably makeHash resets it before use; confirm in its definition.
func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
	hash := sha256.New()
	return strings.Join([]string{
		"AWS4-HMAC-SHA256-PAYLOAD",
		signingTime.TimeFormat(),
		credentialScope,
		hex.EncodeToString(previousSignature),
		hex.EncodeToString(makeHash(hash, headers)),
		hex.EncodeToString(makeHash(hash, payload)),
	}, "\n")
}
|
|
|
@ -1,582 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/v4.go
|
|
||||||
// with changes:
|
|
||||||
// * using different headers for sign/presign
|
|
||||||
// * don't duplicate content-length as signed header
|
|
||||||
// * use copy of smithy-go encoding/httpbinding package
|
|
||||||
// * use zap.Logger instead of smithy-go/logging
|
|
||||||
|
|
||||||
// Package v4 implements the AWS signature version 4 algorithm (commonly known
|
|
||||||
// as SigV4).
|
|
||||||
//
|
|
||||||
// For more information about SigV4, see [Signing AWS API requests] in the IAM
|
|
||||||
// user guide.
|
|
||||||
//
|
|
||||||
// While this implementation CAN work in an external context, it is developed
|
|
||||||
// primarily for SDK use and you may encounter fringe behaviors around header
|
|
||||||
// canonicalization.
|
|
||||||
//
|
|
||||||
// # Pre-escaping a request URI
|
|
||||||
//
|
|
||||||
// AWS v4 signature validation requires that the canonical string's URI path
|
|
||||||
// component must be the escaped form of the HTTP request's path.
|
|
||||||
//
|
|
||||||
// The Go HTTP client will perform escaping automatically on the HTTP request.
|
|
||||||
// This may cause signature validation errors because the request differs from
|
|
||||||
// the URI path or query from which the signature was generated.
|
|
||||||
//
|
|
||||||
// Because of this, we recommend that you explicitly escape the request when
|
|
||||||
// using this signer outside of the SDK to prevent possible signature mismatch.
|
|
||||||
// This can be done by setting URL.Opaque on the request. The signer will
|
|
||||||
// prefer that value, falling back to the return of URL.EscapedPath if unset.
|
|
||||||
//
|
|
||||||
// When setting URL.Opaque you must do so in the form of:
|
|
||||||
//
|
|
||||||
// "//<hostname>/<path>"
|
|
||||||
//
|
|
||||||
// // e.g.
|
|
||||||
// "//example.com/some/path"
|
|
||||||
//
|
|
||||||
// The leading "//" and hostname are required or the escaping will not work
|
|
||||||
// correctly.
|
|
||||||
//
|
|
||||||
// The TestStandaloneSign unit test provides a complete example of using the
|
|
||||||
// signer outside of the SDK and pre-escaping the URI path.
|
|
||||||
//
|
|
||||||
// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"hash"
|
|
||||||
"net/http"
|
|
||||||
"net/textproto"
|
|
||||||
"net/url"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/smithy/encoding/httpbinding"
|
|
||||||
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/internal/v4"
|
|
||||||
"github.com/aws/aws-sdk-go-v2/aws"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// signingAlgorithm is the SigV4 algorithm identifier used in the
	// Authorization header and X-Amz-Algorithm query parameter.
	signingAlgorithm    = "AWS4-HMAC-SHA256"
	authorizationHeader = "Authorization"

	// Version of signing v4
	Version = "SigV4"
)
|
|
||||||
|
|
||||||
// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests
type HTTPSigner interface {
	SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error
}

// keyDerivator abstracts signing-key derivation so it can be swapped
// (e.g. for the cached SigningKeyDeriver) in tests.
type keyDerivator interface {
	DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte
}
|
|
||||||
|
|
||||||
// SignerOptions is the SigV4 Signer options.
type SignerOptions struct {
	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
	// request header to the request's query string. This is most commonly used
	// with pre-signed requests preventing headers from being added to the
	// request's query string.
	DisableHeaderHoisting bool

	// Disables the automatic escaping of the URI path of the request for the
	// signature's canonical string's path. For services that do not need additional
	// escaping then use this to disable the signer escaping the path.
	//
	// S3 is an example of a service that does not need additional escaping.
	//
	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
	DisableURIPathEscaping bool

	// The logger to send log messages to.
	Logger *zap.Logger

	// Enable logging of signed requests.
	// This will enable logging of the canonical request, the string to sign, and for presigning the subsequent
	// presigned URL.
	LogSigning bool

	// Disables setting the session token on the request as part of signing
	// through X-Amz-Security-Token. This is needed for variations of v4 that
	// present the token elsewhere.
	DisableSessionToken bool
}
|
|
||||||
|
|
||||||
// Signer applies AWS v4 signing to given request. Use this to sign requests
// that need to be signed with AWS V4 Signatures.
type Signer struct {
	options      SignerOptions // defaults applied to every request; overridable per call
	keyDerivator keyDerivator  // derives the HMAC signing key for each request's scope
}
|
|
||||||
|
|
||||||
// NewSigner returns a new SigV4 Signer
|
|
||||||
func NewSigner(optFns ...func(signer *SignerOptions)) *Signer {
|
|
||||||
options := SignerOptions{}
|
|
||||||
|
|
||||||
for _, fn := range optFns {
|
|
||||||
fn(&options)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()}
|
|
||||||
}
|
|
||||||
|
|
||||||
// httpSigner carries the per-request state needed to produce one SigV4
// signature. It is constructed by SignHTTP/PresignHTTP (or directly in
// tests) and used for a single Build call.
type httpSigner struct {
	Request      *http.Request
	ServiceName  string
	Region       string
	Time         v4Internal.SigningTime
	Credentials  aws.Credentials
	KeyDerivator keyDerivator
	IsPreSign    bool // true when producing a pre-signed URL (query-string auth)

	// PayloadHash is the hex-encoded SHA-256 of the request payload, or a
	// service-specific sentinel such as "UNSIGNED-PAYLOAD".
	PayloadHash string

	// Per-request copies of the corresponding SignerOptions flags.
	DisableHeaderHoisting  bool
	DisableURIPathEscaping bool
	DisableSessionToken    bool
}
|
|
||||||
|
|
||||||
// Build computes the SigV4 signature for s.Request and returns the signed
// request together with the intermediate signing artifacts (canonical
// string, string to sign) used for logging. For pre-signed requests
// (s.IsPreSign) the signature and signing parameters are placed in the query
// string; otherwise the Authorization header is set. The request is modified
// in place.
func (s *httpSigner) Build() (signedRequest, error) {
	req := s.Request

	query := req.URL.Query()
	headers := req.Header

	// Add X-Amz-Date (and, when enabled, the session token) to the request.
	s.setRequiredSigningFields(headers, query)

	// Sort Each Query Key's Values
	for key := range query {
		sort.Strings(query[key])
	}

	v4Internal.SanitizeHostForHeader(req)

	credentialScope := s.buildCredentialScope()
	credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope
	if s.IsPreSign {
		query.Set(v4Internal.AmzCredentialKey, credentialStr)
	}

	// For pre-signed requests (unless disabled), hoist eligible headers into
	// the query string so consumers of the URL need not send them as headers.
	unsignedHeaders := headers
	if s.IsPreSign && !s.DisableHeaderHoisting {
		var urlValues url.Values
		urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers)
		for k := range urlValues {
			query[k] = urlValues[k]
		}
	}

	// Prefer the request's explicit Host over the URL's host when present.
	host := req.URL.Host
	if len(req.Host) > 0 {
		host = req.Host
	}

	var (
		signedHeaders      http.Header
		signedHeadersStr   string
		canonicalHeaderStr string
	)

	// Pre-signed and header-signed requests exclude different header sets.
	if s.IsPreSign {
		signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredPresignedHeaders, unsignedHeaders, s.Request.ContentLength)
	} else {
		signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
	}

	if s.IsPreSign {
		query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr)
	}

	// SigV4 requires spaces encoded as %20, not '+', in the canonical query.
	var rawQuery strings.Builder
	rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1))

	canonicalURI := v4Internal.GetURIPath(req.URL)
	if !s.DisableURIPathEscaping {
		canonicalURI = httpbinding.EscapePath(canonicalURI, false)
	}

	canonicalString := s.buildCanonicalString(
		req.Method,
		canonicalURI,
		rawQuery.String(),
		signedHeadersStr,
		canonicalHeaderStr,
	)

	strToSign := s.buildStringToSign(credentialScope, canonicalString)
	signingSignature, err := s.buildSignature(strToSign)
	if err != nil {
		return signedRequest{}, err
	}

	// Attach the signature: query string for pre-signed URLs, otherwise the
	// Authorization header (append to h[:0] replaces any existing value).
	if s.IsPreSign {
		rawQuery.WriteString("&X-Amz-Signature=")
		rawQuery.WriteString(signingSignature)
	} else {
		headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature))
	}

	req.URL.RawQuery = rawQuery.String()

	return signedRequest{
		Request:         req,
		SignedHeaders:   signedHeaders,
		CanonicalString: canonicalString,
		StringToSign:    strToSign,
		PreSigned:       s.IsPreSign,
	}, nil
}
|
|
||||||
|
|
||||||
func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string {
|
|
||||||
const credential = "Credential="
|
|
||||||
const signedHeaders = "SignedHeaders="
|
|
||||||
const signature = "Signature="
|
|
||||||
const commaSpace = ", "
|
|
||||||
|
|
||||||
var parts strings.Builder
|
|
||||||
parts.Grow(len(signingAlgorithm) + 1 +
|
|
||||||
len(credential) + len(credentialStr) + 2 +
|
|
||||||
len(signedHeaders) + len(signedHeadersStr) + 2 +
|
|
||||||
len(signature) + len(signingSignature),
|
|
||||||
)
|
|
||||||
parts.WriteString(signingAlgorithm)
|
|
||||||
parts.WriteRune(' ')
|
|
||||||
parts.WriteString(credential)
|
|
||||||
parts.WriteString(credentialStr)
|
|
||||||
parts.WriteString(commaSpace)
|
|
||||||
parts.WriteString(signedHeaders)
|
|
||||||
parts.WriteString(signedHeadersStr)
|
|
||||||
parts.WriteString(commaSpace)
|
|
||||||
parts.WriteString(signature)
|
|
||||||
parts.WriteString(signingSignature)
|
|
||||||
return parts.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the
|
|
||||||
// request is made to, and time the request is signed at. The signTime allows
|
|
||||||
// you to specify that a request is signed for the future, and cannot be
|
|
||||||
// used until then.
|
|
||||||
//
|
|
||||||
// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
|
|
||||||
// must be provided. Even if the request has no payload (aka body). If the
|
|
||||||
// request has no payload you should use the hex encoded SHA-256 of an empty
|
|
||||||
// string as the payloadHash value.
|
|
||||||
//
|
|
||||||
// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
|
||||||
//
|
|
||||||
// Some services such as Amazon S3 accept alternative values for the payload
|
|
||||||
// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
|
|
||||||
// included in the request signature.
|
|
||||||
//
|
|
||||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
|
|
||||||
//
|
|
||||||
// Sign differs from Presign in that it will sign the request using HTTP
|
|
||||||
// header values. This type of signing is intended for http.Request values that
|
|
||||||
// will not be shared, or are shared in a way the header values on the request
|
|
||||||
// will not be lost.
|
|
||||||
//
|
|
||||||
// The passed in request will be modified in place.
|
|
||||||
func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error {
|
|
||||||
options := s.options
|
|
||||||
|
|
||||||
for _, fn := range optFns {
|
|
||||||
fn(&options)
|
|
||||||
}
|
|
||||||
|
|
||||||
signer := &httpSigner{
|
|
||||||
Request: r,
|
|
||||||
PayloadHash: payloadHash,
|
|
||||||
ServiceName: service,
|
|
||||||
Region: region,
|
|
||||||
Credentials: credentials,
|
|
||||||
Time: v4Internal.NewSigningTime(signingTime.UTC()),
|
|
||||||
DisableHeaderHoisting: options.DisableHeaderHoisting,
|
|
||||||
DisableURIPathEscaping: options.DisableURIPathEscaping,
|
|
||||||
DisableSessionToken: options.DisableSessionToken,
|
|
||||||
KeyDerivator: s.keyDerivator,
|
|
||||||
}
|
|
||||||
|
|
||||||
signedRequest, err := signer.Build()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
logSigningInfo(ctx, options, &signedRequest, false)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PresignHTTP signs AWS v4 requests with the payload hash, service name, region
|
|
||||||
// the request is made to, and time the request is signed at. The signTime
|
|
||||||
// allows you to specify that a request is signed for the future, and cannot
|
|
||||||
// be used until then.
|
|
||||||
//
|
|
||||||
// Returns the signed URL and the map of HTTP headers that were included in the
|
|
||||||
// signature or an error if signing the request failed. For presigned requests
|
|
||||||
// these headers and their values must be included on the HTTP request when it
|
|
||||||
// is made. This is helpful to know what header values need to be shared with
|
|
||||||
// the party the presigned request will be distributed to.
|
|
||||||
//
|
|
||||||
// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
|
|
||||||
// must be provided. Even if the request has no payload (aka body). If the
|
|
||||||
// request has no payload you should use the hex encoded SHA-256 of an empty
|
|
||||||
// string as the payloadHash value.
|
|
||||||
//
|
|
||||||
// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
|
||||||
//
|
|
||||||
// Some services such as Amazon S3 accept alternative values for the payload
|
|
||||||
// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
|
|
||||||
// included in the request signature.
|
|
||||||
//
|
|
||||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
|
|
||||||
//
|
|
||||||
// PresignHTTP differs from SignHTTP in that it will sign the request using
|
|
||||||
// query string instead of header values. This allows you to share the
|
|
||||||
// Presigned Request's URL with third parties, or distribute it throughout your
|
|
||||||
// system with minimal dependencies.
|
|
||||||
//
|
|
||||||
// PresignHTTP will not set the expires time of the presigned request
|
|
||||||
// automatically. To specify the expire duration for a request add the
|
|
||||||
// "X-Amz-Expires" query parameter on the request with the value as the
|
|
||||||
// duration in seconds the presigned URL should be considered valid for. This
|
|
||||||
// parameter is not used by all AWS services, and is most notable used by
|
|
||||||
// Amazon S3 APIs.
|
|
||||||
//
|
|
||||||
// expires := 20 * time.Minute
|
|
||||||
// query := req.URL.Query()
|
|
||||||
// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10))
|
|
||||||
// req.URL.RawQuery = query.Encode()
|
|
||||||
//
|
|
||||||
// This method does not modify the provided request.
|
|
||||||
func (s *Signer) PresignHTTP(
|
|
||||||
ctx context.Context, credentials aws.Credentials, r *http.Request,
|
|
||||||
payloadHash string, service string, region string, signingTime time.Time,
|
|
||||||
optFns ...func(*SignerOptions),
|
|
||||||
) (signedURI string, signedHeaders http.Header, err error) {
|
|
||||||
options := s.options
|
|
||||||
|
|
||||||
for _, fn := range optFns {
|
|
||||||
fn(&options)
|
|
||||||
}
|
|
||||||
|
|
||||||
signer := &httpSigner{
|
|
||||||
Request: r.Clone(r.Context()),
|
|
||||||
PayloadHash: payloadHash,
|
|
||||||
ServiceName: service,
|
|
||||||
Region: region,
|
|
||||||
Credentials: credentials,
|
|
||||||
Time: v4Internal.NewSigningTime(signingTime.UTC()),
|
|
||||||
IsPreSign: true,
|
|
||||||
DisableHeaderHoisting: options.DisableHeaderHoisting,
|
|
||||||
DisableURIPathEscaping: options.DisableURIPathEscaping,
|
|
||||||
DisableSessionToken: options.DisableSessionToken,
|
|
||||||
KeyDerivator: s.keyDerivator,
|
|
||||||
}
|
|
||||||
|
|
||||||
signedRequest, err := signer.Build()
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
logSigningInfo(ctx, options, &signedRequest, true)
|
|
||||||
|
|
||||||
signedHeaders = make(http.Header)
|
|
||||||
|
|
||||||
// For the signed headers we canonicalize the header keys in the returned map.
|
|
||||||
// This avoids situations where can standard library double headers like host header. For example the standard
|
|
||||||
// library will set the Host header, even if it is present in lower-case form.
|
|
||||||
for k, v := range signedRequest.SignedHeaders {
|
|
||||||
key := textproto.CanonicalMIMEHeaderKey(k)
|
|
||||||
signedHeaders[key] = append(signedHeaders[key], v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
return signedRequest.Request.URL.String(), signedHeaders, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildCredentialScope returns the credential scope string for this
// request's signing time, region and service, as produced by
// v4Internal.BuildCredentialScope.
func (s *httpSigner) buildCredentialScope() string {
	return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName)
}
|
|
||||||
|
|
||||||
func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
|
|
||||||
query := url.Values{}
|
|
||||||
unsignedHeaders := http.Header{}
|
|
||||||
|
|
||||||
// A list of headers to be converted to lower case to mitigate a limitation from S3
|
|
||||||
lowerCaseHeaders := map[string]string{
|
|
||||||
"X-Amz-Expected-Bucket-Owner": "x-amz-expected-bucket-owner", // see #2508
|
|
||||||
"X-Amz-Request-Payer": "x-amz-request-payer", // see #2764
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, h := range header {
|
|
||||||
if newKey, ok := lowerCaseHeaders[k]; ok {
|
|
||||||
k = newKey
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.IsValid(k) {
|
|
||||||
query[k] = h
|
|
||||||
} else {
|
|
||||||
unsignedHeaders[k] = h
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return query, unsignedHeaders
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildCanonicalHeaders selects and canonicalizes the headers included in
// the signature. It returns:
//   - signed: the lower-cased header map that was signed (always includes
//     "host"),
//   - signedHeaders: the sorted, ";"-joined list of signed header names,
//   - canonicalHeadersStr: the "name:value\n" block used in the canonical
//     request; values are stripped of excess whitespace and multiple values
//     are joined with ",".
//
// Headers rejected by rule are excluded. The length parameter is currently
// unused because the upstream content-length handling is deliberately
// disabled here (the code is kept commented for reference); the parameter is
// retained for signature stability.
func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
	signed = make(http.Header)

	var headers []string
	const hostHeader = "host"
	headers = append(headers, hostHeader)
	signed[hostHeader] = append(signed[hostHeader], host)

	// Deliberately disabled: do not add content-length as an extra signed
	// header (diverges from upstream aws-sdk-go-v2).
	//const contentLengthHeader = "content-length"
	//if length > 0 {
	//	headers = append(headers, contentLengthHeader)
	//	signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
	//}

	for k, v := range header {
		if !rule.IsValid(k) {
			continue // ignored header
		}
		//if strings.EqualFold(k, contentLengthHeader) {
		//	// prevent signing already handled content-length header.
		//	continue
		//}

		lowerCaseKey := strings.ToLower(k)
		if _, ok := signed[lowerCaseKey]; ok {
			// include additional values
			signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
			continue
		}

		headers = append(headers, lowerCaseKey)
		signed[lowerCaseKey] = v
	}
	// The canonical request requires the signed header names to be sorted.
	sort.Strings(headers)

	signedHeaders = strings.Join(headers, ";")

	var canonicalHeaders strings.Builder
	n := len(headers)
	const colon = ':'
	for i := 0; i < n; i++ {
		if headers[i] == hostHeader {
			canonicalHeaders.WriteString(hostHeader)
			canonicalHeaders.WriteRune(colon)
			canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host))
		} else {
			canonicalHeaders.WriteString(headers[i])
			canonicalHeaders.WriteRune(colon)
			// Trim out leading, trailing, and dedup inner spaces from signed header values.
			values := signed[headers[i]]
			for j, v := range values {
				cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
				canonicalHeaders.WriteString(cleanedValue)
				if j < len(values)-1 {
					canonicalHeaders.WriteRune(',')
				}
			}
		}
		canonicalHeaders.WriteRune('\n')
	}
	canonicalHeadersStr = canonicalHeaders.String()

	return signed, signedHeaders, canonicalHeadersStr
}
|
|
||||||
|
|
||||||
func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string {
|
|
||||||
return strings.Join([]string{
|
|
||||||
method,
|
|
||||||
uri,
|
|
||||||
query,
|
|
||||||
canonicalHeaders,
|
|
||||||
signedHeaders,
|
|
||||||
s.PayloadHash,
|
|
||||||
}, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string {
|
|
||||||
return strings.Join([]string{
|
|
||||||
signingAlgorithm,
|
|
||||||
s.Time.TimeFormat(),
|
|
||||||
credentialScope,
|
|
||||||
hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))),
|
|
||||||
}, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeHash(hash hash.Hash, b []byte) []byte {
|
|
||||||
hash.Reset()
|
|
||||||
hash.Write(b)
|
|
||||||
return hash.Sum(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildSignature derives the signing key for this request's credentials and
// scope and returns the hex-encoded HMAC-SHA256 of strToSign. The error
// result is always nil; it is kept for call-site compatibility.
func (s *httpSigner) buildSignature(strToSign string) (string, error) {
	key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time)
	return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil
}
|
|
||||||
|
|
||||||
// setRequiredSigningFields adds the mandatory SigV4 parameters to the
// request prior to signing. For pre-signed requests the algorithm, optional
// session token, and date are written to the query string; otherwise the
// date and optional session token are written as headers.
func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) {
	amzDate := s.Time.TimeFormat()

	if s.IsPreSign {
		query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm)
		sessionToken := s.Credentials.SessionToken
		if !s.DisableSessionToken && len(sessionToken) > 0 {
			query.Set("X-Amz-Security-Token", sessionToken)
		}

		query.Set(v4Internal.AmzDateKey, amzDate)
		return
	}

	// append(h[:0], v) replaces any existing header values while reusing the
	// slice's backing array.
	headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate)

	if !s.DisableSessionToken && len(s.Credentials.SessionToken) > 0 {
		headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken)
	}
}
|
|
||||||
|
|
||||||
func logSigningInfo(_ context.Context, options SignerOptions, request *signedRequest, isPresign bool) {
|
|
||||||
if !options.LogSigning {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
signedURLMsg := ""
|
|
||||||
if isPresign {
|
|
||||||
signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
if options.Logger != nil {
|
|
||||||
options.Logger.Debug(fmt.Sprintf(logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// signedRequest is the result of a successful httpSigner.Build. Besides the
// signed request itself it exposes the intermediate signing artifacts, used
// for debug logging and for returning the signed headers of pre-signed
// requests.
type signedRequest struct {
	Request         *http.Request
	SignedHeaders   http.Header
	CanonicalString string
	StringToSign    string
	PreSigned       bool
}
|
|
||||||
|
|
||||||
// logSignInfoMsg is the debug log template for a signed request. It is
// filled with the canonical request, the string to sign, and (for
// presigning) the signed-URL section.
const logSignInfoMsg = `Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
|
|
||||||
// logSignedURLMsg is appended to logSignInfoMsg when presigning, carrying
// the resulting signed URL.
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
|
|
|
@ -1,370 +0,0 @@
|
||||||
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/v4_test.go
|
|
||||||
// with changes:
|
|
||||||
// * don't duplicate content-length as signed header
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/internal/v4"
|
|
||||||
"github.com/aws/aws-sdk-go-v2/aws"
|
|
||||||
)
|
|
||||||
|
|
||||||
// testCredentials is the fixed credential set used by every test in this file.
var testCredentials = aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET", SessionToken: "SESSION"}
|
|
||||||
|
|
||||||
func buildRequest(serviceName, region, body string) (*http.Request, string) {
|
|
||||||
reader := strings.NewReader(body)
|
|
||||||
return buildRequestWithBodyReader(serviceName, region, reader)
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildRequestWithBodyReader(serviceName, region string, body io.Reader) (*http.Request, string) {
|
|
||||||
var bodyLen int
|
|
||||||
|
|
||||||
type lenner interface {
|
|
||||||
Len() int
|
|
||||||
}
|
|
||||||
if lr, ok := body.(lenner); ok {
|
|
||||||
bodyLen = lr.Len()
|
|
||||||
}
|
|
||||||
|
|
||||||
endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
|
|
||||||
req, _ := http.NewRequest("POST", endpoint, body)
|
|
||||||
req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"
|
|
||||||
req.Header.Set("X-Amz-Target", "prefix.Operation")
|
|
||||||
req.Header.Set("Content-Type", "application/x-amz-json-1.0")
|
|
||||||
|
|
||||||
if bodyLen > 0 {
|
|
||||||
req.ContentLength = int64(bodyLen)
|
|
||||||
}
|
|
||||||
|
|
||||||
req.Header.Set("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
|
|
||||||
req.Header.Add("X-Amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
|
|
||||||
req.Header.Add("X-amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
|
|
||||||
|
|
||||||
h := sha256.New()
|
|
||||||
_, _ = io.Copy(h, body)
|
|
||||||
payloadHash := hex.EncodeToString(h.Sum(nil))
|
|
||||||
|
|
||||||
return req, payloadHash
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestPresignRequest pins the presigned query parameters (signature,
// credential, signed headers, date) for a fixed request at epoch time, and
// checks that hoisted headers are reported in the returned header map.
func TestPresignRequest(t *testing.T) {
	req, body := buildRequest("dynamodb", "us-east-1", "{}")
	req.Header.Set("Content-Length", "2")

	query := req.URL.Query()
	query.Set("X-Amz-Expires", "300")
	req.URL.RawQuery = query.Encode()

	signer := NewSigner()
	signed, headers, err := signer.PresignHTTP(context.Background(), testCredentials, req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectedDate := "19700101T000000Z"
	expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
	expectedSig := "122f0b9e091e4ba84286097e2b3404a1f1f4c4aad479adda95b7dff0ccbe5581"
	expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
	expectedTarget := "prefix.Operation"

	// NOTE(review): the slice below keeps the leading "?", so the first
	// (alphabetically smallest) parameter is parsed under a "?"-prefixed
	// key; the keys asserted here are unaffected.
	q, err := url.ParseQuery(signed[strings.Index(signed, "?"):])
	if err != nil {
		t.Errorf("expect no error, got %v", err)
	}

	if e, a := expectedSig, q.Get("X-Amz-Signature"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedCred, q.Get("X-Amz-Credential"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
		t.Errorf("expect %v to be empty", a)
	}
	if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}

	// Every signed header must be surfaced in the returned header map.
	for _, h := range strings.Split(expectedHeaders, ";") {
		v := headers.Get(h)
		if len(v) == 0 {
			t.Errorf("expect %v, to be present in header map", h)
		}
	}
}
|
|
||||||
|
|
||||||
// TestPresignBodyWithArrayRequest is TestPresignRequest with a repeated
// query key ("Foo"), verifying multi-valued query parameters are signed
// deterministically (values sorted) at epoch time.
func TestPresignBodyWithArrayRequest(t *testing.T) {
	req, body := buildRequest("dynamodb", "us-east-1", "{}")
	req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
	req.Header.Set("Content-Length", "2")

	query := req.URL.Query()
	query.Set("X-Amz-Expires", "300")
	req.URL.RawQuery = query.Encode()

	signer := NewSigner()
	signed, headers, err := signer.PresignHTTP(context.Background(), testCredentials, req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	// NOTE(review): the slice keeps the leading "?"; see TestPresignRequest.
	q, err := url.ParseQuery(signed[strings.Index(signed, "?"):])
	if err != nil {
		t.Errorf("expect no error, got %v", err)
	}

	expectedDate := "19700101T000000Z"
	expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
	expectedSig := "e3ac55addee8711b76c6d608d762cff285fe8b627a057f8b5ec9268cf82c08b1"
	expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
	expectedTarget := "prefix.Operation"

	if e, a := expectedSig, q.Get("X-Amz-Signature"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedCred, q.Get("X-Amz-Credential"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
		t.Errorf("expect %v to be empty, was not", a)
	}
	if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}

	// Every signed header must be surfaced in the returned header map.
	for _, h := range strings.Split(expectedHeaders, ";") {
		v := headers.Get(h)
		if len(v) == 0 {
			t.Errorf("expect %v, to be present in header map", h)
		}
	}
}
|
|
||||||
|
|
||||||
// TestSignRequest pins the full Authorization header and X-Amz-Date produced
// by header-based signing for a fixed request at epoch time.
func TestSignRequest(t *testing.T) {
	req, body := buildRequest("dynamodb", "us-east-1", "{}")
	req.Header.Set("Content-Length", "2")
	signer := NewSigner()
	err := signer.SignHTTP(context.Background(), testCredentials, req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	expectedDate := "19700101T000000Z"
	expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-security-token;x-amz-target, Signature=a518299330494908a70222cec6899f6f32f297f8595f6df1776d998936652ad9"

	q := req.Header
	if e, a := expectedSig, q.Get("Authorization"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
}
|
|
||||||
|
|
||||||
// TestBuildCanonicalRequest verifies that Build sorts repeated query values
// and preserves the opaque URL of the request.
func TestBuildCanonicalRequest(t *testing.T) {
	req, _ := buildRequest("dynamodb", "us-east-1", "{}")
	req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"

	ctx := &httpSigner{
		ServiceName:  "dynamodb",
		Region:       "us-east-1",
		Request:      req,
		Time:         v4Internal.NewSigningTime(time.Now()),
		KeyDerivator: v4Internal.NewSigningKeyDeriver(),
	}

	build, err := ctx.Build()
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	// Values of "Foo" must come back sorted: a, m, o, z.
	expected := "https://example.org/bucket/key-._~,!@#$%^&*()?Foo=a&Foo=m&Foo=o&Foo=z"
	if e, a := expected, build.Request.URL.String(); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
}
|
|
||||||
|
|
||||||
func TestSigner_SignHTTP_NoReplaceRequestBody(t *testing.T) {
|
|
||||||
req, bodyHash := buildRequest("dynamodb", "us-east-1", "{}")
|
|
||||||
req.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
|
|
||||||
|
|
||||||
s := NewSigner()
|
|
||||||
|
|
||||||
origBody := req.Body
|
|
||||||
|
|
||||||
err := s.SignHTTP(context.Background(), testCredentials, req, bodyHash, "dynamodb", "us-east-1", time.Now())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expect no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.Body != origBody {
|
|
||||||
t.Errorf("expect request body to not be chagned")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestRequestHost verifies that an explicitly set req.Host (rather than the
// URL's host) is the value used for the canonical "host" header.
func TestRequestHost(t *testing.T) {
	req, _ := buildRequest("dynamodb", "us-east-1", "{}")
	req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
	req.Host = "myhost"

	query := req.URL.Query()
	query.Set("X-Amz-Expires", "5")
	req.URL.RawQuery = query.Encode()

	ctx := &httpSigner{
		ServiceName:  "dynamodb",
		Region:       "us-east-1",
		Request:      req,
		Time:         v4Internal.NewSigningTime(time.Now()),
		KeyDerivator: v4Internal.NewSigningKeyDeriver(),
	}

	build, err := ctx.Build()
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	if !strings.Contains(build.CanonicalString, "host:"+req.Host) {
		t.Errorf("canonical host header invalid")
	}
}
|
|
||||||
|
|
||||||
// TestSign_buildCanonicalHeadersContentLengthPresent verifies that an
// explicitly set Content-Length header appears (exactly once) in the
// canonical string — i.e. it is signed as a regular header rather than
// being injected separately.
func TestSign_buildCanonicalHeadersContentLengthPresent(t *testing.T) {
	body := `{"description": "this is a test"}`
	req, _ := buildRequest("dynamodb", "us-east-1", body)
	req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
	req.Host = "myhost"

	contentLength := fmt.Sprintf("%d", len([]byte(body)))
	req.Header.Add("Content-Length", contentLength)

	query := req.URL.Query()
	query.Set("X-Amz-Expires", "5")
	req.URL.RawQuery = query.Encode()

	ctx := &httpSigner{
		ServiceName:  "dynamodb",
		Region:       "us-east-1",
		Request:      req,
		Time:         v4Internal.NewSigningTime(time.Now()),
		KeyDerivator: v4Internal.NewSigningKeyDeriver(),
	}

	build, err := ctx.Build()
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	if !strings.Contains(build.CanonicalString, "content-length:"+contentLength+"\n") {
		t.Errorf("canonical header content-length invalid")
	}
}
|
|
||||||
|
|
||||||
func TestSign_buildCanonicalHeaders(t *testing.T) {
|
|
||||||
serviceName := "mockAPI"
|
|
||||||
region := "mock-region"
|
|
||||||
endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
|
|
||||||
|
|
||||||
req, err := http.NewRequest("POST", endpoint, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create request, %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
req.Header.Set("FooInnerSpace", " inner space ")
|
|
||||||
req.Header.Set("FooLeadingSpace", " leading-space")
|
|
||||||
req.Header.Add("FooMultipleSpace", "no-space")
|
|
||||||
req.Header.Add("FooMultipleSpace", "\ttab-space")
|
|
||||||
req.Header.Add("FooMultipleSpace", "trailing-space ")
|
|
||||||
req.Header.Set("FooNoSpace", "no-space")
|
|
||||||
req.Header.Set("FooTabSpace", "\ttab-space\t")
|
|
||||||
req.Header.Set("FooTrailingSpace", "trailing-space ")
|
|
||||||
req.Header.Set("FooWrappedSpace", " wrapped-space ")
|
|
||||||
|
|
||||||
ctx := &httpSigner{
|
|
||||||
ServiceName: serviceName,
|
|
||||||
Region: region,
|
|
||||||
Request: req,
|
|
||||||
Time: v4Internal.NewSigningTime(time.Date(2021, 10, 20, 12, 42, 0, 0, time.UTC)),
|
|
||||||
KeyDerivator: v4Internal.NewSigningKeyDeriver(),
|
|
||||||
}
|
|
||||||
|
|
||||||
build, err := ctx.Build()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
expectCanonicalString := strings.Join([]string{
|
|
||||||
`POST`,
|
|
||||||
`/`,
|
|
||||||
``,
|
|
||||||
`fooinnerspace:inner space`,
|
|
||||||
`fooleadingspace:leading-space`,
|
|
||||||
`foomultiplespace:no-space,tab-space,trailing-space`,
|
|
||||||
`foonospace:no-space`,
|
|
||||||
`footabspace:tab-space`,
|
|
||||||
`footrailingspace:trailing-space`,
|
|
||||||
`foowrappedspace:wrapped-space`,
|
|
||||||
`host:mockAPI.mock-region.amazonaws.com`,
|
|
||||||
`x-amz-date:20211020T124200Z`,
|
|
||||||
``,
|
|
||||||
`fooinnerspace;fooleadingspace;foomultiplespace;foonospace;footabspace;footrailingspace;foowrappedspace;host;x-amz-date`,
|
|
||||||
``,
|
|
||||||
}, "\n")
|
|
||||||
if diff := cmpDiff(expectCanonicalString, build.CanonicalString); diff != "" {
|
|
||||||
t.Errorf("expect match, got\n%s", diff)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkPresignRequest(b *testing.B) {
|
|
||||||
signer := NewSigner()
|
|
||||||
req, bodyHash := buildRequest("dynamodb", "us-east-1", "{}")
|
|
||||||
|
|
||||||
query := req.URL.Query()
|
|
||||||
query.Set("X-Amz-Expires", "5")
|
|
||||||
req.URL.RawQuery = query.Encode()
|
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
signer.PresignHTTP(context.Background(), testCredentials, req, bodyHash, "dynamodb", "us-east-1", time.Now())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkSignRequest(b *testing.B) {
|
|
||||||
signer := NewSigner()
|
|
||||||
req, bodyHash := buildRequest("dynamodb", "us-east-1", "{}")
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
signer.SignHTTP(context.Background(), testCredentials, req, bodyHash, "dynamodb", "us-east-1", time.Now())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func cmpDiff(e, a interface{}) string {
|
|
||||||
if !reflect.DeepEqual(e, a) {
|
|
||||||
return fmt.Sprintf("%v != %v", e, a)
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
3
api/cache/access_control.go
vendored
3
api/cache/access_control.go
vendored
|
@ -4,7 +4,6 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||||
"github.com/bluele/gcache"
|
"github.com/bluele/gcache"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
|
@ -47,7 +46,7 @@ func (o *AccessControlCache) Get(owner user.ID, key string) bool {
|
||||||
|
|
||||||
result, ok := entry.(bool)
|
result, ok := entry.(bool)
|
||||||
if !ok {
|
if !ok {
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
32
api/cache/accessbox.go
vendored
32
api/cache/accessbox.go
vendored
|
@ -5,8 +5,6 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
"github.com/bluele/gcache"
|
"github.com/bluele/gcache"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
|
@ -25,13 +23,6 @@ type (
|
||||||
Lifetime time.Duration
|
Lifetime time.Duration
|
||||||
Logger *zap.Logger
|
Logger *zap.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessBoxCacheValue struct {
|
|
||||||
Box *accessbox.Box
|
|
||||||
Attributes []object.Attribute
|
|
||||||
PutTime time.Time
|
|
||||||
Address *oid.Address
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -50,23 +41,23 @@ func DefaultAccessBoxConfig(logger *zap.Logger) *Config {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewAccessBoxCache creates an object of AccessBoxCache.
|
// NewAccessBoxCache creates an object of BucketCache.
|
||||||
func NewAccessBoxCache(config *Config) *AccessBoxCache {
|
func NewAccessBoxCache(config *Config) *AccessBoxCache {
|
||||||
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
||||||
|
|
||||||
return &AccessBoxCache{cache: gc, logger: config.Logger}
|
return &AccessBoxCache{cache: gc, logger: config.Logger}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns a cached accessbox.
|
// Get returns a cached object.
|
||||||
func (o *AccessBoxCache) Get(accessKeyID string) *AccessBoxCacheValue {
|
func (o *AccessBoxCache) Get(address oid.Address) *accessbox.Box {
|
||||||
entry, err := o.cache.Get(accessKeyID)
|
entry, err := o.cache.Get(address)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
result, ok := entry.(*AccessBoxCacheValue)
|
result, ok := entry.(*accessbox.Box)
|
||||||
if !ok {
|
if !ok {
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -74,12 +65,7 @@ func (o *AccessBoxCache) Get(accessKeyID string) *AccessBoxCacheValue {
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put stores an accessbox to cache.
|
// Put stores an object to cache.
|
||||||
func (o *AccessBoxCache) Put(accessKeyID string, val *AccessBoxCacheValue) error {
|
func (o *AccessBoxCache) Put(address oid.Address, box *accessbox.Box) error {
|
||||||
return o.cache.Set(accessKeyID, val)
|
return o.cache.Set(address, box)
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes an accessbox from cache.
|
|
||||||
func (o *AccessBoxCache) Delete(accessKeyID string) {
|
|
||||||
o.cache.Remove(accessKeyID)
|
|
||||||
}
|
}
|
||||||
|
|
68
api/cache/buckets.go
vendored
68
api/cache/buckets.go
vendored
|
@ -5,17 +5,14 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"github.com/bluele/gcache"
|
"github.com/bluele/gcache"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BucketCache contains cache with objects and the lifetime of cache entries.
|
// BucketCache contains cache with objects and the lifetime of cache entries.
|
||||||
type BucketCache struct {
|
type BucketCache struct {
|
||||||
cache gcache.Cache
|
cache gcache.Cache
|
||||||
cidCache gcache.Cache
|
logger *zap.Logger
|
||||||
logger *zap.Logger
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -35,44 +32,13 @@ func DefaultBucketConfig(logger *zap.Logger) *Config {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBucketCache creates an object of BucketCache.
|
// NewBucketCache creates an object of BucketCache.
|
||||||
func NewBucketCache(config *Config, cidCache bool) *BucketCache {
|
func NewBucketCache(config *Config) *BucketCache {
|
||||||
cache := &BucketCache{
|
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
||||||
cache: gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build(),
|
return &BucketCache{cache: gc, logger: config.Logger}
|
||||||
logger: config.Logger,
|
|
||||||
}
|
|
||||||
|
|
||||||
if cidCache {
|
|
||||||
cache.cidCache = gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
|
||||||
}
|
|
||||||
return cache
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns a cached object.
|
// Get returns a cached object.
|
||||||
func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
|
func (o *BucketCache) Get(key string) *data.BucketInfo {
|
||||||
return o.get(formKey(ns, bktName))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *BucketCache) GetByCID(cnrID cid.ID) *data.BucketInfo {
|
|
||||||
if o.cidCache == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
entry, err := o.cidCache.Get(cnrID)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
key, ok := entry.(string)
|
|
||||||
if !ok {
|
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
|
||||||
zap.String("expected", fmt.Sprintf("%T", key)))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return o.get(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *BucketCache) get(key string) *data.BucketInfo {
|
|
||||||
entry, err := o.cache.Get(key)
|
entry, err := o.cache.Get(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
|
@ -80,7 +46,7 @@ func (o *BucketCache) get(key string) *data.BucketInfo {
|
||||||
|
|
||||||
result, ok := entry.(*data.BucketInfo)
|
result, ok := entry.(*data.BucketInfo)
|
||||||
if !ok {
|
if !ok {
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -90,24 +56,10 @@ func (o *BucketCache) get(key string) *data.BucketInfo {
|
||||||
|
|
||||||
// Put puts an object to cache.
|
// Put puts an object to cache.
|
||||||
func (o *BucketCache) Put(bkt *data.BucketInfo) error {
|
func (o *BucketCache) Put(bkt *data.BucketInfo) error {
|
||||||
if o.cidCache != nil {
|
return o.cache.Set(bkt.Name, bkt)
|
||||||
if err := o.cidCache.Set(bkt.CID, formKey(bkt.Zone, bkt.Name)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete deletes an object from cache.
|
// Delete deletes an object from cache.
|
||||||
func (o *BucketCache) Delete(bkt *data.BucketInfo) bool {
|
func (o *BucketCache) Delete(key string) bool {
|
||||||
if o.cidCache != nil {
|
return o.cache.Remove(key)
|
||||||
o.cidCache.Remove(bkt.CID)
|
|
||||||
}
|
|
||||||
|
|
||||||
return o.cache.Remove(formKey(bkt.Zone, bkt.Name))
|
|
||||||
}
|
|
||||||
|
|
||||||
func formKey(zone, name string) string {
|
|
||||||
return name + "." + zone
|
|
||||||
}
|
}
|
||||||
|
|
68
api/cache/cache_test.go
vendored
68
api/cache/cache_test.go
vendored
|
@ -1,17 +1,12 @@
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"strings"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||||
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
|
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
|
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
"go.uber.org/zap/zaptest/observer"
|
"go.uber.org/zap/zaptest/observer"
|
||||||
|
@ -23,38 +18,33 @@ func TestAccessBoxCacheType(t *testing.T) {
|
||||||
|
|
||||||
addr := oidtest.Address()
|
addr := oidtest.Address()
|
||||||
box := &accessbox.Box{}
|
box := &accessbox.Box{}
|
||||||
val := &AccessBoxCacheValue{
|
|
||||||
Box: box,
|
|
||||||
}
|
|
||||||
|
|
||||||
accessKeyID := getAccessKeyID(addr)
|
err := cache.Put(addr, box)
|
||||||
|
|
||||||
err := cache.Put(accessKeyID, val)
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
resVal := cache.Get(accessKeyID)
|
val := cache.Get(addr)
|
||||||
require.Equal(t, box, resVal.Box)
|
require.Equal(t, box, val)
|
||||||
require.Equal(t, 0, observedLog.Len())
|
require.Equal(t, 0, observedLog.Len())
|
||||||
|
|
||||||
err = cache.cache.Set(accessKeyID, "tmp")
|
err = cache.cache.Set(addr, "tmp")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assertInvalidCacheEntry(t, cache.Get(accessKeyID), observedLog)
|
assertInvalidCacheEntry(t, cache.Get(addr), observedLog)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBucketsCacheType(t *testing.T) {
|
func TestBucketsCacheType(t *testing.T) {
|
||||||
logger, observedLog := getObservedLogger()
|
logger, observedLog := getObservedLogger()
|
||||||
cache := NewBucketCache(DefaultBucketConfig(logger), false)
|
cache := NewBucketCache(DefaultBucketConfig(logger))
|
||||||
|
|
||||||
bktInfo := &data.BucketInfo{Name: "bucket"}
|
bktInfo := &data.BucketInfo{Name: "bucket"}
|
||||||
|
|
||||||
err := cache.Put(bktInfo)
|
err := cache.Put(bktInfo)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
val := cache.Get("", bktInfo.Name)
|
val := cache.Get(bktInfo.Name)
|
||||||
require.Equal(t, bktInfo, val)
|
require.Equal(t, bktInfo, val)
|
||||||
require.Equal(t, 0, observedLog.Len())
|
require.Equal(t, 0, observedLog.Len())
|
||||||
|
|
||||||
err = cache.cache.Set(bktInfo.Name+"."+bktInfo.Zone, "tmp")
|
err = cache.cache.Set(bktInfo.Name, "tmp")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assertInvalidCacheEntry(t, cache.Get(bktInfo.Zone, bktInfo.Name), observedLog)
|
assertInvalidCacheEntry(t, cache.Get(bktInfo.Name), observedLog)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestObjectNamesCacheType(t *testing.T) {
|
func TestObjectNamesCacheType(t *testing.T) {
|
||||||
|
@ -186,42 +176,22 @@ func TestSettingsCacheType(t *testing.T) {
|
||||||
assertInvalidCacheEntry(t, cache.GetSettings(key), observedLog)
|
assertInvalidCacheEntry(t, cache.GetSettings(key), observedLog)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFrostFSIDSubjectCacheType(t *testing.T) {
|
func TestNotificationConfigurationCacheType(t *testing.T) {
|
||||||
logger, observedLog := getObservedLogger()
|
logger, observedLog := getObservedLogger()
|
||||||
cache := NewFrostfsIDCache(DefaultFrostfsIDConfig(logger))
|
cache := NewSystemCache(DefaultSystemConfig(logger))
|
||||||
|
|
||||||
key, err := util.Uint160DecodeStringLE("4ea976429703418ef00fc4912a409b6a0b973034")
|
key := "key"
|
||||||
require.NoError(t, err)
|
notificationConfig := &data.NotificationConfiguration{}
|
||||||
value := &client.SubjectExtended{}
|
|
||||||
|
|
||||||
err = cache.PutSubject(key, value)
|
err := cache.PutNotificationConfiguration(key, notificationConfig)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
val := cache.GetSubject(key)
|
val := cache.GetNotificationConfiguration(key)
|
||||||
require.Equal(t, value, val)
|
require.Equal(t, notificationConfig, val)
|
||||||
require.Equal(t, 0, observedLog.Len())
|
require.Equal(t, 0, observedLog.Len())
|
||||||
|
|
||||||
err = cache.cache.Set(key, "tmp")
|
err = cache.cache.Set(key, "tmp")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assertInvalidCacheEntry(t, cache.GetSubject(key), observedLog)
|
assertInvalidCacheEntry(t, cache.GetNotificationConfiguration(key), observedLog)
|
||||||
}
|
|
||||||
|
|
||||||
func TestFrostFSIDUserKeyCacheType(t *testing.T) {
|
|
||||||
logger, observedLog := getObservedLogger()
|
|
||||||
cache := NewFrostfsIDCache(DefaultFrostfsIDConfig(logger))
|
|
||||||
|
|
||||||
ns, name := "ns", "name"
|
|
||||||
value, err := keys.NewPrivateKey()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = cache.PutUserKey(ns, name, value.PublicKey())
|
|
||||||
require.NoError(t, err)
|
|
||||||
val := cache.GetUserKey(ns, name)
|
|
||||||
require.Equal(t, value.PublicKey(), val)
|
|
||||||
require.Equal(t, 0, observedLog.Len())
|
|
||||||
|
|
||||||
err = cache.cache.Set(ns+"/"+name, "tmp")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assertInvalidCacheEntry(t, cache.GetUserKey(ns, name), observedLog)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertInvalidCacheEntry(t *testing.T, val interface{}, observedLog *observer.ObservedLogs) {
|
func assertInvalidCacheEntry(t *testing.T, val interface{}, observedLog *observer.ObservedLogs) {
|
||||||
|
@ -234,7 +204,3 @@ func getObservedLogger() (*zap.Logger, *observer.ObservedLogs) {
|
||||||
loggerCore, observedLog := observer.New(zap.WarnLevel)
|
loggerCore, observedLog := observer.New(zap.WarnLevel)
|
||||||
return zap.New(loggerCore), observedLog
|
return zap.New(loggerCore), observedLog
|
||||||
}
|
}
|
||||||
|
|
||||||
func getAccessKeyID(addr oid.Address) string {
|
|
||||||
return strings.ReplaceAll(addr.EncodeToString(), "/", "0")
|
|
||||||
}
|
|
||||||
|
|
77
api/cache/frostfsid.go
vendored
77
api/cache/frostfsid.go
vendored
|
@ -1,77 +0,0 @@
|
||||||
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
"github.com/bluele/gcache"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FrostfsIDCache provides lru cache for frostfsid contract.
|
|
||||||
type FrostfsIDCache struct {
|
|
||||||
cache gcache.Cache
|
|
||||||
logger *zap.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// DefaultFrostfsIDCacheSize is a default maximum number of entries in cache.
|
|
||||||
DefaultFrostfsIDCacheSize = 1e4
|
|
||||||
// DefaultFrostfsIDCacheLifetime is a default lifetime of entries in cache.
|
|
||||||
DefaultFrostfsIDCacheLifetime = time.Minute
|
|
||||||
)
|
|
||||||
|
|
||||||
// DefaultFrostfsIDConfig returns new default cache expiration values.
|
|
||||||
func DefaultFrostfsIDConfig(logger *zap.Logger) *Config {
|
|
||||||
return &Config{
|
|
||||||
Size: DefaultFrostfsIDCacheSize,
|
|
||||||
Lifetime: DefaultFrostfsIDCacheLifetime,
|
|
||||||
Logger: logger,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFrostfsIDCache creates an object of FrostfsIDCache.
|
|
||||||
func NewFrostfsIDCache(config *Config) *FrostfsIDCache {
|
|
||||||
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
|
||||||
return &FrostfsIDCache{cache: gc, logger: config.Logger}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetSubject returns a cached client.SubjectExtended. Returns nil if value is missing.
|
|
||||||
func (c *FrostfsIDCache) GetSubject(key util.Uint160) *client.SubjectExtended {
|
|
||||||
return get[client.SubjectExtended](c, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutSubject puts a client.SubjectExtended to cache.
|
|
||||||
func (c *FrostfsIDCache) PutSubject(key util.Uint160, subject *client.SubjectExtended) error {
|
|
||||||
return c.cache.Set(key, subject)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetUserKey returns a cached *keys.PublicKey. Returns nil if value is missing.
|
|
||||||
func (c *FrostfsIDCache) GetUserKey(ns, name string) *keys.PublicKey {
|
|
||||||
return get[keys.PublicKey](c, ns+"/"+name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutUserKey puts a client.SubjectExtended to cache.
|
|
||||||
func (c *FrostfsIDCache) PutUserKey(ns, name string, userKey *keys.PublicKey) error {
|
|
||||||
return c.cache.Set(ns+"/"+name, userKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
func get[T any](c *FrostfsIDCache, key any) *T {
|
|
||||||
entry, err := c.cache.Get(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
result, ok := entry.(*T)
|
|
||||||
if !ok {
|
|
||||||
c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
107
api/cache/listsession.go
vendored
107
api/cache/listsession.go
vendored
|
@ -1,107 +0,0 @@
|
||||||
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"github.com/bluele/gcache"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
// ListSessionCache contains cache for list session (during pagination).
|
|
||||||
ListSessionCache struct {
|
|
||||||
cache gcache.Cache
|
|
||||||
logger *zap.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListSessionKey is a key to find a ListSessionCache's entry.
|
|
||||||
ListSessionKey struct {
|
|
||||||
cid cid.ID
|
|
||||||
prefix string
|
|
||||||
token string
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// DefaultListSessionCacheLifetime is a default lifetime of entries in cache of ListObjects.
|
|
||||||
DefaultListSessionCacheLifetime = time.Second * 60
|
|
||||||
// DefaultListSessionCacheSize is a default size of cache of ListObjects.
|
|
||||||
DefaultListSessionCacheSize = 100
|
|
||||||
)
|
|
||||||
|
|
||||||
// DefaultListSessionConfig returns new default cache expiration values.
|
|
||||||
func DefaultListSessionConfig(logger *zap.Logger) *Config {
|
|
||||||
return &Config{
|
|
||||||
Size: DefaultListSessionCacheSize,
|
|
||||||
Lifetime: DefaultListSessionCacheLifetime,
|
|
||||||
Logger: logger,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (k *ListSessionKey) String() string {
|
|
||||||
return k.cid.EncodeToString() + k.prefix + k.token
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewListSessionCache is a constructor which creates an object of ListObjectsCache with the given lifetime of entries.
|
|
||||||
func NewListSessionCache(config *Config) *ListSessionCache {
|
|
||||||
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).EvictedFunc(func(_ interface{}, val interface{}) {
|
|
||||||
session, ok := val.(*data.ListSession)
|
|
||||||
if !ok {
|
|
||||||
config.Logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", val)),
|
|
||||||
zap.String("expected", fmt.Sprintf("%T", session)))
|
|
||||||
}
|
|
||||||
|
|
||||||
if !session.Acquired.Load() {
|
|
||||||
session.Cancel()
|
|
||||||
}
|
|
||||||
}).Build()
|
|
||||||
return &ListSessionCache{cache: gc, logger: config.Logger}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetListSession returns a list of ObjectInfo.
|
|
||||||
func (l *ListSessionCache) GetListSession(key ListSessionKey) *data.ListSession {
|
|
||||||
entry, err := l.cache.Get(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
result, ok := entry.(*data.ListSession)
|
|
||||||
if !ok {
|
|
||||||
l.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutListSession puts a list of object versions to cache.
|
|
||||||
func (l *ListSessionCache) PutListSession(key ListSessionKey, session *data.ListSession) error {
|
|
||||||
s := l.GetListSession(key)
|
|
||||||
if s != nil && s != session {
|
|
||||||
if !s.Acquired.Load() {
|
|
||||||
s.Cancel()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return l.cache.Set(key, session)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteListSession removes key from cache.
|
|
||||||
func (l *ListSessionCache) DeleteListSession(key ListSessionKey) {
|
|
||||||
l.cache.Remove(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateListSessionCacheKey returns ListSessionKey with the given CID, prefix and token.
|
|
||||||
func CreateListSessionCacheKey(cnr cid.ID, prefix, token string) ListSessionKey {
|
|
||||||
p := ListSessionKey{
|
|
||||||
cid: cnr,
|
|
||||||
prefix: prefix,
|
|
||||||
token: token,
|
|
||||||
}
|
|
||||||
|
|
||||||
return p
|
|
||||||
}
|
|
3
api/cache/names.go
vendored
3
api/cache/names.go
vendored
|
@ -4,7 +4,6 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
"github.com/bluele/gcache"
|
"github.com/bluele/gcache"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
|
@ -49,7 +48,7 @@ func (o *ObjectsNameCache) Get(key string) *oid.Address {
|
||||||
|
|
||||||
result, ok := entry.(oid.Address)
|
result, ok := entry.(oid.Address)
|
||||||
if !ok {
|
if !ok {
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
86
api/cache/network.go
vendored
86
api/cache/network.go
vendored
|
@ -1,86 +0,0 @@
|
||||||
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
|
||||||
"github.com/bluele/gcache"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
// NetworkCache provides cache for network-related values.
|
|
||||||
NetworkCache struct {
|
|
||||||
cache gcache.Cache
|
|
||||||
logger *zap.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkCacheConfig stores expiration params for cache.
|
|
||||||
NetworkCacheConfig struct {
|
|
||||||
Lifetime time.Duration
|
|
||||||
Logger *zap.Logger
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
DefaultNetworkCacheLifetime = 1 * time.Minute
|
|
||||||
networkCacheSize = 2
|
|
||||||
networkInfoKey = "network_info"
|
|
||||||
netmapKey = "netmap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DefaultNetworkConfig returns new default cache expiration values.
|
|
||||||
func DefaultNetworkConfig(logger *zap.Logger) *NetworkCacheConfig {
|
|
||||||
return &NetworkCacheConfig{
|
|
||||||
Lifetime: DefaultNetworkCacheLifetime,
|
|
||||||
Logger: logger,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewNetworkCache creates an object of NetworkCache.
|
|
||||||
func NewNetworkCache(config *NetworkCacheConfig) *NetworkCache {
|
|
||||||
gc := gcache.New(networkCacheSize).LRU().Expiration(config.Lifetime).Build()
|
|
||||||
return &NetworkCache{cache: gc, logger: config.Logger}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *NetworkCache) GetNetworkInfo() *netmap.NetworkInfo {
|
|
||||||
entry, err := c.cache.Get(networkInfoKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
result, ok := entry.(netmap.NetworkInfo)
|
|
||||||
if !ok {
|
|
||||||
c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return &result
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *NetworkCache) PutNetworkInfo(info netmap.NetworkInfo) error {
|
|
||||||
return c.cache.Set(networkInfoKey, info)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *NetworkCache) GetNetmap() *netmap.NetMap {
|
|
||||||
entry, err := c.cache.Get(netmapKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
result, ok := entry.(netmap.NetMap)
|
|
||||||
if !ok {
|
|
||||||
c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return &result
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *NetworkCache) PutNetmap(nm netmap.NetMap) error {
|
|
||||||
return c.cache.Set(netmapKey, nm)
|
|
||||||
}
|
|
3
api/cache/objects.go
vendored
3
api/cache/objects.go
vendored
|
@ -5,7 +5,6 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
"github.com/bluele/gcache"
|
"github.com/bluele/gcache"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
|
@ -48,7 +47,7 @@ func (o *ObjectsCache) GetObject(address oid.Address) *data.ExtendedObjectInfo {
|
||||||
|
|
||||||
result, ok := entry.(*data.ExtendedObjectInfo)
|
result, ok := entry.(*data.ExtendedObjectInfo)
|
||||||
if !ok {
|
if !ok {
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
5
api/cache/objectslist.go
vendored
5
api/cache/objectslist.go
vendored
|
@ -7,7 +7,6 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
"github.com/bluele/gcache"
|
"github.com/bluele/gcache"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
|
@ -76,7 +75,7 @@ func (l *ObjectsListCache) GetVersions(key ObjectsListKey) []*data.NodeVersion {
|
||||||
|
|
||||||
result, ok := entry.([]*data.NodeVersion)
|
result, ok := entry.([]*data.NodeVersion)
|
||||||
if !ok {
|
if !ok {
|
||||||
l.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
l.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -95,7 +94,7 @@ func (l *ObjectsListCache) CleanCacheEntriesContainingObject(objectName string,
|
||||||
for _, key := range keys {
|
for _, key := range keys {
|
||||||
k, ok := key.(ObjectsListKey)
|
k, ok := key.(ObjectsListKey)
|
||||||
if !ok {
|
if !ok {
|
||||||
l.logger.Warn(logs.InvalidCacheKeyType, zap.String("actual", fmt.Sprintf("%T", key)),
|
l.logger.Warn("invalid cache key type", zap.String("actual", fmt.Sprintf("%T", key)),
|
||||||
zap.String("expected", fmt.Sprintf("%T", k)))
|
zap.String("expected", fmt.Sprintf("%T", k)))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
72
api/cache/policy.go
vendored
72
api/cache/policy.go
vendored
|
@ -1,72 +0,0 @@
|
||||||
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
|
||||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
|
|
||||||
"github.com/bluele/gcache"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MorphPolicyCache provides lru cache for listing policies stored in policy contract.
|
|
||||||
type MorphPolicyCache struct {
|
|
||||||
cache gcache.Cache
|
|
||||||
logger *zap.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
type MorphPolicyCacheKey struct {
|
|
||||||
Target engine.Target
|
|
||||||
Name chain.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// DefaultMorphPolicyCacheSize is a default maximum number of entries in cache.
|
|
||||||
DefaultMorphPolicyCacheSize = 1e4
|
|
||||||
// DefaultMorphPolicyCacheLifetime is a default lifetime of entries in cache.
|
|
||||||
DefaultMorphPolicyCacheLifetime = time.Minute
|
|
||||||
)
|
|
||||||
|
|
||||||
// DefaultMorphPolicyConfig returns new default cache expiration values.
|
|
||||||
func DefaultMorphPolicyConfig(logger *zap.Logger) *Config {
|
|
||||||
return &Config{
|
|
||||||
Size: DefaultMorphPolicyCacheSize,
|
|
||||||
Lifetime: DefaultMorphPolicyCacheLifetime,
|
|
||||||
Logger: logger,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMorphPolicyCache creates an object of MorphPolicyCache.
|
|
||||||
func NewMorphPolicyCache(config *Config) *MorphPolicyCache {
|
|
||||||
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
|
||||||
return &MorphPolicyCache{cache: gc, logger: config.Logger}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a cached object. Returns nil if value is missing.
|
|
||||||
func (o *MorphPolicyCache) Get(key MorphPolicyCacheKey) []*chain.Chain {
|
|
||||||
entry, err := o.cache.Get(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
result, ok := entry.([]*chain.Chain)
|
|
||||||
if !ok {
|
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put puts an object to cache.
|
|
||||||
func (o *MorphPolicyCache) Put(key MorphPolicyCacheKey, list []*chain.Chain) error {
|
|
||||||
return o.cache.Set(key, list)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete deletes an object from cache.
|
|
||||||
func (o *MorphPolicyCache) Delete(key MorphPolicyCacheKey) bool {
|
|
||||||
return o.cache.Remove(key)
|
|
||||||
}
|
|
47
api/cache/system.go
vendored
47
api/cache/system.go
vendored
|
@ -5,7 +5,6 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
"github.com/bluele/gcache"
|
"github.com/bluele/gcache"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -49,7 +48,7 @@ func (o *SystemCache) GetObject(key string) *data.ObjectInfo {
|
||||||
|
|
||||||
result, ok := entry.(*data.ObjectInfo)
|
result, ok := entry.(*data.ObjectInfo)
|
||||||
if !ok {
|
if !ok {
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -80,23 +79,7 @@ func (o *SystemCache) GetCORS(key string) *data.CORSConfiguration {
|
||||||
|
|
||||||
result, ok := entry.(*data.CORSConfiguration)
|
result, ok := entry.(*data.CORSConfiguration)
|
||||||
if !ok {
|
if !ok {
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *SystemCache) GetLifecycleConfiguration(key string) *data.LifecycleConfiguration {
|
|
||||||
entry, err := o.cache.Get(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
result, ok := entry.(*data.LifecycleConfiguration)
|
|
||||||
if !ok {
|
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -112,7 +95,23 @@ func (o *SystemCache) GetSettings(key string) *data.BucketSettings {
|
||||||
|
|
||||||
result, ok := entry.(*data.BucketSettings)
|
result, ok := entry.(*data.BucketSettings)
|
||||||
if !ok {
|
if !ok {
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *SystemCache) GetNotificationConfiguration(key string) *data.NotificationConfiguration {
|
||||||
|
entry, err := o.cache.Get(key)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
result, ok := entry.(*data.NotificationConfiguration)
|
||||||
|
if !ok {
|
||||||
|
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -149,14 +148,14 @@ func (o *SystemCache) PutCORS(key string, obj *data.CORSConfiguration) error {
|
||||||
return o.cache.Set(key, obj)
|
return o.cache.Set(key, obj)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *SystemCache) PutLifecycleConfiguration(key string, obj *data.LifecycleConfiguration) error {
|
|
||||||
return o.cache.Set(key, obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *SystemCache) PutSettings(key string, settings *data.BucketSettings) error {
|
func (o *SystemCache) PutSettings(key string, settings *data.BucketSettings) error {
|
||||||
return o.cache.Set(key, settings)
|
return o.cache.Set(key, settings)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (o *SystemCache) PutNotificationConfiguration(key string, obj *data.NotificationConfiguration) error {
|
||||||
|
return o.cache.Set(key, obj)
|
||||||
|
}
|
||||||
|
|
||||||
// PutTagging puts tags of a bucket or an object.
|
// PutTagging puts tags of a bucket or an object.
|
||||||
func (o *SystemCache) PutTagging(key string, tagSet map[string]string) error {
|
func (o *SystemCache) PutTagging(key string, tagSet map[string]string) error {
|
||||||
return o.cache.Set(key, tagSet)
|
return o.cache.Set(key, tagSet)
|
||||||
|
|
113
api/data/info.go
113
api/data/info.go
|
@ -2,20 +2,17 @@ package data
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
bktSettingsObject = ".s3-settings"
|
bktSettingsObject = ".s3-settings"
|
||||||
bktCORSConfigurationObject = ".s3-cors"
|
bktCORSConfigurationObject = ".s3-cors"
|
||||||
bktLifecycleConfigurationObject = ".s3-lifecycle"
|
bktNotificationConfigurationObject = ".s3-notifications"
|
||||||
|
|
||||||
VersioningUnversioned = "Unversioned"
|
VersioningUnversioned = "Unversioned"
|
||||||
VersioningEnabled = "Enabled"
|
VersioningEnabled = "Enabled"
|
||||||
|
@ -25,40 +22,44 @@ const (
|
||||||
type (
|
type (
|
||||||
// BucketInfo stores basic bucket data.
|
// BucketInfo stores basic bucket data.
|
||||||
BucketInfo struct {
|
BucketInfo struct {
|
||||||
Name string // container name from system attribute
|
Name string // container name from system attribute
|
||||||
Zone string // container zone from system attribute
|
Zone string // container zone from system attribute
|
||||||
CID cid.ID
|
CID cid.ID
|
||||||
Owner user.ID
|
Owner user.ID
|
||||||
Created time.Time
|
Created time.Time
|
||||||
LocationConstraint string
|
LocationConstraint string
|
||||||
ObjectLockEnabled bool
|
ObjectLockEnabled bool
|
||||||
HomomorphicHashDisabled bool
|
|
||||||
PlacementPolicy netmap.PlacementPolicy
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ObjectInfo holds S3 object data.
|
// ObjectInfo holds S3 object data.
|
||||||
ObjectInfo struct {
|
ObjectInfo struct {
|
||||||
ID oid.ID
|
ID oid.ID
|
||||||
CID cid.ID
|
CID cid.ID
|
||||||
|
IsDir bool
|
||||||
|
IsDeleteMarker bool
|
||||||
|
|
||||||
Bucket string
|
Bucket string
|
||||||
Name string
|
Name string
|
||||||
Size uint64
|
Size int64
|
||||||
ContentType string
|
ContentType string
|
||||||
Created time.Time
|
Created time.Time
|
||||||
CreationEpoch uint64
|
HashSum string
|
||||||
HashSum string
|
Owner user.ID
|
||||||
MD5Sum string
|
Headers map[string]string
|
||||||
Owner user.ID
|
}
|
||||||
Headers map[string]string
|
|
||||||
|
// NotificationInfo store info to send s3 notification.
|
||||||
|
NotificationInfo struct {
|
||||||
|
Name string
|
||||||
|
Version string
|
||||||
|
Size int64
|
||||||
|
HashSum string
|
||||||
}
|
}
|
||||||
|
|
||||||
// BucketSettings stores settings such as versioning.
|
// BucketSettings stores settings such as versioning.
|
||||||
BucketSettings struct {
|
BucketSettings struct {
|
||||||
Versioning string
|
Versioning string `json:"versioning"`
|
||||||
LockConfiguration *ObjectLockConfiguration
|
LockConfiguration *ObjectLockConfiguration `json:"lock_configuration"`
|
||||||
CannedACL string
|
|
||||||
OwnerKey *keys.PublicKey
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CORSConfiguration stores CORS configuration of a request.
|
// CORSConfiguration stores CORS configuration of a request.
|
||||||
|
@ -76,35 +77,26 @@ type (
|
||||||
ExposeHeaders []string `xml:"ExposeHeader" json:"ExposeHeaders"`
|
ExposeHeaders []string `xml:"ExposeHeader" json:"ExposeHeaders"`
|
||||||
MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty" json:"MaxAgeSeconds,omitempty"`
|
MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty" json:"MaxAgeSeconds,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ObjectVersion stores object version info.
|
|
||||||
ObjectVersion struct {
|
|
||||||
BktInfo *BucketInfo
|
|
||||||
ObjectName string
|
|
||||||
VersionID string
|
|
||||||
NoErrorOnDeleteMarker bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreatedObjectInfo stores created object info.
|
|
||||||
CreatedObjectInfo struct {
|
|
||||||
ID oid.ID
|
|
||||||
Size uint64
|
|
||||||
HashSum []byte
|
|
||||||
MD5Sum []byte
|
|
||||||
CreationEpoch uint64
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NotificationInfoFromObject creates new NotificationInfo from ObjectInfo.
|
||||||
|
func NotificationInfoFromObject(objInfo *ObjectInfo) *NotificationInfo {
|
||||||
|
return &NotificationInfo{
|
||||||
|
Name: objInfo.Name,
|
||||||
|
Version: objInfo.VersionID(),
|
||||||
|
Size: objInfo.Size,
|
||||||
|
HashSum: objInfo.HashSum,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// SettingsObjectName is a system name for a bucket settings file.
|
// SettingsObjectName is a system name for a bucket settings file.
|
||||||
func (b *BucketInfo) SettingsObjectName() string { return bktSettingsObject }
|
func (b *BucketInfo) SettingsObjectName() string { return bktSettingsObject }
|
||||||
|
|
||||||
// CORSObjectName returns a system name for a bucket CORS configuration file.
|
// CORSObjectName returns a system name for a bucket CORS configuration file.
|
||||||
func (b *BucketInfo) CORSObjectName() string {
|
func (b *BucketInfo) CORSObjectName() string { return bktCORSConfigurationObject }
|
||||||
return b.CID.EncodeToString() + bktCORSConfigurationObject
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BucketInfo) LifecycleConfigurationObjectName() string {
|
func (b *BucketInfo) NotificationConfigurationObjectName() string {
|
||||||
return b.CID.EncodeToString() + bktLifecycleConfigurationObject
|
return bktNotificationConfigurationObject
|
||||||
}
|
}
|
||||||
|
|
||||||
// VersionID returns object version from ObjectInfo.
|
// VersionID returns object version from ObjectInfo.
|
||||||
|
@ -122,13 +114,6 @@ func (o *ObjectInfo) Address() oid.Address {
|
||||||
return addr
|
return addr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *ObjectInfo) ETag(md5Enabled bool) string {
|
|
||||||
if md5Enabled && len(o.MD5Sum) > 0 {
|
|
||||||
return o.MD5Sum
|
|
||||||
}
|
|
||||||
return o.HashSum
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b BucketSettings) Unversioned() bool {
|
func (b BucketSettings) Unversioned() bool {
|
||||||
return b.Versioning == VersioningUnversioned
|
return b.Versioning == VersioningUnversioned
|
||||||
}
|
}
|
||||||
|
@ -140,11 +125,3 @@ func (b BucketSettings) VersioningEnabled() bool {
|
||||||
func (b BucketSettings) VersioningSuspended() bool {
|
func (b BucketSettings) VersioningSuspended() bool {
|
||||||
return b.Versioning == VersioningSuspended
|
return b.Versioning == VersioningSuspended
|
||||||
}
|
}
|
||||||
|
|
||||||
func Quote(val string) string {
|
|
||||||
return "\"" + val + "\""
|
|
||||||
}
|
|
||||||
|
|
||||||
func UnQuote(val string) string {
|
|
||||||
return strings.Trim(val, "\"")
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,56 +0,0 @@
|
||||||
package data
|
|
||||||
|
|
||||||
import "encoding/xml"
|
|
||||||
|
|
||||||
const (
|
|
||||||
LifecycleStatusEnabled = "Enabled"
|
|
||||||
LifecycleStatusDisabled = "Disabled"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
LifecycleConfiguration struct {
|
|
||||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LifecycleConfiguration" json:"-"`
|
|
||||||
Rules []LifecycleRule `xml:"Rule"`
|
|
||||||
}
|
|
||||||
|
|
||||||
LifecycleRule struct {
|
|
||||||
Status string `xml:"Status,omitempty"`
|
|
||||||
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"`
|
|
||||||
Expiration *LifecycleExpiration `xml:"Expiration,omitempty"`
|
|
||||||
Filter *LifecycleRuleFilter `xml:"Filter,omitempty"`
|
|
||||||
ID string `xml:"ID,omitempty"`
|
|
||||||
NonCurrentVersionExpiration *NonCurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
|
|
||||||
Prefix string `xml:"Prefix,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
AbortIncompleteMultipartUpload struct {
|
|
||||||
DaysAfterInitiation *int `xml:"DaysAfterInitiation,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
LifecycleExpiration struct {
|
|
||||||
Date string `xml:"Date,omitempty"`
|
|
||||||
Days *int `xml:"Days,omitempty"`
|
|
||||||
Epoch *uint64 `xml:"Epoch,omitempty"`
|
|
||||||
ExpiredObjectDeleteMarker *bool `xml:"ExpiredObjectDeleteMarker,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
LifecycleRuleFilter struct {
|
|
||||||
And *LifecycleRuleAndOperator `xml:"And,omitempty"`
|
|
||||||
ObjectSizeGreaterThan *uint64 `xml:"ObjectSizeGreaterThan,omitempty"`
|
|
||||||
ObjectSizeLessThan *uint64 `xml:"ObjectSizeLessThan,omitempty"`
|
|
||||||
Prefix string `xml:"Prefix,omitempty"`
|
|
||||||
Tag *Tag `xml:"Tag,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
LifecycleRuleAndOperator struct {
|
|
||||||
ObjectSizeGreaterThan *uint64 `xml:"ObjectSizeGreaterThan,omitempty"`
|
|
||||||
ObjectSizeLessThan *uint64 `xml:"ObjectSizeLessThan,omitempty"`
|
|
||||||
Prefix string `xml:"Prefix,omitempty"`
|
|
||||||
Tags []Tag `xml:"Tag"`
|
|
||||||
}
|
|
||||||
|
|
||||||
NonCurrentVersionExpiration struct {
|
|
||||||
NewerNonCurrentVersions *int `xml:"NewerNoncurrentVersions,omitempty"`
|
|
||||||
NonCurrentDays *int `xml:"NoncurrentDays,omitempty"`
|
|
||||||
}
|
|
||||||
)
|
|
|
@ -1,19 +0,0 @@
|
||||||
package data
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"sync/atomic"
|
|
||||||
)
|
|
||||||
|
|
||||||
type VersionsStream interface {
|
|
||||||
Next(ctx context.Context) (*NodeVersion, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type ListSession struct {
|
|
||||||
Next []*ExtendedNodeVersion
|
|
||||||
Stream VersionsStream
|
|
||||||
NamesMap map[string]struct{}
|
|
||||||
Context context.Context
|
|
||||||
Cancel context.CancelFunc
|
|
||||||
Acquired atomic.Bool
|
|
||||||
}
|
|
39
api/data/notifications.go
Normal file
39
api/data/notifications.go
Normal file
|
@ -0,0 +1,39 @@
|
||||||
|
package data
|
||||||
|
|
||||||
|
type (
|
||||||
|
NotificationConfiguration struct {
|
||||||
|
QueueConfigurations []QueueConfiguration `xml:"QueueConfiguration" json:"QueueConfigurations"`
|
||||||
|
// Not supported topics
|
||||||
|
TopicConfigurations []TopicConfiguration `xml:"TopicConfiguration" json:"TopicConfigurations"`
|
||||||
|
LambdaFunctionConfigurations []LambdaFunctionConfiguration `xml:"CloudFunctionConfiguration" json:"CloudFunctionConfigurations"`
|
||||||
|
}
|
||||||
|
|
||||||
|
QueueConfiguration struct {
|
||||||
|
ID string `xml:"Id" json:"Id"`
|
||||||
|
QueueArn string `xml:"Queue" json:"Queue"`
|
||||||
|
Events []string `xml:"Event" json:"Events"`
|
||||||
|
Filter Filter `xml:"Filter" json:"Filter"`
|
||||||
|
}
|
||||||
|
|
||||||
|
Filter struct {
|
||||||
|
Key Key `xml:"S3Key" json:"S3Key"`
|
||||||
|
}
|
||||||
|
|
||||||
|
Key struct {
|
||||||
|
FilterRules []FilterRule `xml:"FilterRule" json:"FilterRules"`
|
||||||
|
}
|
||||||
|
|
||||||
|
FilterRule struct {
|
||||||
|
Name string `xml:"Name" json:"Name"`
|
||||||
|
Value string `xml:"Value" json:"Value"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// TopicConfiguration and LambdaFunctionConfiguration -- we don't support these configurations,
|
||||||
|
// but we need them to detect in notification configurations in requests.
|
||||||
|
TopicConfiguration struct{}
|
||||||
|
LambdaFunctionConfiguration struct{}
|
||||||
|
)
|
||||||
|
|
||||||
|
func (n NotificationConfiguration) IsEmpty() bool {
|
||||||
|
return len(n.QueueConfigurations) == 0 && len(n.TopicConfigurations) == 0 && len(n.LambdaFunctionConfigurations) == 0
|
||||||
|
}
|
|
@ -1,30 +0,0 @@
|
||||||
package data
|
|
||||||
|
|
||||||
import "encoding/xml"
|
|
||||||
|
|
||||||
// Tagging contains tag set.
|
|
||||||
type Tagging struct {
|
|
||||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Tagging"`
|
|
||||||
TagSet []Tag `xml:"TagSet>Tag"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tag is an AWS key-value tag.
|
|
||||||
type Tag struct {
|
|
||||||
Key string
|
|
||||||
Value string
|
|
||||||
}
|
|
||||||
|
|
||||||
type GetObjectTaggingParams struct {
|
|
||||||
ObjectVersion *ObjectVersion
|
|
||||||
|
|
||||||
// NodeVersion can be nil. If not nil we save one request to tree service.
|
|
||||||
NodeVersion *NodeVersion // optional
|
|
||||||
}
|
|
||||||
|
|
||||||
type PutObjectTaggingParams struct {
|
|
||||||
ObjectVersion *ObjectVersion
|
|
||||||
TagSet map[string]string
|
|
||||||
|
|
||||||
// NodeVersion can be nil. If not nil we save one request to tree service.
|
|
||||||
NodeVersion *NodeVersion // optional
|
|
||||||
}
|
|
114
api/data/tree.go
114
api/data/tree.go
|
@ -16,31 +16,19 @@ const (
|
||||||
// NodeVersion represent node from tree service.
|
// NodeVersion represent node from tree service.
|
||||||
type NodeVersion struct {
|
type NodeVersion struct {
|
||||||
BaseNodeVersion
|
BaseNodeVersion
|
||||||
|
DeleteMarker *DeleteMarkerInfo
|
||||||
IsUnversioned bool
|
IsUnversioned bool
|
||||||
IsCombined bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExtendedNodeVersion contains additional node info to be able to sort versions by timestamp.
|
func (v NodeVersion) IsDeleteMarker() bool {
|
||||||
type ExtendedNodeVersion struct {
|
return v.DeleteMarker != nil
|
||||||
NodeVersion *NodeVersion
|
|
||||||
IsLatest bool
|
|
||||||
DirName string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e ExtendedNodeVersion) Version() string {
|
// DeleteMarkerInfo is used to save object info if node in the tree service is delete marker.
|
||||||
if e.NodeVersion.IsUnversioned {
|
// We need this information because the "delete marker" object is no longer stored in FrostFS.
|
||||||
return UnversionedObjectVersionID
|
type DeleteMarkerInfo struct {
|
||||||
}
|
Created time.Time
|
||||||
|
Owner user.ID
|
||||||
return e.NodeVersion.OID.EncodeToString()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e ExtendedNodeVersion) Name() string {
|
|
||||||
if e.DirName != "" {
|
|
||||||
return e.DirName
|
|
||||||
}
|
|
||||||
|
|
||||||
return e.NodeVersion.FilePath
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExtendedObjectInfo contains additional node info to be able to sort versions by timestamp.
|
// ExtendedObjectInfo contains additional node info to be able to sort versions by timestamp.
|
||||||
|
@ -61,36 +49,13 @@ func (e ExtendedObjectInfo) Version() string {
|
||||||
// BaseNodeVersion is minimal node info from tree service.
|
// BaseNodeVersion is minimal node info from tree service.
|
||||||
// Basically used for "system" object.
|
// Basically used for "system" object.
|
||||||
type BaseNodeVersion struct {
|
type BaseNodeVersion struct {
|
||||||
ID uint64
|
ID uint64
|
||||||
ParentID uint64
|
ParenID uint64
|
||||||
OID oid.ID
|
OID oid.ID
|
||||||
Timestamp uint64
|
Timestamp uint64
|
||||||
Size uint64
|
Size int64
|
||||||
ETag string
|
ETag string
|
||||||
MD5 string
|
FilePath string
|
||||||
FilePath string
|
|
||||||
Created *time.Time
|
|
||||||
Owner *user.ID
|
|
||||||
IsDeleteMarker bool
|
|
||||||
CreationEpoch uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *BaseNodeVersion) GetETag(md5Enabled bool) string {
|
|
||||||
if md5Enabled && len(v.MD5) > 0 {
|
|
||||||
return v.MD5
|
|
||||||
}
|
|
||||||
return v.ETag
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsFilledExtra returns true is node was created by version of gate v0.29.x and later.
|
|
||||||
func (v BaseNodeVersion) IsFilledExtra() bool {
|
|
||||||
return v.Created != nil && v.Owner != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *BaseNodeVersion) FillExtra(owner *user.ID, created *time.Time, realSize uint64) {
|
|
||||||
v.Owner = owner
|
|
||||||
v.Created = created
|
|
||||||
v.Size = realSize
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type ObjectTaggingInfo struct {
|
type ObjectTaggingInfo struct {
|
||||||
|
@ -103,48 +68,29 @@ type ObjectTaggingInfo struct {
|
||||||
type MultipartInfo struct {
|
type MultipartInfo struct {
|
||||||
// ID is node id in tree service.
|
// ID is node id in tree service.
|
||||||
// It's ignored when creating a new multipart upload.
|
// It's ignored when creating a new multipart upload.
|
||||||
ID uint64
|
ID uint64
|
||||||
Key string
|
Key string
|
||||||
UploadID string
|
UploadID string
|
||||||
Owner user.ID
|
Owner user.ID
|
||||||
Created time.Time
|
Created time.Time
|
||||||
Meta map[string]string
|
Meta map[string]string
|
||||||
CopiesNumbers []uint32
|
CopiesNumber uint32
|
||||||
Finished bool
|
|
||||||
CreationEpoch uint64
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// PartInfo is upload information about part.
|
// PartInfo is upload information about part.
|
||||||
type PartInfo struct {
|
type PartInfo struct {
|
||||||
Key string `json:"key"`
|
Key string
|
||||||
UploadID string `json:"uploadId"`
|
UploadID string
|
||||||
Number int `json:"number"`
|
Number int
|
||||||
OID oid.ID `json:"oid"`
|
OID oid.ID
|
||||||
Size uint64 `json:"size"`
|
Size int64
|
||||||
ETag string `json:"etag"`
|
ETag string
|
||||||
MD5 string `json:"md5"`
|
Created time.Time
|
||||||
Created time.Time `json:"created"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type PartInfoExtended struct {
|
|
||||||
PartInfo
|
|
||||||
|
|
||||||
// Timestamp is used to find the latest version of part info in case of tree split
|
|
||||||
// when there are multiple nodes for the same part.
|
|
||||||
Timestamp uint64
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ToHeaderString form short part representation to use in S3-Completed-Parts header.
|
// ToHeaderString form short part representation to use in S3-Completed-Parts header.
|
||||||
func (p *PartInfo) ToHeaderString() string {
|
func (p *PartInfo) ToHeaderString() string {
|
||||||
// ETag value contains SHA256 checksum which is used while getting object parts attributes.
|
return strconv.Itoa(p.Number) + "-" + strconv.FormatInt(p.Size, 10) + "-" + p.ETag
|
||||||
return strconv.Itoa(p.Number) + "-" + strconv.FormatUint(p.Size, 10) + "-" + p.ETag
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *PartInfo) GetETag(md5Enabled bool) string {
|
|
||||||
if md5Enabled && len(p.MD5) > 0 {
|
|
||||||
return p.MD5
|
|
||||||
}
|
|
||||||
return p.ETag
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// LockInfo is lock information to create appropriate tree node.
|
// LockInfo is lock information to create appropriate tree node.
|
||||||
|
|
|
@ -1,13 +1,8 @@
|
||||||
package errors
|
package errors
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
|
|
||||||
frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
@ -29,7 +24,6 @@ type (
|
||||||
const (
|
const (
|
||||||
_ ErrorCode = iota
|
_ ErrorCode = iota
|
||||||
ErrAccessDenied
|
ErrAccessDenied
|
||||||
ErrAccessControlListNotSupported
|
|
||||||
ErrBadDigest
|
ErrBadDigest
|
||||||
ErrEntityTooSmall
|
ErrEntityTooSmall
|
||||||
ErrEntityTooLarge
|
ErrEntityTooLarge
|
||||||
|
@ -57,7 +51,6 @@ const (
|
||||||
ErrInvalidCopyDest
|
ErrInvalidCopyDest
|
||||||
ErrInvalidPolicyDocument
|
ErrInvalidPolicyDocument
|
||||||
ErrInvalidObjectState
|
ErrInvalidObjectState
|
||||||
ErrMalformedACL
|
|
||||||
ErrMalformedXML
|
ErrMalformedXML
|
||||||
ErrMissingContentLength
|
ErrMissingContentLength
|
||||||
ErrMissingContentMD5
|
ErrMissingContentMD5
|
||||||
|
@ -78,7 +71,6 @@ const (
|
||||||
ErrInvalidArgument
|
ErrInvalidArgument
|
||||||
ErrInvalidTagKey
|
ErrInvalidTagKey
|
||||||
ErrInvalidTagValue
|
ErrInvalidTagValue
|
||||||
ErrInvalidTagKeyUniqueness
|
|
||||||
ErrInvalidTagsSizeExceed
|
ErrInvalidTagsSizeExceed
|
||||||
ErrNotImplemented
|
ErrNotImplemented
|
||||||
ErrPreconditionFailed
|
ErrPreconditionFailed
|
||||||
|
@ -95,7 +87,6 @@ const (
|
||||||
ErrBucketNotEmpty
|
ErrBucketNotEmpty
|
||||||
ErrAllAccessDisabled
|
ErrAllAccessDisabled
|
||||||
ErrMalformedPolicy
|
ErrMalformedPolicy
|
||||||
ErrMalformedPolicyNotPrincipal
|
|
||||||
ErrMissingFields
|
ErrMissingFields
|
||||||
ErrMissingCredTag
|
ErrMissingCredTag
|
||||||
ErrCredMalformed
|
ErrCredMalformed
|
||||||
|
@ -155,7 +146,6 @@ const (
|
||||||
ErrInvalidEncryptionAlgorithm
|
ErrInvalidEncryptionAlgorithm
|
||||||
ErrInvalidSSECustomerKey
|
ErrInvalidSSECustomerKey
|
||||||
ErrMissingSSECustomerKey
|
ErrMissingSSECustomerKey
|
||||||
ErrMissingSSECustomerAlgorithm
|
|
||||||
ErrMissingSSECustomerKeyMD5
|
ErrMissingSSECustomerKeyMD5
|
||||||
ErrSSECustomerKeyMD5Mismatch
|
ErrSSECustomerKeyMD5Mismatch
|
||||||
ErrInvalidSSECustomerParameters
|
ErrInvalidSSECustomerParameters
|
||||||
|
@ -185,15 +175,9 @@ const (
|
||||||
// Add new extended error codes here.
|
// Add new extended error codes here.
|
||||||
ErrInvalidObjectName
|
ErrInvalidObjectName
|
||||||
ErrOperationTimedOut
|
ErrOperationTimedOut
|
||||||
ErrGatewayTimeout
|
|
||||||
ErrOperationMaxedOut
|
ErrOperationMaxedOut
|
||||||
ErrInvalidRequest
|
ErrInvalidRequest
|
||||||
ErrInvalidRequestLargeCopy
|
|
||||||
ErrInvalidStorageClass
|
ErrInvalidStorageClass
|
||||||
VersionIDMarkerWithoutKeyMarker
|
|
||||||
ErrInvalidRangeLength
|
|
||||||
ErrRangeOutOfBounds
|
|
||||||
ErrMissingContentRange
|
|
||||||
|
|
||||||
ErrMalformedJSON
|
ErrMalformedJSON
|
||||||
ErrInsecureClientRequest
|
ErrInsecureClientRequest
|
||||||
|
@ -325,12 +309,6 @@ var errorCodes = errorCodeMap{
|
||||||
Description: "Invalid storage class.",
|
Description: "Invalid storage class.",
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
VersionIDMarkerWithoutKeyMarker: {
|
|
||||||
ErrCode: VersionIDMarkerWithoutKeyMarker,
|
|
||||||
Code: "VersionIDMarkerWithoutKeyMarker",
|
|
||||||
Description: "A version-id marker cannot be specified without a key marker.",
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
},
|
|
||||||
ErrInvalidRequestBody: {
|
ErrInvalidRequestBody: {
|
||||||
ErrCode: ErrInvalidRequestBody,
|
ErrCode: ErrInvalidRequestBody,
|
||||||
Code: "InvalidArgument",
|
Code: "InvalidArgument",
|
||||||
|
@ -340,7 +318,7 @@ var errorCodes = errorCodeMap{
|
||||||
ErrInvalidMaxUploads: {
|
ErrInvalidMaxUploads: {
|
||||||
ErrCode: ErrInvalidMaxUploads,
|
ErrCode: ErrInvalidMaxUploads,
|
||||||
Code: "InvalidArgument",
|
Code: "InvalidArgument",
|
||||||
Description: "Argument max-uploads must be an integer from 1 to 1000",
|
Description: "Argument max-uploads must be an integer between 0 and 2147483647",
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
ErrInvalidMaxKeys: {
|
ErrInvalidMaxKeys: {
|
||||||
|
@ -385,12 +363,6 @@ var errorCodes = errorCodeMap{
|
||||||
Description: "Access Denied.",
|
Description: "Access Denied.",
|
||||||
HTTPStatusCode: http.StatusForbidden,
|
HTTPStatusCode: http.StatusForbidden,
|
||||||
},
|
},
|
||||||
ErrAccessControlListNotSupported: {
|
|
||||||
ErrCode: ErrAccessControlListNotSupported,
|
|
||||||
Code: "AccessControlListNotSupported",
|
|
||||||
Description: "The bucket does not allow ACLs.",
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
},
|
|
||||||
ErrBadDigest: {
|
ErrBadDigest: {
|
||||||
ErrCode: ErrBadDigest,
|
ErrCode: ErrBadDigest,
|
||||||
Code: "BadDigest",
|
Code: "BadDigest",
|
||||||
|
@ -457,12 +429,6 @@ var errorCodes = errorCodeMap{
|
||||||
Description: "The requested range is not satisfiable",
|
Description: "The requested range is not satisfiable",
|
||||||
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
|
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
|
||||||
},
|
},
|
||||||
ErrMalformedACL: {
|
|
||||||
ErrCode: ErrMalformedACL,
|
|
||||||
Code: "MalformedACLError",
|
|
||||||
Description: "The ACL that you provided was not well formed or did not validate against our published schema.",
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
},
|
|
||||||
ErrMalformedXML: {
|
ErrMalformedXML: {
|
||||||
ErrCode: ErrMalformedXML,
|
ErrCode: ErrMalformedXML,
|
||||||
Code: "MalformedXML",
|
Code: "MalformedXML",
|
||||||
|
@ -556,19 +522,13 @@ var errorCodes = errorCodeMap{
|
||||||
ErrInvalidTagKey: {
|
ErrInvalidTagKey: {
|
||||||
ErrCode: ErrInvalidTagKey,
|
ErrCode: ErrInvalidTagKey,
|
||||||
Code: "InvalidTag",
|
Code: "InvalidTag",
|
||||||
Description: "The TagKey you have provided is invalid",
|
Description: "The TagValue you have provided is invalid",
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
ErrInvalidTagValue: {
|
ErrInvalidTagValue: {
|
||||||
ErrCode: ErrInvalidTagValue,
|
ErrCode: ErrInvalidTagValue,
|
||||||
Code: "InvalidTag",
|
Code: "InvalidTag",
|
||||||
Description: "The TagValue you have provided is invalid",
|
Description: "The TagKey you have provided is invalid",
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
},
|
|
||||||
ErrInvalidTagKeyUniqueness: {
|
|
||||||
ErrCode: ErrInvalidTagKeyUniqueness,
|
|
||||||
Code: "InvalidTag",
|
|
||||||
Description: "Cannot provide multiple Tags with the same key",
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
ErrInvalidTagsSizeExceed: {
|
ErrInvalidTagsSizeExceed: {
|
||||||
|
@ -634,7 +594,7 @@ var errorCodes = errorCodeMap{
|
||||||
ErrAuthorizationHeaderMalformed: {
|
ErrAuthorizationHeaderMalformed: {
|
||||||
ErrCode: ErrAuthorizationHeaderMalformed,
|
ErrCode: ErrAuthorizationHeaderMalformed,
|
||||||
Code: "AuthorizationHeaderMalformed",
|
Code: "AuthorizationHeaderMalformed",
|
||||||
Description: "The authorization header that you provided is not valid.",
|
Description: "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.",
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
ErrMalformedPOSTRequest: {
|
ErrMalformedPOSTRequest: {
|
||||||
|
@ -679,12 +639,6 @@ var errorCodes = errorCodeMap{
|
||||||
Description: "Policy has invalid resource.",
|
Description: "Policy has invalid resource.",
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
ErrMalformedPolicyNotPrincipal: {
|
|
||||||
ErrCode: ErrMalformedPolicyNotPrincipal,
|
|
||||||
Code: "MalformedPolicy",
|
|
||||||
Description: "Allow with NotPrincipal is not allowed.",
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
},
|
|
||||||
ErrMissingFields: {
|
ErrMissingFields: {
|
||||||
ErrCode: ErrMissingFields,
|
ErrCode: ErrMissingFields,
|
||||||
Code: "MissingFields",
|
Code: "MissingFields",
|
||||||
|
@ -1090,12 +1044,6 @@ var errorCodes = errorCodeMap{
|
||||||
Description: "Requests specifying Server Side Encryption with Customer provided keys must provide an appropriate secret key.",
|
Description: "Requests specifying Server Side Encryption with Customer provided keys must provide an appropriate secret key.",
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
ErrMissingSSECustomerAlgorithm: {
|
|
||||||
ErrCode: ErrMissingSSECustomerAlgorithm,
|
|
||||||
Code: "InvalidArgument",
|
|
||||||
Description: "Requests specifying Server Side Encryption with Customer provided keys must provide a valid encryption algorithm.",
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
},
|
|
||||||
ErrMissingSSECustomerKeyMD5: {
|
ErrMissingSSECustomerKeyMD5: {
|
||||||
ErrCode: ErrMissingSSECustomerKeyMD5,
|
ErrCode: ErrMissingSSECustomerKeyMD5,
|
||||||
Code: "InvalidArgument",
|
Code: "InvalidArgument",
|
||||||
|
@ -1176,12 +1124,6 @@ var errorCodes = errorCodeMap{
|
||||||
Description: "A timeout occurred while trying to lock a resource, please reduce your request rate",
|
Description: "A timeout occurred while trying to lock a resource, please reduce your request rate",
|
||||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
HTTPStatusCode: http.StatusServiceUnavailable,
|
||||||
},
|
},
|
||||||
ErrGatewayTimeout: {
|
|
||||||
ErrCode: ErrGatewayTimeout,
|
|
||||||
Code: "GatewayTimeout",
|
|
||||||
Description: "The server is acting as a gateway and cannot get a response in time",
|
|
||||||
HTTPStatusCode: http.StatusGatewayTimeout,
|
|
||||||
},
|
|
||||||
ErrOperationMaxedOut: {
|
ErrOperationMaxedOut: {
|
||||||
ErrCode: ErrOperationMaxedOut,
|
ErrCode: ErrOperationMaxedOut,
|
||||||
Code: "SlowDown",
|
Code: "SlowDown",
|
||||||
|
@ -1210,12 +1152,6 @@ var errorCodes = errorCodeMap{
|
||||||
Description: "Invalid Request",
|
Description: "Invalid Request",
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
ErrInvalidRequestLargeCopy: {
|
|
||||||
ErrCode: ErrInvalidRequestLargeCopy,
|
|
||||||
Code: "InvalidRequest",
|
|
||||||
Description: "CopyObject request made on objects larger than 5GB in size.",
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
},
|
|
||||||
ErrIncorrectContinuationToken: {
|
ErrIncorrectContinuationToken: {
|
||||||
ErrCode: ErrIncorrectContinuationToken,
|
ErrCode: ErrIncorrectContinuationToken,
|
||||||
Code: "InvalidArgument",
|
Code: "InvalidArgument",
|
||||||
|
@ -1752,30 +1688,11 @@ var errorCodes = errorCodeMap{
|
||||||
Description: "Part number must be an integer between 1 and 10000, inclusive",
|
Description: "Part number must be an integer between 1 and 10000, inclusive",
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
ErrInvalidRangeLength: {
|
|
||||||
ErrCode: ErrInvalidRangeLength,
|
|
||||||
Code: "InvalidRange",
|
|
||||||
Description: "Provided range length must be equal to content length",
|
|
||||||
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
|
|
||||||
},
|
|
||||||
ErrRangeOutOfBounds: {
|
|
||||||
ErrCode: ErrRangeOutOfBounds,
|
|
||||||
Code: "InvalidRange",
|
|
||||||
Description: "Provided range is outside of object bounds",
|
|
||||||
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
|
|
||||||
},
|
|
||||||
ErrMissingContentRange: {
|
|
||||||
ErrCode: ErrMissingContentRange,
|
|
||||||
Code: "MissingContentRange",
|
|
||||||
Description: "Content-Range header is mandatory for this type of request",
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
},
|
|
||||||
// Add your error structure here.
|
// Add your error structure here.
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsS3Error checks if the provided error is a specific s3 error.
|
// IsS3Error checks if the provided error is a specific s3 error.
|
||||||
func IsS3Error(err error, code ErrorCode) bool {
|
func IsS3Error(err error, code ErrorCode) bool {
|
||||||
err = frosterr.UnwrapErr(err)
|
|
||||||
e, ok := err.(Error)
|
e, ok := err.(Error)
|
||||||
return ok && e.ErrCode == code
|
return ok && e.ErrCode == code
|
||||||
}
|
}
|
||||||
|
@ -1812,30 +1729,6 @@ func GetAPIErrorWithError(code ErrorCode, err error) Error {
|
||||||
return errorCodes.toAPIErrWithErr(code, err)
|
return errorCodes.toAPIErrWithErr(code, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TransformToS3Error converts FrostFS error to the corresponding S3 error type.
|
|
||||||
func TransformToS3Error(err error) error {
|
|
||||||
err = frosterr.UnwrapErr(err) // this wouldn't work with errors.Join
|
|
||||||
var s3err Error
|
|
||||||
if errors.As(err, &s3err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if errors.Is(err, frostfs.ErrAccessDenied) ||
|
|
||||||
errors.Is(err, tree.ErrNodeAccessDenied) {
|
|
||||||
return GetAPIError(ErrAccessDenied)
|
|
||||||
}
|
|
||||||
|
|
||||||
if errors.Is(err, frostfs.ErrGatewayTimeout) {
|
|
||||||
return GetAPIError(ErrGatewayTimeout)
|
|
||||||
}
|
|
||||||
|
|
||||||
if errors.Is(err, frostfs.ErrGlobalDomainIsAlreadyTaken) {
|
|
||||||
return GetAPIError(ErrBucketAlreadyExists)
|
|
||||||
}
|
|
||||||
|
|
||||||
return GetAPIError(ErrInternalError)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ObjectError -- error that is linked to a specific object.
|
// ObjectError -- error that is linked to a specific object.
|
||||||
type ObjectError struct {
|
type ObjectError struct {
|
||||||
Err error
|
Err error
|
||||||
|
|
|
@ -2,12 +2,7 @@ package errors
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func BenchmarkErrCode(b *testing.B) {
|
func BenchmarkErrCode(b *testing.B) {
|
||||||
|
@ -29,56 +24,3 @@ func BenchmarkErrorsIs(b *testing.B) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTransformS3Errors(t *testing.T) {
|
|
||||||
for _, tc := range []struct {
|
|
||||||
name string
|
|
||||||
err error
|
|
||||||
expected ErrorCode
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "simple std error to internal error",
|
|
||||||
err: errors.New("some error"),
|
|
||||||
expected: ErrInternalError,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "layer access denied error to s3 access denied error",
|
|
||||||
err: frostfs.ErrAccessDenied,
|
|
||||||
expected: ErrAccessDenied,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "wrapped layer access denied error to s3 access denied error",
|
|
||||||
err: fmt.Errorf("wrap: %w", frostfs.ErrAccessDenied),
|
|
||||||
expected: ErrAccessDenied,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "layer node access denied error to s3 access denied error",
|
|
||||||
err: tree.ErrNodeAccessDenied,
|
|
||||||
expected: ErrAccessDenied,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "layer gateway timeout error to s3 gateway timeout error",
|
|
||||||
err: frostfs.ErrGatewayTimeout,
|
|
||||||
expected: ErrGatewayTimeout,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "s3 error to s3 error",
|
|
||||||
err: GetAPIError(ErrInvalidPart),
|
|
||||||
expected: ErrInvalidPart,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "wrapped s3 error to s3 error",
|
|
||||||
err: fmt.Errorf("wrap: %w", GetAPIError(ErrInvalidPart)),
|
|
||||||
expected: ErrInvalidPart,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
err := TransformToS3Error(tc.err)
|
|
||||||
s3err, ok := err.(Error)
|
|
||||||
require.True(t, ok, "error must be s3 error")
|
|
||||||
require.Equalf(t, tc.expected, s3err.ErrCode,
|
|
||||||
"expected: '%s', got: '%s'",
|
|
||||||
GetAPIError(tc.expected).Code, GetAPIError(s3err.ErrCode).Code)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
1633
api/handler/acl.go
1633
api/handler/acl.go
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
@ -3,129 +3,78 @@ package handler
|
||||||
import (
|
import (
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
handler struct {
|
handler struct {
|
||||||
log *zap.Logger
|
log *zap.Logger
|
||||||
obj *layer.Layer
|
obj layer.Client
|
||||||
cfg Config
|
notificator Notificator
|
||||||
ape APE
|
cfg *Config
|
||||||
frostfsid FrostFSID
|
}
|
||||||
|
|
||||||
|
Notificator interface {
|
||||||
|
SendNotifications(topics map[string]string, p *SendNotificationParams) error
|
||||||
|
SendTestNotification(topic, bucketName, requestID, HostID string, now time.Time) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// Config contains data which handler needs to keep.
|
// Config contains data which handler needs to keep.
|
||||||
Config interface {
|
Config struct {
|
||||||
DefaultPlacementPolicy(namespace string) netmap.PlacementPolicy
|
Policy PlacementPolicy
|
||||||
PlacementPolicy(namespace, constraint string) (netmap.PlacementPolicy, bool)
|
XMLDecoder XMLDecoderProvider
|
||||||
CopiesNumbers(namespace, constraint string) ([]uint32, bool)
|
DefaultMaxAge int
|
||||||
DefaultCopiesNumbers(namespace string) []uint32
|
NotificatorEnabled bool
|
||||||
NewXMLDecoder(reader io.Reader, agent string) *xml.Decoder
|
CopiesNumber uint32
|
||||||
DefaultMaxAge() int
|
ResolveZoneList []string
|
||||||
ResolveZoneList() []string
|
IsResolveListAllow bool // True if ResolveZoneList contains allowed zones
|
||||||
IsResolveListAllow() bool
|
CompleteMultipartKeepalive time.Duration
|
||||||
BypassContentEncodingInChunks(agent string) bool
|
|
||||||
MD5Enabled() bool
|
|
||||||
RetryMaxAttempts() int
|
|
||||||
RetryMaxBackoff() time.Duration
|
|
||||||
RetryStrategy() RetryStrategy
|
|
||||||
TLSTerminationHeader() string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
FrostFSID interface {
|
PlacementPolicy interface {
|
||||||
GetUserAddress(account, user string) (string, error)
|
Default() netmap.PlacementPolicy
|
||||||
GetUserKey(account, name string) (string, error)
|
Get(string) (netmap.PlacementPolicy, bool)
|
||||||
}
|
}
|
||||||
|
|
||||||
// APE is Access Policy Engine that needs to save policy and acl info to different places.
|
XMLDecoderProvider interface {
|
||||||
APE interface {
|
NewCompleteMultipartDecoder(io.Reader) *xml.Decoder
|
||||||
PutBucketPolicy(ns string, cnrID cid.ID, policy []byte, chains []*chain.Chain) error
|
|
||||||
DeleteBucketPolicy(ns string, cnrID cid.ID, chainIDs []chain.ID) error
|
|
||||||
GetBucketPolicy(ns string, cnrID cid.ID) ([]byte, error)
|
|
||||||
SaveACLChains(cid string, chains []*chain.Chain) error
|
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
type RetryStrategy string
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
RetryStrategyExponential = "exponential"
|
// DefaultPolicy is a default policy of placing containers in FrostFS if it's not set at the request.
|
||||||
RetryStrategyConstant = "constant"
|
DefaultPolicy = "REP 3"
|
||||||
|
// DefaultCopiesNumber is a default number of object copies that is enough to consider put successful if it's not set in config.
|
||||||
|
DefaultCopiesNumber uint32 = 0
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ api.Handler = (*handler)(nil)
|
var _ api.Handler = (*handler)(nil)
|
||||||
|
|
||||||
// New creates new api.Handler using given logger and client.
|
// New creates new api.Handler using given logger and client.
|
||||||
func New(log *zap.Logger, obj *layer.Layer, cfg Config, storage APE, ffsid FrostFSID) (api.Handler, error) {
|
func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg *Config) (api.Handler, error) {
|
||||||
switch {
|
switch {
|
||||||
case obj == nil:
|
case obj == nil:
|
||||||
return nil, errors.New("empty FrostFS Object Layer")
|
return nil, errors.New("empty FrostFS Object Layer")
|
||||||
case log == nil:
|
case log == nil:
|
||||||
return nil, errors.New("empty logger")
|
return nil, errors.New("empty logger")
|
||||||
case storage == nil:
|
}
|
||||||
return nil, errors.New("empty policy storage")
|
|
||||||
case ffsid == nil:
|
if !cfg.NotificatorEnabled {
|
||||||
return nil, errors.New("empty frostfsid")
|
log.Warn("notificator is disabled, s3 won't produce notification events")
|
||||||
|
} else if notificator == nil {
|
||||||
|
return nil, errors.New("empty notificator")
|
||||||
}
|
}
|
||||||
|
|
||||||
return &handler{
|
return &handler{
|
||||||
log: log,
|
log: log,
|
||||||
obj: obj,
|
obj: obj,
|
||||||
cfg: cfg,
|
cfg: cfg,
|
||||||
ape: storage,
|
notificator: notificator,
|
||||||
frostfsid: ffsid,
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// pickCopiesNumbers chooses the return values following this logic:
|
|
||||||
// 1) array of copies numbers sent in request's header has the highest priority.
|
|
||||||
// 2) array of copies numbers with corresponding location constraint provided in the config file.
|
|
||||||
// 3) default copies number from the config file wrapped into array.
|
|
||||||
func (h *handler) pickCopiesNumbers(metadata map[string]string, namespace, locationConstraint string) ([]uint32, error) {
|
|
||||||
copiesNumbersStr, ok := metadata[layer.AttributeFrostfsCopiesNumber]
|
|
||||||
if ok {
|
|
||||||
result, err := parseCopiesNumbers(copiesNumbersStr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
copiesNumbers, ok := h.cfg.CopiesNumbers(namespace, locationConstraint)
|
|
||||||
if ok {
|
|
||||||
return copiesNumbers, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return h.cfg.DefaultCopiesNumbers(namespace), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseCopiesNumbers(copiesNumbersStr string) ([]uint32, error) {
|
|
||||||
var result []uint32
|
|
||||||
copiesNumbersSplit := strings.Split(copiesNumbersStr, ",")
|
|
||||||
|
|
||||||
for i := range copiesNumbersSplit {
|
|
||||||
item := strings.ReplaceAll(copiesNumbersSplit[i], " ", "")
|
|
||||||
if len(item) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
copiesNumber, err := strconv.ParseUint(item, 10, 32)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("pasrse copies number: %w", err)
|
|
||||||
}
|
|
||||||
result = append(result, uint32(copiesNumber))
|
|
||||||
}
|
|
||||||
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,69 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCopiesNumberPicker(t *testing.T) {
|
|
||||||
var locationConstraints = map[string][]uint32{}
|
|
||||||
locationConstraint1 := "one"
|
|
||||||
locationConstraint2 := "two"
|
|
||||||
locationConstraints[locationConstraint1] = []uint32{2, 3, 4}
|
|
||||||
|
|
||||||
config := &configMock{
|
|
||||||
copiesNumbers: locationConstraints,
|
|
||||||
defaultCopiesNumbers: []uint32{1},
|
|
||||||
}
|
|
||||||
h := handler{
|
|
||||||
cfg: config,
|
|
||||||
}
|
|
||||||
|
|
||||||
metadata := map[string]string{}
|
|
||||||
|
|
||||||
t.Run("pick default copies number", func(t *testing.T) {
|
|
||||||
metadata["somekey1"] = "5, 6, 7"
|
|
||||||
expectedCopiesNumbers := []uint32{1}
|
|
||||||
|
|
||||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("pick copies number vector according to location constraint", func(t *testing.T) {
|
|
||||||
metadata["somekey2"] = "6, 7, 8"
|
|
||||||
expectedCopiesNumbers := []uint32{2, 3, 4}
|
|
||||||
|
|
||||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("pick copies number from metadata", func(t *testing.T) {
|
|
||||||
metadata["frostfs-copies-number"] = "7, 8, 9"
|
|
||||||
expectedCopiesNumbers := []uint32{7, 8, 9}
|
|
||||||
|
|
||||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("pick copies number from metadata with no space", func(t *testing.T) {
|
|
||||||
metadata["frostfs-copies-number"] = "7,8,9"
|
|
||||||
expectedCopiesNumbers := []uint32{7, 8, 9}
|
|
||||||
|
|
||||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("pick copies number from metadata with trailing comma", func(t *testing.T) {
|
|
||||||
metadata["frostfs-copies-number"] = "11, 12, 13, "
|
|
||||||
expectedCopiesNumbers := []uint32{11, 12, 13}
|
|
||||||
|
|
||||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,8 +1,6 @@
|
||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/base64"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
@ -12,7 +10,6 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -20,7 +17,7 @@ type (
|
||||||
GetObjectAttributesResponse struct {
|
GetObjectAttributesResponse struct {
|
||||||
ETag string `xml:"ETag,omitempty"`
|
ETag string `xml:"ETag,omitempty"`
|
||||||
Checksum *Checksum `xml:"Checksum,omitempty"`
|
Checksum *Checksum `xml:"Checksum,omitempty"`
|
||||||
ObjectSize uint64 `xml:"ObjectSize,omitempty"`
|
ObjectSize int64 `xml:"ObjectSize,omitempty"`
|
||||||
StorageClass string `xml:"StorageClass,omitempty"`
|
StorageClass string `xml:"StorageClass,omitempty"`
|
||||||
ObjectParts *ObjectParts `xml:"ObjectParts,omitempty"`
|
ObjectParts *ObjectParts `xml:"ObjectParts,omitempty"`
|
||||||
}
|
}
|
||||||
|
@ -70,18 +67,17 @@ var validAttributes = map[string]struct{}{
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
reqInfo := api.GetReqInfo(r.Context())
|
||||||
reqInfo := middleware.GetReqInfo(ctx)
|
|
||||||
|
|
||||||
params, err := parseGetObjectAttributeArgs(r, h.reqLogger(ctx))
|
params, err := parseGetObjectAttributeArgs(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "invalid request", reqInfo, err)
|
h.logAndSendError(w, "invalid request", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -91,44 +87,44 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
|
||||||
VersionID: params.VersionID,
|
VersionID: params.VersionID,
|
||||||
}
|
}
|
||||||
|
|
||||||
extendedInfo, err := h.obj.GetExtendedObjectInfo(ctx, p)
|
extendedInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not fetch object info", reqInfo, err)
|
h.logAndSendError(w, "could not fetch object info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
info := extendedInfo.ObjectInfo
|
info := extendedInfo.ObjectInfo
|
||||||
|
|
||||||
encryptionParams, err := h.formEncryptionParams(r)
|
encryptionParams, err := formEncryptionParams(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
|
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
|
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
|
||||||
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = checkPreconditions(info, params.Conditional, h.cfg.MD5Enabled()); err != nil {
|
if err = checkPreconditions(info, params.Conditional); err != nil {
|
||||||
h.logAndSendError(ctx, w, "precondition failed", reqInfo, err)
|
h.logAndSendError(w, "precondition failed", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
response, err := encodeToObjectAttributesResponse(info, params, h.cfg.MD5Enabled())
|
response, err := encodeToObjectAttributesResponse(info, params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "couldn't encode object info to response", reqInfo, err)
|
h.logAndSendError(w, "couldn't encode object info to response", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
writeAttributesHeaders(w.Header(), extendedInfo, bktSettings.Unversioned())
|
writeAttributesHeaders(w.Header(), extendedInfo, bktSettings.Unversioned())
|
||||||
if err = middleware.EncodeToResponse(w, response); err != nil {
|
if err = api.EncodeToResponse(w, response); err != nil {
|
||||||
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -138,14 +134,14 @@ func writeAttributesHeaders(h http.Header, info *data.ExtendedObjectInfo, isBuck
|
||||||
h.Set(api.AmzVersionID, info.Version())
|
h.Set(api.AmzVersionID, info.Version())
|
||||||
}
|
}
|
||||||
|
|
||||||
if info.NodeVersion.IsDeleteMarker {
|
if info.NodeVersion.IsDeleteMarker() {
|
||||||
h.Set(api.AmzDeleteMarker, strconv.FormatBool(true))
|
h.Set(api.AmzDeleteMarker, strconv.FormatBool(true))
|
||||||
}
|
}
|
||||||
|
|
||||||
// x-amz-request-charged
|
// x-amz-request-charged
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseGetObjectAttributeArgs(r *http.Request, log *zap.Logger) (*GetObjectAttributesArgs, error) {
|
func parseGetObjectAttributeArgs(r *http.Request) (*GetObjectAttributesArgs, error) {
|
||||||
res := &GetObjectAttributesArgs{
|
res := &GetObjectAttributesArgs{
|
||||||
VersionID: r.URL.Query().Get(api.QueryVersionID),
|
VersionID: r.URL.Query().Get(api.QueryVersionID),
|
||||||
}
|
}
|
||||||
|
@ -178,27 +174,23 @@ func parseGetObjectAttributeArgs(r *http.Request, log *zap.Logger) (*GetObjectAt
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
res.Conditional = parseConditionalHeaders(r.Header, log)
|
res.Conditional, err = parseConditionalHeaders(r.Header)
|
||||||
return res, nil
|
return res, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttributesArgs, md5Enabled bool) (*GetObjectAttributesResponse, error) {
|
func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttributesArgs) (*GetObjectAttributesResponse, error) {
|
||||||
resp := &GetObjectAttributesResponse{}
|
resp := &GetObjectAttributesResponse{}
|
||||||
|
|
||||||
for _, attr := range p.Attributes {
|
for _, attr := range p.Attributes {
|
||||||
switch attr {
|
switch attr {
|
||||||
case eTag:
|
case eTag:
|
||||||
resp.ETag = data.Quote(info.ETag(md5Enabled))
|
resp.ETag = info.HashSum
|
||||||
case storageClass:
|
case storageClass:
|
||||||
resp.StorageClass = api.DefaultStorageClass
|
resp.StorageClass = "STANDARD"
|
||||||
case objectSize:
|
case objectSize:
|
||||||
resp.ObjectSize = info.Size
|
resp.ObjectSize = info.Size
|
||||||
case checksum:
|
case checksum:
|
||||||
checksumBytes, err := hex.DecodeString(info.HashSum)
|
resp.Checksum = &Checksum{ChecksumSHA256: info.HashSum}
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("form upload attributes: %w", err)
|
|
||||||
}
|
|
||||||
resp.Checksum = &Checksum{ChecksumSHA256: base64.StdEncoding.EncodeToString(checksumBytes)}
|
|
||||||
case objectParts:
|
case objectParts:
|
||||||
parts, err := formUploadAttributes(info, p.MaxParts, p.PartNumberMarker)
|
parts, err := formUploadAttributes(info, p.MaxParts, p.PartNumberMarker)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -226,15 +218,10 @@ func formUploadAttributes(info *data.ObjectInfo, maxParts, marker int) (*ObjectP
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("invalid completed part: %w", err)
|
return nil, fmt.Errorf("invalid completed part: %w", err)
|
||||||
}
|
}
|
||||||
// ETag value contains SHA256 checksum.
|
|
||||||
checksumBytes, err := hex.DecodeString(part.ETag)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("invalid sha256 checksum in completed part: %w", err)
|
|
||||||
}
|
|
||||||
parts[i] = Part{
|
parts[i] = Part{
|
||||||
PartNumber: part.PartNumber,
|
PartNumber: part.PartNumber,
|
||||||
Size: int(part.Size),
|
Size: int(part.Size),
|
||||||
ChecksumSHA256: base64.StdEncoding.EncodeToString(checksumBytes),
|
ChecksumSHA256: part.ETag,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,8 +1,6 @@
|
||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/base64"
|
|
||||||
"encoding/hex"
|
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
@ -19,20 +17,18 @@ func TestGetObjectPartsAttributes(t *testing.T) {
|
||||||
|
|
||||||
createTestBucket(hc, bktName)
|
createTestBucket(hc, bktName)
|
||||||
|
|
||||||
putObject(hc, bktName, objName)
|
putObject(t, hc, bktName, objName)
|
||||||
result := getObjectAttributes(hc, bktName, objName, objectParts)
|
result := getObjectAttributes(hc, bktName, objName, objectParts)
|
||||||
require.Nil(t, result.ObjectParts)
|
require.Nil(t, result.ObjectParts)
|
||||||
|
|
||||||
multipartUpload := createMultipartUpload(hc, bktName, objMultipartName, map[string]string{})
|
multipartUpload := createMultipartUpload(hc, bktName, objMultipartName, map[string]string{})
|
||||||
etag, _ := uploadPart(hc, bktName, objMultipartName, multipartUpload.UploadID, 1, partSize)
|
etag, _ := uploadPart(hc, bktName, objMultipartName, multipartUpload.UploadID, 1, partSize)
|
||||||
completeMultipartUpload(hc, bktName, objMultipartName, multipartUpload.UploadID, []string{etag})
|
completeMultipartUpload(hc, bktName, objMultipartName, multipartUpload.UploadID, []string{etag})
|
||||||
etagBytes, err := hex.DecodeString(etag[1 : len(etag)-1])
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
result = getObjectAttributes(hc, bktName, objMultipartName, objectParts)
|
result = getObjectAttributes(hc, bktName, objMultipartName, objectParts)
|
||||||
require.NotNil(t, result.ObjectParts)
|
require.NotNil(t, result.ObjectParts)
|
||||||
require.Len(t, result.ObjectParts.Parts, 1)
|
require.Len(t, result.ObjectParts.Parts, 1)
|
||||||
require.Equal(t, base64.StdEncoding.EncodeToString(etagBytes), result.ObjectParts.Parts[0].ChecksumSHA256)
|
require.Equal(t, etag, result.ObjectParts.Parts[0].ChecksumSHA256)
|
||||||
require.Equal(t, partSize, result.ObjectParts.Parts[0].Size)
|
require.Equal(t, partSize, result.ObjectParts.Parts[0].Size)
|
||||||
require.Equal(t, 1, result.ObjectParts.PartsCount)
|
require.Equal(t, 1, result.ObjectParts.PartsCount)
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,8 +11,7 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -41,15 +40,15 @@ func path2BucketObject(path string) (string, string, error) {
|
||||||
|
|
||||||
func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
var (
|
var (
|
||||||
err error
|
err error
|
||||||
versionID string
|
versionID string
|
||||||
metadata map[string]string
|
metadata map[string]string
|
||||||
tagSet map[string]string
|
tagSet map[string]string
|
||||||
|
sessionTokenEACL *session.Container
|
||||||
|
|
||||||
ctx = r.Context()
|
reqInfo = api.GetReqInfo(r.Context())
|
||||||
reqInfo = middleware.GetReqInfo(ctx)
|
|
||||||
|
|
||||||
cannedACLStatus = aclHeadersStatus(r)
|
containsACL = containsACLHeaders(r)
|
||||||
)
|
)
|
||||||
|
|
||||||
src := r.Header.Get(api.AmzCopySource)
|
src := r.Header.Get(api.AmzCopySource)
|
||||||
|
@ -65,7 +64,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
srcBucket, srcObject, err := path2BucketObject(src)
|
srcBucket, srcObject, err := path2BucketObject(src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "invalid source copy", reqInfo, err)
|
h.logAndSendError(w, "invalid source copy", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -75,74 +74,44 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if srcObjPrm.BktInfo, err = h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner); err != nil {
|
if srcObjPrm.BktInfo, err = h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner); err != nil {
|
||||||
h.logAndSendError(ctx, w, "couldn't get source bucket", reqInfo, err)
|
h.logAndSendError(w, "couldn't get source bucket", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
dstBktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
dstBktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "couldn't get target bucket", reqInfo, err)
|
h.logAndSendError(w, "couldn't get target bucket", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
settings, err := h.obj.GetBucketSettings(ctx, dstBktInfo)
|
settings, err := h.obj.GetBucketSettings(r.Context(), dstBktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if cannedACLStatus == aclStatusYes {
|
if containsACL {
|
||||||
h.logAndSendError(ctx, w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
|
if sessionTokenEACL, err = getSessionTokenSetEACL(r.Context()); err != nil {
|
||||||
return
|
h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
|
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), srcObjPrm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not find object", reqInfo, err)
|
h.logAndSendError(w, "could not find object", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
srcObjInfo := extendedSrcObjInfo.ObjectInfo
|
srcObjInfo := extendedSrcObjInfo.ObjectInfo
|
||||||
|
|
||||||
srcEncryptionParams, err := h.formCopySourceEncryptionParams(r)
|
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
dstEncryptionParams, err := h.formEncryptionParams(r)
|
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = srcEncryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
|
|
||||||
if errors.IsS3Error(err, errors.ErrInvalidEncryptionParameters) || errors.IsS3Error(err, errors.ErrSSEEncryptedObject) ||
|
|
||||||
errors.IsS3Error(err, errors.ErrInvalidSSECustomerParameters) {
|
|
||||||
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, err, zap.Error(err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var dstSize uint64
|
|
||||||
srcSize, err := layer.GetObjectSize(srcObjInfo)
|
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(ctx, w, "failed to get source object size", reqInfo, err)
|
|
||||||
return
|
|
||||||
} else if srcSize > layer.UploadMaxSize { // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
|
|
||||||
h.logAndSendError(ctx, w, "too bid object to copy with single copy operation, use multipart upload copy instead", reqInfo, errors.GetAPIError(errors.ErrInvalidRequestLargeCopy))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
dstSize = srcSize
|
|
||||||
|
|
||||||
args, err := parseCopyObjectArgs(r.Header)
|
args, err := parseCopyObjectArgs(r.Header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not parse request params", reqInfo, err)
|
h.logAndSendError(w, "could not parse request params", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if isCopyingToItselfForbidden(reqInfo, srcBucket, srcObject, settings, args) {
|
if isCopyingToItselfForbidden(reqInfo, srcBucket, srcObject, settings, args) {
|
||||||
h.logAndSendError(ctx, w, "copying to itself without changing anything", reqInfo, errors.GetAPIError(errors.ErrInvalidCopyDest))
|
h.logAndSendError(w, "copying to itself without changing anything", reqInfo, errors.GetAPIError(errors.ErrInvalidCopyDest))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -153,12 +122,12 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
if args.TaggingDirective == replaceDirective {
|
if args.TaggingDirective == replaceDirective {
|
||||||
tagSet, err = parseTaggingHeader(r.Header)
|
tagSet, err = parseTaggingHeader(r.Header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not parse tagging header", reqInfo, err)
|
h.logAndSendError(w, "could not parse tagging header", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
tagPrm := &data.GetObjectTaggingParams{
|
tagPrm := &layer.GetObjectTaggingParams{
|
||||||
ObjectVersion: &data.ObjectVersion{
|
ObjectVersion: &layer.ObjectVersion{
|
||||||
BktInfo: srcObjPrm.BktInfo,
|
BktInfo: srcObjPrm.BktInfo,
|
||||||
ObjectName: srcObject,
|
ObjectName: srcObject,
|
||||||
VersionID: srcObjInfo.VersionID(),
|
VersionID: srcObjInfo.VersionID(),
|
||||||
|
@ -166,15 +135,26 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
NodeVersion: extendedSrcObjInfo.NodeVersion,
|
NodeVersion: extendedSrcObjInfo.NodeVersion,
|
||||||
}
|
}
|
||||||
|
|
||||||
_, tagSet, err = h.obj.GetObjectTagging(ctx, tagPrm)
|
_, tagSet, err = h.obj.GetObjectTagging(r.Context(), tagPrm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get object tagging", reqInfo, err)
|
h.logAndSendError(w, "could not get object tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = checkPreconditions(srcObjInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil {
|
encryptionParams, err := formEncryptionParams(r)
|
||||||
h.logAndSendError(ctx, w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
|
if err != nil {
|
||||||
|
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
|
||||||
|
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = checkPreconditions(srcObjInfo, args.Conditional); err != nil {
|
||||||
|
h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -182,55 +162,69 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
if len(srcObjInfo.ContentType) > 0 {
|
if len(srcObjInfo.ContentType) > 0 {
|
||||||
srcObjInfo.Headers[api.ContentType] = srcObjInfo.ContentType
|
srcObjInfo.Headers[api.ContentType] = srcObjInfo.ContentType
|
||||||
}
|
}
|
||||||
metadata = makeCopyMap(srcObjInfo.Headers)
|
metadata = srcObjInfo.Headers
|
||||||
filterMetadataMap(metadata)
|
|
||||||
} else if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
|
} else if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
|
||||||
metadata[api.ContentType] = contentType
|
metadata[api.ContentType] = contentType
|
||||||
}
|
}
|
||||||
|
|
||||||
params := &layer.CopyObjectParams{
|
copiesNumber, err := getCopiesNumberOrDefault(metadata, h.cfg.CopiesNumber)
|
||||||
SrcVersioned: srcObjPrm.Versioned(),
|
|
||||||
SrcObject: srcObjInfo,
|
|
||||||
ScrBktInfo: srcObjPrm.BktInfo,
|
|
||||||
DstBktInfo: dstBktInfo,
|
|
||||||
DstObject: reqInfo.ObjectName,
|
|
||||||
DstSize: dstSize,
|
|
||||||
Header: metadata,
|
|
||||||
SrcEncryption: srcEncryptionParams,
|
|
||||||
DstEncryption: dstEncryptionParams,
|
|
||||||
}
|
|
||||||
|
|
||||||
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, dstBktInfo.LocationConstraint)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
|
h.logAndSendError(w, "invalid copies number", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
params.Lock, err = formObjectLock(ctx, dstBktInfo, settings.LockConfiguration, r.Header)
|
params := &layer.CopyObjectParams{
|
||||||
|
SrcObject: srcObjInfo,
|
||||||
|
ScrBktInfo: srcObjPrm.BktInfo,
|
||||||
|
DstBktInfo: dstBktInfo,
|
||||||
|
DstObject: reqInfo.ObjectName,
|
||||||
|
SrcSize: srcObjInfo.Size,
|
||||||
|
Header: metadata,
|
||||||
|
Encryption: encryptionParams,
|
||||||
|
CopiesNuber: copiesNumber,
|
||||||
|
}
|
||||||
|
|
||||||
|
params.Lock, err = formObjectLock(r.Context(), dstBktInfo, settings.LockConfiguration, r.Header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not form object lock", reqInfo, err)
|
h.logAndSendError(w, "could not form object lock", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
additional := []zap.Field{zap.String("src_bucket_name", srcBucket), zap.String("src_object_name", srcObject)}
|
additional := []zap.Field{zap.String("src_bucket_name", srcBucket), zap.String("src_object_name", srcObject)}
|
||||||
extendedDstObjInfo, err := h.obj.CopyObject(ctx, params)
|
extendedDstObjInfo, err := h.obj.CopyObject(r.Context(), params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "couldn't copy object", reqInfo, err, additional...)
|
h.logAndSendError(w, "couldn't copy object", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
dstObjInfo := extendedDstObjInfo.ObjectInfo
|
dstObjInfo := extendedDstObjInfo.ObjectInfo
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, &CopyObjectResponse{
|
if err = api.EncodeToResponse(w, &CopyObjectResponse{LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339), ETag: dstObjInfo.HashSum}); err != nil {
|
||||||
LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339),
|
h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
|
||||||
ETag: data.Quote(dstObjInfo.ETag(h.cfg.MD5Enabled())),
|
|
||||||
}); err != nil {
|
|
||||||
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err, additional...)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if containsACL {
|
||||||
|
newEaclTable, err := h.getNewEAclTable(r, dstBktInfo, dstObjInfo)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(w, "could not get new eacl table", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
p := &layer.PutBucketACLParams{
|
||||||
|
BktInfo: dstBktInfo,
|
||||||
|
EACL: newEaclTable,
|
||||||
|
SessionToken: sessionTokenEACL,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = h.obj.PutBucketACL(r.Context(), p); err != nil {
|
||||||
|
h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if tagSet != nil {
|
if tagSet != nil {
|
||||||
tagPrm := &data.PutObjectTaggingParams{
|
tagPrm := &layer.PutObjectTaggingParams{
|
||||||
ObjectVersion: &data.ObjectVersion{
|
ObjectVersion: &layer.ObjectVersion{
|
||||||
BktInfo: dstBktInfo,
|
BktInfo: dstBktInfo,
|
||||||
ObjectName: reqInfo.ObjectName,
|
ObjectName: reqInfo.ObjectName,
|
||||||
VersionID: dstObjInfo.VersionID(),
|
VersionID: dstObjInfo.VersionID(),
|
||||||
|
@ -238,35 +232,33 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
TagSet: tagSet,
|
TagSet: tagSet,
|
||||||
NodeVersion: extendedDstObjInfo.NodeVersion,
|
NodeVersion: extendedDstObjInfo.NodeVersion,
|
||||||
}
|
}
|
||||||
if err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
|
if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not upload object tagging", reqInfo, err)
|
h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
h.reqLogger(ctx).Info(logs.ObjectIsCopied, zap.Stringer("object_id", dstObjInfo.ID))
|
h.log.Info("object is copied",
|
||||||
|
zap.String("bucket", dstObjInfo.Bucket),
|
||||||
|
zap.String("object", dstObjInfo.Name),
|
||||||
|
zap.Stringer("object_id", dstObjInfo.ID))
|
||||||
|
|
||||||
if dstEncryptionParams.Enabled() {
|
s := &SendNotificationParams{
|
||||||
|
Event: EventObjectCreatedCopy,
|
||||||
|
NotificationInfo: data.NotificationInfoFromObject(dstObjInfo),
|
||||||
|
BktInfo: dstBktInfo,
|
||||||
|
ReqInfo: reqInfo,
|
||||||
|
}
|
||||||
|
if err = h.sendNotifications(r.Context(), s); err != nil {
|
||||||
|
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
if encryptionParams.Enabled() {
|
||||||
addSSECHeaders(w.Header(), r.Header)
|
addSSECHeaders(w.Header(), r.Header)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeCopyMap(headers map[string]string) map[string]string {
|
func isCopyingToItselfForbidden(reqInfo *api.ReqInfo, srcBucket string, srcObject string, settings *data.BucketSettings, args *copyObjectArgs) bool {
|
||||||
res := make(map[string]string, len(headers))
|
|
||||||
for key, val := range headers {
|
|
||||||
res[key] = val
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func filterMetadataMap(metadata map[string]string) {
|
|
||||||
delete(metadata, layer.MultipartObjectSize) // object payload will be real one rather than list of compound parts
|
|
||||||
for key := range layer.EncryptionMetadata {
|
|
||||||
delete(metadata, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func isCopyingToItselfForbidden(reqInfo *middleware.ReqInfo, srcBucket string, srcObject string, settings *data.BucketSettings, args *copyObjectArgs) bool {
|
|
||||||
if reqInfo.BucketName != srcBucket || reqInfo.ObjectName != srcObject {
|
if reqInfo.BucketName != srcBucket || reqInfo.ObjectName != srcObject {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
@ -281,8 +273,8 @@ func isCopyingToItselfForbidden(reqInfo *middleware.ReqInfo, srcBucket string, s
|
||||||
func parseCopyObjectArgs(headers http.Header) (*copyObjectArgs, error) {
|
func parseCopyObjectArgs(headers http.Header) (*copyObjectArgs, error) {
|
||||||
var err error
|
var err error
|
||||||
args := &conditionalArgs{
|
args := &conditionalArgs{
|
||||||
IfMatch: data.UnQuote(headers.Get(api.AmzCopyIfMatch)),
|
IfMatch: headers.Get(api.AmzCopyIfMatch),
|
||||||
IfNoneMatch: data.UnQuote(headers.Get(api.AmzCopyIfNoneMatch)),
|
IfNoneMatch: headers.Get(api.AmzCopyIfNoneMatch),
|
||||||
}
|
}
|
||||||
|
|
||||||
if args.IfModifiedSince, err = parseHTTPTime(headers.Get(api.AmzCopyIfModifiedSince)); err != nil {
|
if args.IfModifiedSince, err = parseHTTPTime(headers.Get(api.AmzCopyIfModifiedSince)); err != nil {
|
||||||
|
|
|
@ -1,21 +1,12 @@
|
||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/md5"
|
|
||||||
"crypto/tls"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -24,7 +15,6 @@ type CopyMeta struct {
|
||||||
Tags map[string]string
|
Tags map[string]string
|
||||||
MetadataDirective string
|
MetadataDirective string
|
||||||
Metadata map[string]string
|
Metadata map[string]string
|
||||||
Headers map[string]string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCopyWithTaggingDirective(t *testing.T) {
|
func TestCopyWithTaggingDirective(t *testing.T) {
|
||||||
|
@ -39,14 +29,14 @@ func TestCopyWithTaggingDirective(t *testing.T) {
|
||||||
copyMeta := CopyMeta{
|
copyMeta := CopyMeta{
|
||||||
Tags: map[string]string{"key2": "val"},
|
Tags: map[string]string{"key2": "val"},
|
||||||
}
|
}
|
||||||
copyObject(tc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
|
copyObject(t, tc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
|
||||||
tagging := getObjectTagging(t, tc, bktName, objToCopy, emptyVersion)
|
tagging := getObjectTagging(t, tc, bktName, objToCopy, emptyVersion)
|
||||||
require.Len(t, tagging.TagSet, 1)
|
require.Len(t, tagging.TagSet, 1)
|
||||||
require.Equal(t, "key", tagging.TagSet[0].Key)
|
require.Equal(t, "key", tagging.TagSet[0].Key)
|
||||||
require.Equal(t, "val", tagging.TagSet[0].Value)
|
require.Equal(t, "val", tagging.TagSet[0].Value)
|
||||||
|
|
||||||
copyMeta.TaggingDirective = replaceDirective
|
copyMeta.TaggingDirective = replaceDirective
|
||||||
copyObject(tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
|
copyObject(t, tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
|
||||||
tagging = getObjectTagging(t, tc, bktName, objToCopy2, emptyVersion)
|
tagging = getObjectTagging(t, tc, bktName, objToCopy2, emptyVersion)
|
||||||
require.Len(t, tagging.TagSet, 1)
|
require.Len(t, tagging.TagSet, 1)
|
||||||
require.Equal(t, "key2", tagging.TagSet[0].Key)
|
require.Equal(t, "key2", tagging.TagSet[0].Key)
|
||||||
|
@ -61,213 +51,20 @@ func TestCopyToItself(t *testing.T) {
|
||||||
|
|
||||||
copyMeta := CopyMeta{MetadataDirective: replaceDirective}
|
copyMeta := CopyMeta{MetadataDirective: replaceDirective}
|
||||||
|
|
||||||
copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest)
|
copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest)
|
||||||
copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
|
copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)
|
||||||
|
|
||||||
putBucketVersioning(t, tc, bktName, true)
|
putBucketVersioning(t, tc, bktName, true)
|
||||||
copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
|
copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
|
||||||
copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
|
copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)
|
||||||
|
|
||||||
putBucketVersioning(t, tc, bktName, false)
|
putBucketVersioning(t, tc, bktName, false)
|
||||||
copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
|
copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
|
||||||
copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
|
copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCopyMultipart(t *testing.T) {
|
func copyObject(t *testing.T, tc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) {
|
||||||
hc := prepareHandlerContext(t)
|
w, r := prepareTestRequest(tc, bktName, toObject, nil)
|
||||||
|
|
||||||
bktName, objName := "bucket-for-copy", "object-for-copy"
|
|
||||||
createTestBucket(hc, bktName)
|
|
||||||
|
|
||||||
partSize := layer.UploadMinSize
|
|
||||||
objLen := 6 * partSize
|
|
||||||
headers := map[string]string{}
|
|
||||||
|
|
||||||
data := multipartUpload(hc, bktName, objName, headers, objLen, partSize)
|
|
||||||
require.Equal(t, objLen, len(data))
|
|
||||||
|
|
||||||
objToCopy := "copy-target"
|
|
||||||
var copyMeta CopyMeta
|
|
||||||
copyObject(hc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
|
|
||||||
|
|
||||||
copiedData, _ := getObject(hc, bktName, objToCopy)
|
|
||||||
equalDataSlices(t, data, copiedData)
|
|
||||||
|
|
||||||
result := getObjectAttributes(hc, bktName, objToCopy, objectParts)
|
|
||||||
require.NotNil(t, result.ObjectParts)
|
|
||||||
|
|
||||||
objToCopy2 := "copy-target2"
|
|
||||||
copyMeta.MetadataDirective = replaceDirective
|
|
||||||
copyObject(hc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
|
|
||||||
|
|
||||||
result = getObjectAttributes(hc, bktName, objToCopy2, objectParts)
|
|
||||||
require.Nil(t, result.ObjectParts)
|
|
||||||
|
|
||||||
copiedData, _ = getObject(hc, bktName, objToCopy2)
|
|
||||||
equalDataSlices(t, data, copiedData)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCopyEncryptedToUnencrypted(t *testing.T) {
|
|
||||||
tc := prepareHandlerContext(t)
|
|
||||||
|
|
||||||
bktName, srcObjName := "bucket-for-copy", "object-for-copy"
|
|
||||||
key1 := []byte("firstencriptionkeyofsourceobject")
|
|
||||||
key1Md5 := md5.Sum(key1)
|
|
||||||
key2 := []byte("anotherencriptionkeysourceobject")
|
|
||||||
key2Md5 := md5.Sum(key2)
|
|
||||||
bktInfo := createTestBucket(tc, bktName)
|
|
||||||
|
|
||||||
srcEnc, err := encryption.NewParams(key1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
srcObjInfo := createTestObject(tc, bktInfo, srcObjName, *srcEnc)
|
|
||||||
require.True(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))
|
|
||||||
|
|
||||||
dstObjName := "copy-object"
|
|
||||||
|
|
||||||
// empty copy-source-sse headers
|
|
||||||
w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
|
|
||||||
r.TLS = &tls.ConnectionState{}
|
|
||||||
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
|
|
||||||
tc.Handler().CopyObjectHandler(w, r)
|
|
||||||
|
|
||||||
assertStatus(t, w, http.StatusBadRequest)
|
|
||||||
assertS3Error(t, w, errors.GetAPIError(errors.ErrSSEEncryptedObject))
|
|
||||||
|
|
||||||
// empty copy-source-sse-custom-key
|
|
||||||
w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
|
|
||||||
r.TLS = &tls.ConnectionState{}
|
|
||||||
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
|
|
||||||
tc.Handler().CopyObjectHandler(w, r)
|
|
||||||
|
|
||||||
assertStatus(t, w, http.StatusBadRequest)
|
|
||||||
assertS3Error(t, w, errors.GetAPIError(errors.ErrMissingSSECustomerKey))
|
|
||||||
|
|
||||||
// empty copy-source-sse-custom-algorithm
|
|
||||||
w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
|
|
||||||
r.TLS = &tls.ConnectionState{}
|
|
||||||
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
|
|
||||||
tc.Handler().CopyObjectHandler(w, r)
|
|
||||||
|
|
||||||
assertStatus(t, w, http.StatusBadRequest)
|
|
||||||
assertS3Error(t, w, errors.GetAPIError(errors.ErrMissingSSECustomerAlgorithm))
|
|
||||||
|
|
||||||
// invalid copy-source-sse-custom-key
|
|
||||||
w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
|
|
||||||
r.TLS = &tls.ConnectionState{}
|
|
||||||
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key2))
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key2Md5[:]))
|
|
||||||
tc.Handler().CopyObjectHandler(w, r)
|
|
||||||
|
|
||||||
assertStatus(t, w, http.StatusBadRequest)
|
|
||||||
assertS3Error(t, w, errors.GetAPIError(errors.ErrInvalidSSECustomerParameters))
|
|
||||||
|
|
||||||
// success copy
|
|
||||||
w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
|
|
||||||
r.TLS = &tls.ConnectionState{}
|
|
||||||
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key1Md5[:]))
|
|
||||||
tc.Handler().CopyObjectHandler(w, r)
|
|
||||||
|
|
||||||
assertStatus(t, w, http.StatusOK)
|
|
||||||
dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], strconv.Itoa(int(dstObjInfo.Size)))
|
|
||||||
require.False(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCopyUnencryptedToEncrypted(t *testing.T) {
|
|
||||||
tc := prepareHandlerContext(t)
|
|
||||||
|
|
||||||
bktName, srcObjName := "bucket-for-copy", "object-for-copy"
|
|
||||||
key := []byte("firstencriptionkeyofsourceobject")
|
|
||||||
keyMd5 := md5.Sum(key)
|
|
||||||
bktInfo := createTestBucket(tc, bktName)
|
|
||||||
|
|
||||||
srcObjInfo := createTestObject(tc, bktInfo, srcObjName, encryption.Params{})
|
|
||||||
require.False(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))
|
|
||||||
|
|
||||||
dstObjName := "copy-object"
|
|
||||||
|
|
||||||
// invalid copy-source-sse headers
|
|
||||||
w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
|
|
||||||
r.TLS = &tls.ConnectionState{}
|
|
||||||
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key))
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMd5[:]))
|
|
||||||
tc.Handler().CopyObjectHandler(w, r)
|
|
||||||
|
|
||||||
assertStatus(t, w, http.StatusBadRequest)
|
|
||||||
assertS3Error(t, w, errors.GetAPIError(errors.ErrInvalidEncryptionParameters))
|
|
||||||
|
|
||||||
// success copy
|
|
||||||
w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
|
|
||||||
r.TLS = &tls.ConnectionState{}
|
|
||||||
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
|
|
||||||
r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
|
|
||||||
r.Header.Set(api.AmzServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key))
|
|
||||||
r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMd5[:]))
|
|
||||||
tc.Handler().CopyObjectHandler(w, r)
|
|
||||||
|
|
||||||
assertStatus(t, w, http.StatusOK)
|
|
||||||
dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.True(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
|
|
||||||
require.Equal(t, strconv.Itoa(int(srcObjInfo.Size)), dstObjInfo.Headers[layer.AttributeDecryptedSize])
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCopyEncryptedToEncryptedWithAnotherKey(t *testing.T) {
|
|
||||||
tc := prepareHandlerContext(t)
|
|
||||||
|
|
||||||
bktName, srcObjName := "bucket-for-copy", "object-for-copy"
|
|
||||||
key1 := []byte("firstencriptionkeyofsourceobject")
|
|
||||||
key1Md5 := md5.Sum(key1)
|
|
||||||
key2 := []byte("anotherencriptionkeysourceobject")
|
|
||||||
key2Md5 := md5.Sum(key2)
|
|
||||||
bktInfo := createTestBucket(tc, bktName)
|
|
||||||
|
|
||||||
srcEnc, err := encryption.NewParams(key1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
srcObjInfo := createTestObject(tc, bktInfo, srcObjName, *srcEnc)
|
|
||||||
require.True(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))
|
|
||||||
|
|
||||||
dstObjName := "copy-object"
|
|
||||||
|
|
||||||
w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
|
|
||||||
r.TLS = &tls.ConnectionState{}
|
|
||||||
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
|
|
||||||
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key1Md5[:]))
|
|
||||||
r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
|
|
||||||
r.Header.Set(api.AmzServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key2))
|
|
||||||
r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key2Md5[:]))
|
|
||||||
tc.Handler().CopyObjectHandler(w, r)
|
|
||||||
|
|
||||||
assertStatus(t, w, http.StatusOK)
|
|
||||||
dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.True(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
|
|
||||||
require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], dstObjInfo.Headers[layer.AttributeDecryptedSize])
|
|
||||||
}
|
|
||||||
|
|
||||||
func containEncryptionMetadataHeaders(headers map[string]string) bool {
|
|
||||||
for k := range headers {
|
|
||||||
if _, ok := layer.EncryptionMetadata[k]; ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func copyObject(hc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) {
|
|
||||||
w, r := prepareTestRequest(hc, bktName, toObject, nil)
|
|
||||||
r.Header.Set(api.AmzCopySource, bktName+"/"+fromObject)
|
r.Header.Set(api.AmzCopySource, bktName+"/"+fromObject)
|
||||||
|
|
||||||
r.Header.Set(api.AmzMetadataDirective, copyMeta.MetadataDirective)
|
r.Header.Set(api.AmzMetadataDirective, copyMeta.MetadataDirective)
|
||||||
|
@ -282,33 +79,28 @@ func copyObject(hc *handlerContext, bktName, fromObject, toObject string, copyMe
|
||||||
}
|
}
|
||||||
r.Header.Set(api.AmzTagging, tagsQuery.Encode())
|
r.Header.Set(api.AmzTagging, tagsQuery.Encode())
|
||||||
|
|
||||||
for key, val := range copyMeta.Headers {
|
tc.Handler().CopyObjectHandler(w, r)
|
||||||
r.Header.Set(key, val)
|
assertStatus(t, w, statusCode)
|
||||||
}
|
|
||||||
|
|
||||||
hc.Handler().CopyObjectHandler(w, r)
|
|
||||||
assertStatus(hc.t, w, statusCode)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func putObjectTagging(t *testing.T, tc *handlerContext, bktName, objName string, tags map[string]string) {
|
func putObjectTagging(t *testing.T, tc *handlerContext, bktName, objName string, tags map[string]string) {
|
||||||
body := &data.Tagging{
|
body := &Tagging{
|
||||||
TagSet: make([]data.Tag, 0, len(tags)),
|
TagSet: make([]Tag, 0, len(tags)),
|
||||||
}
|
}
|
||||||
|
|
||||||
for key, val := range tags {
|
for key, val := range tags {
|
||||||
body.TagSet = append(body.TagSet, data.Tag{
|
body.TagSet = append(body.TagSet, Tag{
|
||||||
Key: key,
|
Key: key,
|
||||||
Value: val,
|
Value: val,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
w, r := prepareTestRequest(tc, bktName, objName, body)
|
w, r := prepareTestRequest(tc, bktName, objName, body)
|
||||||
middleware.GetReqInfo(r.Context()).Tagging = body
|
|
||||||
tc.Handler().PutObjectTaggingHandler(w, r)
|
tc.Handler().PutObjectTaggingHandler(w, r)
|
||||||
assertStatus(t, w, http.StatusOK)
|
assertStatus(t, w, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getObjectTagging(t *testing.T, tc *handlerContext, bktName, objName, version string) *data.Tagging {
|
func getObjectTagging(t *testing.T, tc *handlerContext, bktName, objName, version string) *Tagging {
|
||||||
query := make(url.Values)
|
query := make(url.Values)
|
||||||
query.Add(api.QueryVersionID, version)
|
query.Add(api.QueryVersionID, version)
|
||||||
|
|
||||||
|
@ -316,7 +108,7 @@ func getObjectTagging(t *testing.T, tc *handlerContext, bktName, objName, versio
|
||||||
tc.Handler().GetObjectTaggingHandler(w, r)
|
tc.Handler().GetObjectTaggingHandler(w, r)
|
||||||
assertStatus(t, w, http.StatusOK)
|
assertStatus(t, w, http.StatusOK)
|
||||||
|
|
||||||
tagging := &data.Tagging{}
|
tagging := &Tagging{}
|
||||||
err := xml.NewDecoder(w.Result().Body).Decode(tagging)
|
err := xml.NewDecoder(w.Result().Body).Decode(tagging)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
return tagging
|
return tagging
|
||||||
|
|
|
@ -8,8 +8,6 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -20,73 +18,60 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
reqInfo := api.GetReqInfo(r.Context())
|
||||||
reqInfo := middleware.GetReqInfo(ctx)
|
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
|
cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get cors", reqInfo, err)
|
h.logAndSendError(w, "could not get cors", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, cors); err != nil {
|
if err = api.EncodeToResponse(w, cors); err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not encode cors to response", reqInfo, err)
|
h.logAndSendError(w, "could not encode cors to response", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
reqInfo := api.GetReqInfo(r.Context())
|
||||||
reqInfo := middleware.GetReqInfo(ctx)
|
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.PutCORSParams{
|
p := &layer.PutCORSParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Reader: r.Body,
|
Reader: r.Body,
|
||||||
NewDecoder: h.cfg.NewXMLDecoder,
|
CopiesNumber: h.cfg.CopiesNumber,
|
||||||
UserAgent: r.UserAgent(),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
|
if err = h.obj.PutBucketCORS(r.Context(), p); err != nil {
|
||||||
if err != nil {
|
h.logAndSendError(w, "could not put cors configuration", reqInfo, err)
|
||||||
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutBucketCORS(ctx, p); err != nil {
|
api.WriteSuccessResponseHeadersOnly(w)
|
||||||
h.logAndSendError(ctx, w, "could not put cors configuration", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
|
|
||||||
h.logAndSendError(ctx, w, "write response", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
reqInfo := api.GetReqInfo(r.Context())
|
||||||
reqInfo := middleware.GetReqInfo(ctx)
|
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.DeleteBucketCORS(ctx, bktInfo); err != nil {
|
if err = h.obj.DeleteBucketCORS(r.Context(), bktInfo); err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not delete cors", reqInfo, err)
|
h.logAndSendError(w, "could not delete cors", reqInfo, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
w.WriteHeader(http.StatusNoContent)
|
||||||
|
@ -100,21 +85,19 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
|
||||||
if origin == "" {
|
if origin == "" {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
reqInfo := api.GetReqInfo(r.Context())
|
||||||
ctx := r.Context()
|
|
||||||
reqInfo := middleware.GetReqInfo(ctx)
|
|
||||||
if reqInfo.BucketName == "" {
|
if reqInfo.BucketName == "" {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
bktInfo, err := h.getBucketInfo(ctx, reqInfo.BucketName)
|
bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.reqLogger(ctx).Warn(logs.GetBucketInfo, zap.Error(err))
|
h.log.Warn("get bucket info", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
|
cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.reqLogger(ctx).Warn(logs.GetBucketCors, zap.Error(err))
|
h.log.Warn("get bucket cors", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -153,22 +136,21 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
reqInfo := api.GetReqInfo(r.Context())
|
||||||
reqInfo := middleware.GetReqInfo(ctx)
|
bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
|
||||||
bktInfo, err := h.getBucketInfo(ctx, reqInfo.BucketName)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
origin := r.Header.Get(api.Origin)
|
origin := r.Header.Get(api.Origin)
|
||||||
if origin == "" {
|
if origin == "" {
|
||||||
h.logAndSendError(ctx, w, "origin request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
|
h.logAndSendError(w, "origin request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
|
||||||
}
|
}
|
||||||
|
|
||||||
method := r.Header.Get(api.AccessControlRequestMethod)
|
method := r.Header.Get(api.AccessControlRequestMethod)
|
||||||
if method == "" {
|
if method == "" {
|
||||||
h.logAndSendError(ctx, w, "Access-Control-Request-Method request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
|
h.logAndSendError(w, "Access-Control-Request-Method request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -178,9 +160,9 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
||||||
headers = strings.Split(requestHeaders, ", ")
|
headers = strings.Split(requestHeaders, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
|
cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get cors", reqInfo, err)
|
h.logAndSendError(w, "could not get cors", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -192,8 +174,8 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
||||||
if !checkSubslice(rule.AllowedHeaders, headers) {
|
if !checkSubslice(rule.AllowedHeaders, headers) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
w.Header().Set(api.AccessControlAllowOrigin, origin)
|
w.Header().Set(api.AccessControlAllowOrigin, o)
|
||||||
w.Header().Set(api.AccessControlAllowMethods, method)
|
w.Header().Set(api.AccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
|
||||||
if headers != nil {
|
if headers != nil {
|
||||||
w.Header().Set(api.AccessControlAllowHeaders, requestHeaders)
|
w.Header().Set(api.AccessControlAllowHeaders, requestHeaders)
|
||||||
}
|
}
|
||||||
|
@ -203,22 +185,19 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
||||||
if rule.MaxAgeSeconds > 0 || rule.MaxAgeSeconds == -1 {
|
if rule.MaxAgeSeconds > 0 || rule.MaxAgeSeconds == -1 {
|
||||||
w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(rule.MaxAgeSeconds))
|
w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(rule.MaxAgeSeconds))
|
||||||
} else {
|
} else {
|
||||||
w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(h.cfg.DefaultMaxAge()))
|
w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(h.cfg.DefaultMaxAge))
|
||||||
}
|
}
|
||||||
if o != wildcard {
|
if o != wildcard {
|
||||||
w.Header().Set(api.AccessControlAllowCredentials, "true")
|
w.Header().Set(api.AccessControlAllowCredentials, "true")
|
||||||
}
|
}
|
||||||
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
|
api.WriteSuccessResponseHeadersOnly(w)
|
||||||
h.logAndSendError(ctx, w, "write response", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
h.logAndSendError(ctx, w, "Forbidden", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
|
h.logAndSendError(w, "Forbidden", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkSubslice(slice []string, subSlice []string) bool {
|
func checkSubslice(slice []string, subSlice []string) bool {
|
||||||
|
|
|
@ -1,13 +1,12 @@
|
||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestCORSOriginWildcard(t *testing.T) {
|
func TestCORSOriginWildcard(t *testing.T) {
|
||||||
|
@ -24,14 +23,14 @@ func TestCORSOriginWildcard(t *testing.T) {
|
||||||
bktName := "bucket-for-cors"
|
bktName := "bucket-for-cors"
|
||||||
box, _ := createAccessBox(t)
|
box, _ := createAccessBox(t)
|
||||||
w, r := prepareTestRequest(hc, bktName, "", nil)
|
w, r := prepareTestRequest(hc, bktName, "", nil)
|
||||||
ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
|
ctx := context.WithValue(r.Context(), api.BoxData, box)
|
||||||
r = r.WithContext(ctx)
|
r = r.WithContext(ctx)
|
||||||
r.Header.Add(api.AmzACL, "public-read")
|
r.Header.Add(api.AmzACL, "public-read")
|
||||||
hc.Handler().CreateBucketHandler(w, r)
|
hc.Handler().CreateBucketHandler(w, r)
|
||||||
assertStatus(t, w, http.StatusOK)
|
assertStatus(t, w, http.StatusOK)
|
||||||
|
|
||||||
w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
|
w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
|
||||||
ctx = middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
|
ctx = context.WithValue(r.Context(), api.BoxData, box)
|
||||||
r = r.WithContext(ctx)
|
r = r.WithContext(ctx)
|
||||||
hc.Handler().PutBucketCorsHandler(w, r)
|
hc.Handler().PutBucketCorsHandler(w, r)
|
||||||
assertStatus(t, w, http.StatusOK)
|
assertStatus(t, w, http.StatusOK)
|
||||||
|
@ -40,181 +39,3 @@ func TestCORSOriginWildcard(t *testing.T) {
|
||||||
hc.Handler().GetBucketCorsHandler(w, r)
|
hc.Handler().GetBucketCorsHandler(w, r)
|
||||||
assertStatus(t, w, http.StatusOK)
|
assertStatus(t, w, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPreflight(t *testing.T) {
|
|
||||||
body := `
|
|
||||||
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
|
||||||
<CORSRule>
|
|
||||||
<AllowedMethod>GET</AllowedMethod>
|
|
||||||
<AllowedOrigin>http://www.example.com</AllowedOrigin>
|
|
||||||
<AllowedHeader>Authorization</AllowedHeader>
|
|
||||||
<ExposeHeader>x-amz-*</ExposeHeader>
|
|
||||||
<ExposeHeader>X-Amz-*</ExposeHeader>
|
|
||||||
<MaxAgeSeconds>600</MaxAgeSeconds>
|
|
||||||
</CORSRule>
|
|
||||||
</CORSConfiguration>
|
|
||||||
`
|
|
||||||
hc := prepareHandlerContext(t)
|
|
||||||
|
|
||||||
bktName := "bucket-preflight-test"
|
|
||||||
box, _ := createAccessBox(t)
|
|
||||||
w, r := prepareTestRequest(hc, bktName, "", nil)
|
|
||||||
ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
|
|
||||||
r = r.WithContext(ctx)
|
|
||||||
hc.Handler().CreateBucketHandler(w, r)
|
|
||||||
assertStatus(t, w, http.StatusOK)
|
|
||||||
|
|
||||||
w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
|
|
||||||
ctx = middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
|
|
||||||
r = r.WithContext(ctx)
|
|
||||||
hc.Handler().PutBucketCorsHandler(w, r)
|
|
||||||
assertStatus(t, w, http.StatusOK)
|
|
||||||
|
|
||||||
for _, tc := range []struct {
|
|
||||||
name string
|
|
||||||
origin string
|
|
||||||
method string
|
|
||||||
headers string
|
|
||||||
expectedStatus int
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "Valid",
|
|
||||||
origin: "http://www.example.com",
|
|
||||||
method: "GET",
|
|
||||||
headers: "Authorization",
|
|
||||||
expectedStatus: http.StatusOK,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Empty origin",
|
|
||||||
method: "GET",
|
|
||||||
headers: "Authorization",
|
|
||||||
expectedStatus: http.StatusBadRequest,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Empty request method",
|
|
||||||
origin: "http://www.example.com",
|
|
||||||
headers: "Authorization",
|
|
||||||
expectedStatus: http.StatusBadRequest,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Not allowed method",
|
|
||||||
origin: "http://www.example.com",
|
|
||||||
method: "PUT",
|
|
||||||
headers: "Authorization",
|
|
||||||
expectedStatus: http.StatusForbidden,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Not allowed headers",
|
|
||||||
origin: "http://www.example.com",
|
|
||||||
method: "GET",
|
|
||||||
headers: "Authorization, Last-Modified",
|
|
||||||
expectedStatus: http.StatusForbidden,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
w, r = prepareTestPayloadRequest(hc, bktName, "", nil)
|
|
||||||
r.Header.Set(api.Origin, tc.origin)
|
|
||||||
r.Header.Set(api.AccessControlRequestMethod, tc.method)
|
|
||||||
r.Header.Set(api.AccessControlRequestHeaders, tc.headers)
|
|
||||||
hc.Handler().Preflight(w, r)
|
|
||||||
assertStatus(t, w, tc.expectedStatus)
|
|
||||||
|
|
||||||
if tc.expectedStatus == http.StatusOK {
|
|
||||||
require.Equal(t, tc.origin, w.Header().Get(api.AccessControlAllowOrigin))
|
|
||||||
require.Equal(t, tc.method, w.Header().Get(api.AccessControlAllowMethods))
|
|
||||||
require.Equal(t, tc.headers, w.Header().Get(api.AccessControlAllowHeaders))
|
|
||||||
require.Equal(t, "x-amz-*, X-Amz-*", w.Header().Get(api.AccessControlExposeHeaders))
|
|
||||||
require.Equal(t, "true", w.Header().Get(api.AccessControlAllowCredentials))
|
|
||||||
require.Equal(t, "600", w.Header().Get(api.AccessControlMaxAge))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPreflightWildcardOrigin(t *testing.T) {
|
|
||||||
body := `
|
|
||||||
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
|
||||||
<CORSRule>
|
|
||||||
<AllowedMethod>GET</AllowedMethod>
|
|
||||||
<AllowedMethod>PUT</AllowedMethod>
|
|
||||||
<AllowedOrigin>*</AllowedOrigin>
|
|
||||||
<AllowedHeader>*</AllowedHeader>
|
|
||||||
</CORSRule>
|
|
||||||
</CORSConfiguration>
|
|
||||||
`
|
|
||||||
hc := prepareHandlerContext(t)
|
|
||||||
|
|
||||||
bktName := "bucket-preflight-wildcard-test"
|
|
||||||
box, _ := createAccessBox(t)
|
|
||||||
w, r := prepareTestRequest(hc, bktName, "", nil)
|
|
||||||
ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
|
|
||||||
r = r.WithContext(ctx)
|
|
||||||
hc.Handler().CreateBucketHandler(w, r)
|
|
||||||
assertStatus(t, w, http.StatusOK)
|
|
||||||
|
|
||||||
w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
|
|
||||||
ctx = middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
|
|
||||||
r = r.WithContext(ctx)
|
|
||||||
hc.Handler().PutBucketCorsHandler(w, r)
|
|
||||||
assertStatus(t, w, http.StatusOK)
|
|
||||||
|
|
||||||
for _, tc := range []struct {
|
|
||||||
name string
|
|
||||||
origin string
|
|
||||||
method string
|
|
||||||
headers string
|
|
||||||
expectedStatus int
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "Valid get",
|
|
||||||
origin: "http://www.example.com",
|
|
||||||
method: "GET",
|
|
||||||
headers: "Authorization, Last-Modified",
|
|
||||||
expectedStatus: http.StatusOK,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Valid put",
|
|
||||||
origin: "http://example.com",
|
|
||||||
method: "PUT",
|
|
||||||
headers: "Authorization, Content-Type",
|
|
||||||
expectedStatus: http.StatusOK,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Empty origin",
|
|
||||||
method: "GET",
|
|
||||||
headers: "Authorization, Last-Modified",
|
|
||||||
expectedStatus: http.StatusBadRequest,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Empty request method",
|
|
||||||
origin: "http://www.example.com",
|
|
||||||
headers: "Authorization, Last-Modified",
|
|
||||||
expectedStatus: http.StatusBadRequest,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Not allowed method",
|
|
||||||
origin: "http://www.example.com",
|
|
||||||
method: "DELETE",
|
|
||||||
headers: "Authorization, Last-Modified",
|
|
||||||
expectedStatus: http.StatusForbidden,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
w, r = prepareTestPayloadRequest(hc, bktName, "", nil)
|
|
||||||
r.Header.Set(api.Origin, tc.origin)
|
|
||||||
r.Header.Set(api.AccessControlRequestMethod, tc.method)
|
|
||||||
r.Header.Set(api.AccessControlRequestHeaders, tc.headers)
|
|
||||||
hc.Handler().Preflight(w, r)
|
|
||||||
assertStatus(t, w, tc.expectedStatus)
|
|
||||||
|
|
||||||
if tc.expectedStatus == http.StatusOK {
|
|
||||||
require.Equal(t, tc.origin, w.Header().Get(api.AccessControlAllowOrigin))
|
|
||||||
require.Equal(t, tc.method, w.Header().Get(api.AccessControlAllowMethods))
|
|
||||||
require.Equal(t, tc.headers, w.Header().Get(api.AccessControlAllowHeaders))
|
|
||||||
require.Empty(t, w.Header().Get(api.AccessControlExposeHeaders))
|
|
||||||
require.Empty(t, w.Header().Get(api.AccessControlAllowCredentials))
|
|
||||||
require.Equal(t, "0", w.Header().Get(api.AccessControlMaxAge))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@ -2,18 +2,19 @@ package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
"go.uber.org/zap"
|
||||||
|
"go.uber.org/zap/zapcore"
|
||||||
)
|
)
|
||||||
|
|
||||||
// limitation of AWS https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
|
// limitation of AWS https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
|
||||||
|
@ -21,9 +22,8 @@ const maxObjectsToDelete = 1000
|
||||||
|
|
||||||
// DeleteObjectsRequest -- xml carrying the object key names which should be deleted.
|
// DeleteObjectsRequest -- xml carrying the object key names which should be deleted.
|
||||||
type DeleteObjectsRequest struct {
|
type DeleteObjectsRequest struct {
|
||||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delete" json:"-"`
|
|
||||||
// Element to enable quiet mode for the request
|
// Element to enable quiet mode for the request
|
||||||
Quiet bool `xml:"Quiet,omitempty"`
|
Quiet bool
|
||||||
// List of objects to be deleted
|
// List of objects to be deleted
|
||||||
Objects []ObjectIdentifier `xml:"Object"`
|
Objects []ObjectIdentifier `xml:"Object"`
|
||||||
}
|
}
|
||||||
|
@ -43,10 +43,10 @@ type DeletedObject struct {
|
||||||
|
|
||||||
// DeleteError structure.
|
// DeleteError structure.
|
||||||
type DeleteError struct {
|
type DeleteError struct {
|
||||||
Code string `xml:"Code,omitempty"`
|
Code string
|
||||||
Message string `xml:"Message,omitempty"`
|
Message string
|
||||||
Key string `xml:"Key,omitempty"`
|
Key string
|
||||||
VersionID string `xml:"VersionId,omitempty"`
|
VersionID string `xml:"versionId,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteObjectsResponse container for multiple object deletes.
|
// DeleteObjectsResponse container for multiple object deletes.
|
||||||
|
@ -61,8 +61,7 @@ type DeleteObjectsResponse struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
reqInfo := api.GetReqInfo(r.Context())
|
||||||
reqInfo := middleware.GetReqInfo(ctx)
|
|
||||||
versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
|
versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
|
||||||
versionedObject := []*layer.VersionedObject{{
|
versionedObject := []*layer.VersionedObject{{
|
||||||
Name: reqInfo.ObjectName,
|
Name: reqInfo.ObjectName,
|
||||||
|
@ -71,39 +70,67 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
networkInfo, err := h.obj.GetNetworkInfo(ctx)
|
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.DeleteObjectParams{
|
p := &layer.DeleteObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Objects: versionedObject,
|
Objects: versionedObject,
|
||||||
Settings: bktSettings,
|
Settings: bktSettings,
|
||||||
NetworkInfo: networkInfo,
|
|
||||||
}
|
}
|
||||||
deletedObjects := h.obj.DeleteObjects(ctx, p)
|
deletedObjects := h.obj.DeleteObjects(r.Context(), p)
|
||||||
deletedObject := deletedObjects[0]
|
deletedObject := deletedObjects[0]
|
||||||
if deletedObject.Error != nil {
|
if deletedObject.Error != nil {
|
||||||
if isErrObjectLocked(deletedObject.Error) {
|
if isErrObjectLocked(deletedObject.Error) {
|
||||||
h.logAndSendError(ctx, w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
|
h.logAndSendError(w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
|
||||||
} else {
|
} else {
|
||||||
h.logAndSendError(ctx, w, "could not delete object", reqInfo, deletedObject.Error)
|
h.logAndSendError(w, "could not delete object", reqInfo, deletedObject.Error)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var m *SendNotificationParams
|
||||||
|
|
||||||
|
if bktSettings.VersioningEnabled() && len(versionID) == 0 {
|
||||||
|
m = &SendNotificationParams{
|
||||||
|
Event: EventObjectRemovedDeleteMarkerCreated,
|
||||||
|
NotificationInfo: &data.NotificationInfo{
|
||||||
|
Name: reqInfo.ObjectName,
|
||||||
|
HashSum: deletedObject.DeleteMarkerEtag,
|
||||||
|
},
|
||||||
|
BktInfo: bktInfo,
|
||||||
|
ReqInfo: reqInfo,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
var objID oid.ID
|
||||||
|
if len(versionID) != 0 {
|
||||||
|
if err = objID.DecodeString(versionID); err != nil {
|
||||||
|
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
m = &SendNotificationParams{
|
||||||
|
Event: EventObjectRemovedDelete,
|
||||||
|
NotificationInfo: &data.NotificationInfo{
|
||||||
|
Name: reqInfo.ObjectName,
|
||||||
|
Version: objID.EncodeToString(),
|
||||||
|
},
|
||||||
|
BktInfo: bktInfo,
|
||||||
|
ReqInfo: reqInfo,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = h.sendNotifications(r.Context(), m); err != nil {
|
||||||
|
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||||
|
}
|
||||||
|
|
||||||
if deletedObject.VersionID != "" {
|
if deletedObject.VersionID != "" {
|
||||||
w.Header().Set(api.AmzVersionID, deletedObject.VersionID)
|
w.Header().Set(api.AmzVersionID, deletedObject.VersionID)
|
||||||
}
|
}
|
||||||
|
@ -121,54 +148,51 @@ func isErrObjectLocked(err error) bool {
|
||||||
switch err.(type) {
|
switch err.(type) {
|
||||||
default:
|
default:
|
||||||
return strings.Contains(err.Error(), "object is locked")
|
return strings.Contains(err.Error(), "object is locked")
|
||||||
case *apistatus.ObjectLocked:
|
case apistatus.ObjectLocked,
|
||||||
|
*apistatus.ObjectLocked:
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteMultipleObjectsHandler handles multiple delete requests.
|
// DeleteMultipleObjectsHandler handles multiple delete requests.
|
||||||
func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
reqInfo := api.GetReqInfo(r.Context())
|
||||||
reqInfo := middleware.GetReqInfo(ctx)
|
|
||||||
|
|
||||||
// Content-Md5 is required and should be set
|
// Content-Md5 is required and should be set
|
||||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
||||||
if _, ok := r.Header[api.ContentMD5]; !ok {
|
if _, ok := r.Header[api.ContentMD5]; !ok {
|
||||||
h.logAndSendError(ctx, w, "missing Content-MD5", reqInfo, errors.GetAPIError(errors.ErrMissingContentMD5))
|
h.logAndSendError(w, "missing Content-MD5", reqInfo, errors.GetAPIError(errors.ErrMissingContentMD5))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Content-Length is required and should be non-zero
|
// Content-Length is required and should be non-zero
|
||||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
||||||
if r.ContentLength <= 0 {
|
if r.ContentLength <= 0 {
|
||||||
h.logAndSendError(ctx, w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength))
|
h.logAndSendError(w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unmarshal list of keys to be deleted.
|
// Unmarshal list of keys to be deleted.
|
||||||
requested := &DeleteObjectsRequest{}
|
requested := &DeleteObjectsRequest{}
|
||||||
if err := h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(requested); err != nil {
|
if err := xml.NewDecoder(r.Body).Decode(requested); err != nil {
|
||||||
h.logAndSendError(ctx, w, "couldn't decode body", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()))
|
h.logAndSendError(w, "couldn't decode body", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(requested.Objects) == 0 || len(requested.Objects) > maxObjectsToDelete {
|
if len(requested.Objects) == 0 || len(requested.Objects) > maxObjectsToDelete {
|
||||||
h.logAndSendError(ctx, w, "number of objects to delete must be greater than 0 and less or equal to 1000", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
|
h.logAndSendError(w, "number of objects to delete must be greater than 0 and less or equal to 1000", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
unique := make(map[string]struct{})
|
removed := make(map[string]*layer.VersionedObject)
|
||||||
toRemove := make([]*layer.VersionedObject, 0, len(requested.Objects))
|
toRemove := make([]*layer.VersionedObject, 0, len(requested.Objects))
|
||||||
for _, obj := range requested.Objects {
|
for _, obj := range requested.Objects {
|
||||||
versionedObj := &layer.VersionedObject{
|
versionedObj := &layer.VersionedObject{
|
||||||
Name: obj.ObjectName,
|
Name: obj.ObjectName,
|
||||||
VersionID: obj.VersionID,
|
VersionID: obj.VersionID,
|
||||||
}
|
}
|
||||||
key := versionedObj.String()
|
toRemove = append(toRemove, versionedObj)
|
||||||
if _, ok := unique[key]; !ok {
|
removed[versionedObj.String()] = versionedObj
|
||||||
toRemove = append(toRemove, versionedObj)
|
|
||||||
unique[key] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
response := &DeleteObjectsResponse{
|
response := &DeleteObjectsResponse{
|
||||||
|
@ -178,31 +202,31 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
networkInfo, err := h.obj.GetNetworkInfo(ctx)
|
marshaler := zapcore.ArrayMarshalerFunc(func(encoder zapcore.ArrayEncoder) error {
|
||||||
if err != nil {
|
for _, obj := range toRemove {
|
||||||
h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
|
encoder.AppendString(obj.String())
|
||||||
return
|
}
|
||||||
}
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
p := &layer.DeleteObjectParams{
|
p := &layer.DeleteObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Objects: toRemove,
|
Objects: toRemove,
|
||||||
Settings: bktSettings,
|
Settings: bktSettings,
|
||||||
NetworkInfo: networkInfo,
|
|
||||||
IsMultiple: true,
|
|
||||||
}
|
}
|
||||||
deletedObjects := h.obj.DeleteObjects(ctx, p)
|
deletedObjects := h.obj.DeleteObjects(r.Context(), p)
|
||||||
|
|
||||||
|
var errs []error
|
||||||
for _, obj := range deletedObjects {
|
for _, obj := range deletedObjects {
|
||||||
if obj.Error != nil {
|
if obj.Error != nil {
|
||||||
code := "BadRequest"
|
code := "BadRequest"
|
||||||
|
@ -215,6 +239,7 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
||||||
Key: obj.Name,
|
Key: obj.Name,
|
||||||
VersionID: obj.VersionID,
|
VersionID: obj.VersionID,
|
||||||
})
|
})
|
||||||
|
errs = append(errs, obj.Error)
|
||||||
} else if !requested.Quiet {
|
} else if !requested.Quiet {
|
||||||
deletedObj := DeletedObject{
|
deletedObj := DeletedObject{
|
||||||
ObjectIdentifier: ObjectIdentifier{
|
ObjectIdentifier: ObjectIdentifier{
|
||||||
|
@ -229,61 +254,40 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
||||||
response.DeletedObjects = append(response.DeletedObjects, deletedObj)
|
response.DeletedObjects = append(response.DeletedObjects, deletedObj)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if len(errs) != 0 {
|
||||||
|
fields := []zap.Field{
|
||||||
|
zap.Array("objects", marshaler),
|
||||||
|
zap.Errors("errors", errs),
|
||||||
|
}
|
||||||
|
h.log.Error("couldn't delete objects", fields...)
|
||||||
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, response); err != nil {
|
if err = api.EncodeToResponse(w, response); err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not write response", reqInfo, err)
|
h.logAndSendError(w, "could not write response", reqInfo, err, zap.Array("objects", marshaler))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
reqInfo := api.GetReqInfo(r.Context())
|
||||||
reqInfo := middleware.GetReqInfo(ctx)
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = checkOwner(bktInfo, reqInfo.User); err != nil {
|
|
||||||
h.logAndSendError(ctx, w, "request owner id does not match bucket owner id", reqInfo, err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var sessionToken *session.Container
|
var sessionToken *session.Container
|
||||||
|
|
||||||
boxData, err := middleware.GetBoxData(ctx)
|
boxData, err := layer.GetBoxData(r.Context())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
sessionToken = boxData.Gate.SessionTokenForDelete()
|
sessionToken = boxData.Gate.SessionTokenForDelete()
|
||||||
}
|
}
|
||||||
|
|
||||||
skipObjCheck := false
|
if err = h.obj.DeleteBucket(r.Context(), &layer.DeleteBucketParams{
|
||||||
if value, ok := r.Header[api.AmzForceBucketDelete]; ok {
|
|
||||||
s := value[0]
|
|
||||||
if s == "true" {
|
|
||||||
skipObjCheck = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = h.obj.DeleteBucket(ctx, &layer.DeleteBucketParams{
|
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
SessionToken: sessionToken,
|
SessionToken: sessionToken,
|
||||||
SkipCheck: skipObjCheck,
|
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
h.logAndSendError(ctx, w, "couldn't delete bucket", reqInfo, err)
|
h.logAndSendError(w, "couldn't delete bucket", reqInfo, err)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
chainIDs := []chain.ID{
|
|
||||||
getBucketChainID(chain.S3, bktInfo),
|
|
||||||
getBucketChainID(chain.Ingress, bktInfo),
|
|
||||||
getBucketCannedChainID(chain.S3, bktInfo.CID),
|
|
||||||
getBucketCannedChainID(chain.Ingress, bktInfo.CID),
|
|
||||||
}
|
|
||||||
if err = h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil {
|
|
||||||
h.logAndSendError(ctx, w, "failed to delete policy from storage", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
w.WriteHeader(http.StatusNoContent)
|
||||||
}
|
}
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue