forked from TrueCloudLab/frostfs-s3-gw
Compare commits
158 commits
Author | SHA1 | Date | |
---|---|---|---|
16eb289929 | |||
0ae7c35352 | |||
95d847d611 | |||
e0ce59fd32 | |||
09412d8f20 | |||
f2274b2786 | |||
f391966326 | |||
d986e74897 | |||
df1af2d2c9 | |||
04b8fc2b5f | |||
59b789f57e | |||
128939c01e | |||
4a4ce00994 | |||
980763c468 | |||
9395b5f39d | |||
11c1a86404 | |||
4515a7ae88 | |||
c5deb2e148 | |||
ea714c2e9e | |||
7bf31bea18 | |||
cc43975536 | |||
c4c757eea6 | |||
389e0de403 | |||
8da71c3ae0 | |||
cc9a68401f | |||
8f7ccb0f62 | |||
2c002b657e | |||
f215d200e8 | |||
51322cccdf | |||
3cd88d6204 | |||
e71ba5e22a | |||
e3141fc8e3 | |||
a12fea8a5b | |||
9875307c9b | |||
b1775f9478 | |||
4fa45bdac2 | |||
368c7d2acd | |||
31076796ce | |||
eff0de43d5 | |||
fb00dff83b | |||
d8f126b339 | |||
7ab902d8d2 | |||
0792fcf456 | |||
c46ffa8146 | |||
3260308cc0 | |||
d6e6a13576 | |||
17d40245de | |||
979d85b046 | |||
539dab8680 | |||
76008d4ba1 | |||
8bc19725ba | |||
9e64304499 | |||
94504e9746 | |||
a8458dbc27 | |||
424038de6c | |||
3cf27d281d | |||
3c7cb82553 | |||
57b7e83380 | |||
6a90f4e624 | |||
cb3753f286 | |||
81209e308c | |||
b78e55e101 | |||
25c24f5ce6 | |||
09c11262c6 | |||
f120715a37 | |||
aaed083d82 | |||
e35b582fe2 | |||
39fc7aa3ee | |||
da41f47826 | |||
9e5fb4be95 | |||
346243b159 | |||
03481274f0 | |||
c2adbd758a | |||
bc17ab5e47 | |||
9fadfbbc2f | |||
827ea1a41e | |||
968f10a72f | |||
582e6ac642 | |||
99f273f9af | |||
cd96adef36 | |||
738ce14f50 | |||
5358e39f71 | |||
34c1426b9f | |||
8ca73e2079 | |||
a87c636b4c | |||
26baf8a94e | |||
f187141ae5 | |||
3cffc782e9 | |||
d0e4d55772 | |||
42e72889a5 | |||
98815d5473 | |||
62615d7ab7 | |||
575ab4d294 | |||
d919e6cce2 | |||
056f168d77 | |||
9bdfe2a016 | |||
d6b506f6d9 | |||
a2e0b92575 | |||
b08f476ea7 | |||
f4275d837a | |||
664f83b2b7 | |||
136b5521fe | |||
a5f670d904 | |||
d76c4fe2a2 | |||
0637133c61 | |||
bf00fa6aa9 | |||
ff690ce996 | |||
534ae7f0f1 | |||
77673797f9 | |||
9e1766ff74 | |||
e73f11c251 | |||
5cb77018f8 | |||
fa68a4ce40 | |||
0644067496 | |||
481520705a | |||
28723f4a68 | |||
20719bd85c | |||
4f27e34974 | |||
3dc989d7fe | |||
69e77aecc9 | |||
c34680d157 | |||
f5326b9f04 | |||
51c5c227c2 | |||
c506620199 | |||
6cb0026007 | |||
971006a28c | |||
527e0dc612 | |||
3213dd7236 | |||
a031777a1b | |||
b2a5da8247 | |||
ec349e4523 | |||
977a20760b | |||
2948d1f942 | |||
c0011ebb8d | |||
456319d2f1 | |||
1d965b23ab | |||
3b83de31d2 | |||
70eedfc077 | |||
f86b82351a | |||
465eaa816a | |||
9241954496 | |||
77f8bdac58 | |||
91541a432d | |||
943b30d9f4 | |||
414f3943e2 | |||
9432782ce6 | |||
2b04fcb5ec | |||
280d11c794 | |||
ed34b2cae4 | |||
76f553d292 | |||
1513a9252b | |||
bb81afc14a | |||
58850f590e | |||
e25dc90c20 | |||
71bae5cd9a | |||
b5fae316cf | |||
9f3ea470e6 | |||
9787b29542 |
236 changed files with 20426 additions and 13425 deletions
|
@ -1,13 +1,14 @@
|
||||||
FROM golang:1.21 as builder
|
FROM golang:1.22 AS builder
|
||||||
|
|
||||||
ARG BUILD=now
|
ARG BUILD=now
|
||||||
ARG REPO=git.frostfs.info/TrueCloudLab/frostfs-s3-gw
|
ARG REPO=git.frostfs.info/TrueCloudLab/frostfs-s3-gw
|
||||||
ARG VERSION=dev
|
ARG VERSION=dev
|
||||||
|
ARG GOFLAGS=""
|
||||||
|
|
||||||
WORKDIR /src
|
WORKDIR /src
|
||||||
COPY . /src
|
COPY . /src
|
||||||
|
|
||||||
RUN make
|
RUN make GOFLAGS=${GOFLAGS}
|
||||||
|
|
||||||
# Executable image
|
# Executable image
|
||||||
FROM alpine AS frostfs-s3-gw
|
FROM alpine AS frostfs-s3-gw
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
.git
|
.git
|
||||||
.cache
|
.cache
|
||||||
.github
|
.forgejo
|
||||||
|
|
Before Width: | Height: | Size: 5.5 KiB After Width: | Height: | Size: 5.5 KiB |
|
@ -1,4 +1,8 @@
|
||||||
on: [pull_request]
|
on:
|
||||||
|
pull_request:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
builds:
|
builds:
|
||||||
|
@ -6,7 +10,7 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
go_versions: [ '1.20', '1.21' ]
|
go_versions: [ '1.22', '1.23' ]
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
|
|
|
@ -12,7 +12,7 @@ jobs:
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v3
|
||||||
with:
|
with:
|
||||||
go-version: '1.21'
|
go-version: '1.23'
|
||||||
|
|
||||||
- name: Run commit format checker
|
- name: Run commit format checker
|
||||||
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
|
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
|
||||||
|
|
|
@ -1,4 +1,8 @@
|
||||||
on: [pull_request]
|
on:
|
||||||
|
pull_request:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
lint:
|
lint:
|
||||||
|
@ -10,7 +14,7 @@ jobs:
|
||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v3
|
||||||
with:
|
with:
|
||||||
go-version: '1.21'
|
go-version: '1.23'
|
||||||
cache: true
|
cache: true
|
||||||
|
|
||||||
- name: Install linters
|
- name: Install linters
|
||||||
|
@ -24,7 +28,7 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
go_versions: [ '1.20', '1.21' ]
|
go_versions: [ '1.22', '1.23' ]
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
|
|
|
@ -1,4 +1,8 @@
|
||||||
on: [pull_request]
|
on:
|
||||||
|
pull_request:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
vulncheck:
|
vulncheck:
|
||||||
|
@ -12,7 +16,7 @@ jobs:
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v3
|
||||||
with:
|
with:
|
||||||
go-version: '1.21'
|
go-version: '1.23'
|
||||||
|
|
||||||
- name: Install govulncheck
|
- name: Install govulncheck
|
||||||
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||||
|
|
1
.github/CODEOWNERS
vendored
1
.github/CODEOWNERS
vendored
|
@ -1 +0,0 @@
|
||||||
* @alexvanin @dkirillov
|
|
|
@ -12,7 +12,8 @@ run:
|
||||||
# output configuration options
|
# output configuration options
|
||||||
output:
|
output:
|
||||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
||||||
format: tab
|
formats:
|
||||||
|
- format: tab
|
||||||
|
|
||||||
# all available settings of specific linters
|
# all available settings of specific linters
|
||||||
linters-settings:
|
linters-settings:
|
||||||
|
@ -65,3 +66,6 @@ issues:
|
||||||
- EXC0003 # test/Test ... consider calling this
|
- EXC0003 # test/Test ... consider calling this
|
||||||
- EXC0004 # govet
|
- EXC0004 # govet
|
||||||
- EXC0005 # C-style breaks
|
- EXC0005 # C-style breaks
|
||||||
|
exclude-dirs:
|
||||||
|
- api/auth/signer/v4asdk2
|
||||||
|
- api/auth/signer/v4sdk2
|
||||||
|
|
241
CHANGELOG.md
241
CHANGELOG.md
|
@ -4,15 +4,208 @@ This document outlines major changes between releases.
|
||||||
|
|
||||||
## [Unreleased]
|
## [Unreleased]
|
||||||
|
|
||||||
|
## [0.32.0] - Khumbu - 2024-12-20
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Metric of dropped logs by log sampler (#502)
|
||||||
|
- SigV4A signature algorithm (#339)
|
||||||
|
- TLS Termination header for SSE-C (#562)
|
||||||
|
- Kludge profile support (#147)
|
||||||
|
- Netmap support in tree pool (#577)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Improved multipart removal speed (#559)
|
||||||
|
- Updated tree service pool without api-go dependency (#570)
|
||||||
|
|
||||||
|
## [0.31.3] - 2024-12-17
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Return BucketAlreadyExists when global domain taken (#584)
|
||||||
|
- Fix list-buckets vhs routing (#583)
|
||||||
|
- Skip port when matching listen domains (#586)
|
||||||
|
|
||||||
|
## [0.31.2] - 2024-12-13
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Unable to remove EC object (#576)
|
||||||
|
|
||||||
|
## [0.31.1] - 2024-11-28
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Ignore precondition headers with invalid date format (#563)
|
||||||
|
- MD5 calculation of object-part with SSE-C (#543)
|
||||||
|
|
||||||
|
## [0.31.0] - Rongbuk - 2024-11-20
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Docker warnings during image build (#421)
|
||||||
|
- `PartNumberMarker` in ListMultipart response (#451)
|
||||||
|
- PostObject handling (#456)
|
||||||
|
- Tag logging errors (#452)
|
||||||
|
- Removing of duplicated parts in tree service during split brain (#448)
|
||||||
|
- Container resolving (#482)
|
||||||
|
- FrostFS to S3 error transformation (#488)
|
||||||
|
- Default bucket routing (#507)
|
||||||
|
- encoding-type in ListBucketObjectVersions (#404)
|
||||||
|
- SIGHUP support for `tracing.enabled` config parameter (#520)
|
||||||
|
- `trace_id` parameter in logs (#501)
|
||||||
|
- Listing marker processing (#539)
|
||||||
|
- Content-MD5 header check (#540)
|
||||||
|
- Precondition check (#538)
|
||||||
|
- Bucket name check during all S3 operations (#556)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Support for separate container for all CORS settings (#422)
|
||||||
|
- `X-Amz-Force-Delete-Bucket` header for forced bucket removal (#31)
|
||||||
|
- `Location` support in CompleteMultipart response (#451)
|
||||||
|
- Tree pool request duration metric (#447)
|
||||||
|
- Expiration lifecycle configuration support (#42, #412, #459, #460, #516, #536)
|
||||||
|
- Add support for virtual hosted style addressing (#446, #449, #493)
|
||||||
|
- Support `frostfs.graceful_close_on_switch_timeout` (#475)
|
||||||
|
- Vulnerability report document (#413)
|
||||||
|
- Support patch object method (#462, #473, #466, #479)
|
||||||
|
- Enhanced logging and request reproducer (#369)
|
||||||
|
- Root CA configuration for tracing (#484)
|
||||||
|
- Log sampling policy configuration (#461)
|
||||||
|
- `sign` command to `frostfs-s3-authmate` (#467)
|
||||||
|
- Support custom aws credentials (#509)
|
||||||
|
- Source IP binding configuration for FrostFS requests (#521)
|
||||||
|
- Tracing attributes (#549)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Split `FrostFS` interface into separate read methods (#427)
|
||||||
|
- golangci-lint v1.60 support (#474)
|
||||||
|
- Updated Go version to 1.22 (#470)
|
||||||
|
- Container removal after failed bucket creation (#434)
|
||||||
|
- Explicit check for `.` symbol in bucket name (#506)
|
||||||
|
- Transaction waiter in contract clients (#522)
|
||||||
|
- Avoid maintenance mode storage node during object operations (#524)
|
||||||
|
- Content-Type does not include in Presigned URL of s3-authmate (#505)
|
||||||
|
- Check owner ID before deleting bucket (#528)
|
||||||
|
- S3-Authmate now uses APE instead basic-ACL (#553)
|
||||||
|
|
||||||
|
### Removed
|
||||||
|
- Reduce using mutex when update app settings (#329)
|
||||||
|
|
||||||
|
## [0.30.9] - 2024-12-13
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Unable to remove EC object (#576)
|
||||||
|
|
||||||
|
## [0.30.8] - 2024-10-18
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Error handling for correct connection switch in SDK Pool (#517)
|
||||||
|
|
||||||
|
## [0.30.7] - 2024-10-03
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Correct aws-chunk encoding size handling (#511)
|
||||||
|
|
||||||
|
|
||||||
|
## [0.30.6] - 2024-09-17
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Object size of objects upload with aws-chunked encoding (#450)
|
||||||
|
- Object size of objects upload with negative Content-Length (#486)
|
||||||
|
|
||||||
|
## [0.30.5] - 2024-09-16
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Panic catchers for fuzzing tests (#492)
|
||||||
|
|
||||||
|
## [0.30.4] - 2024-09-03
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Fuzzing tests (#480)
|
||||||
|
|
||||||
|
## [0.30.3] - 2024-08-27
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Empty listing when multipart upload contains more than 1000 parts (#471)
|
||||||
|
|
||||||
|
## [0.30.2] - 2024-08-20
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Error counting in pool component before connection switch (#468)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Log of endpoint address during tree pool errors (#468)
|
||||||
|
|
||||||
|
## [0.30.1] - 2024-07-25
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Redundant system node removal in tree service (#437)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Log details on SDK Pool health status change (#439)
|
||||||
|
|
||||||
|
## [0.30.0] - Kangshung -2024-07-19
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Fix HTTP/2 requests (#341)
|
||||||
|
- Fix Decoder.CharsetReader is nil (#379)
|
||||||
|
- Fix flaky ACL encode test (#340)
|
||||||
|
- Docs grammar (#432)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Add new `reconnect_interval` config param for server rebinding (#291)
|
||||||
|
- Support `GetBucketPolicyStatus` (#301)
|
||||||
|
- Support request IP filter with policy (#371, #377)
|
||||||
|
- Support tag checks in policies (#357, #365, #392, #403, #411)
|
||||||
|
- Support IAM-MFA checks (#367)
|
||||||
|
- More docs (#334, #353)
|
||||||
|
- Add `register-user` command to `authmate` (#414)
|
||||||
|
- `User` field in request log (#396)
|
||||||
|
- Erasure coding support in placement policy (#400)
|
||||||
|
- Improved test coverage (#402)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Update dependencies noted by govulncheck (#368)
|
||||||
|
- Improve test coverage (#380, #387)
|
||||||
|
- Support updated naming in native policy JSON (#385)
|
||||||
|
- Improve determining AccessBox latest version (#335)
|
||||||
|
- Don't set full_control policy for bucket owner (#407)
|
||||||
|
|
||||||
|
### Removed
|
||||||
|
- Remove control api (#406)
|
||||||
|
- Remove notifications (#401)
|
||||||
|
- Remove `layer.Client` interface (#410)
|
||||||
|
- Remove extended ACL related code (#372)
|
||||||
|
|
||||||
|
## [0.29.3] - 2024-07-19
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Support tree split environment when multiple nodes
|
||||||
|
may be part of the same sub path (#430)
|
||||||
|
- Collision of multipart name and system data in the tree (#430)
|
||||||
|
- Workaround for removal of multiple null versions in unversioned bucket (#430)
|
||||||
|
|
||||||
|
## [0.29.2] - 2024-07-03
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Parsing of put-bucket-setting retry configuration (#398)
|
||||||
|
|
||||||
|
## [0.29.1] - 2024-06-20
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- OPTIONS request processing for object operations (#399)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Retries of put-bucket-setting operation during container creation (#398)
|
||||||
|
|
||||||
|
## [0.29.0] - Zemu - 2024-05-27
|
||||||
|
|
||||||
### Fixed
|
### Fixed
|
||||||
- Fix marshaling errors in `DeleteObjects` method (#222)
|
- Fix marshaling errors in `DeleteObjects` method (#222)
|
||||||
- Fix status code in GET/HEAD delete marker (#226)
|
- Fix status code in GET/HEAD delete marker (#226)
|
||||||
- Fix `NextVersionIDMarker` in `list-object-versions` (#248)
|
- Fix `NextVersionIDMarker` in `list-object-versions` (#248)
|
||||||
- Fix possibility of panic during SIGHUP (#288)
|
- Fix possibility of panic during SIGHUP (#288)
|
||||||
- Fix flaky `TestErrorTimeoutChecking` (`make test` sometimes failed) (#290)
|
- Fix flaky `TestErrorTimeoutChecking` (`make test` sometimes failed) (#290)
|
||||||
- Fix user owner ID in billing metrics (#321)
|
- Fix log-level change on SIGHUP (#313)
|
||||||
- Fix HTTP/2 requests (#341)
|
- Fix anonymous put request (#311)
|
||||||
- Fix Decoder.CharsetReader is nil (#379)
|
- Fix routine leak from nns resolver (#324)
|
||||||
|
- Fix svace errors (#325, #328)
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
- Add new `frostfs.buffer_max_size_for_put` config param and sync TZ hash for PUT operations (#197)
|
- Add new `frostfs.buffer_max_size_for_put` config param and sync TZ hash for PUT operations (#197)
|
||||||
|
@ -24,14 +217,10 @@ This document outlines major changes between releases.
|
||||||
- Support per namespace placement policies configuration (see `namespaces.config` config param) (#266)
|
- Support per namespace placement policies configuration (see `namespaces.config` config param) (#266)
|
||||||
- Support control api to manage policies. See `control` config section (#258)
|
- Support control api to manage policies. See `control` config section (#258)
|
||||||
- Add `namespace` label to billing metrics (#271)
|
- Add `namespace` label to billing metrics (#271)
|
||||||
- Support policy-engine (#257)
|
- Support policy-engine (#257, #259, #282, #283, #302, #307, #345, #351, #358, #360, #362, #383, #354)
|
||||||
- Support `policy` contract (#259)
|
|
||||||
- Support `proxy` contract (#287)
|
- Support `proxy` contract (#287)
|
||||||
- Authmate: support custom attributes (#292)
|
- Authmate: support custom attributes (#292)
|
||||||
- Add new `reconnect_interval` config param (#291)
|
|
||||||
- Support `GetBucketPolicyStatus` (#301)
|
|
||||||
- Add FrostfsID cache (#269)
|
- Add FrostfsID cache (#269)
|
||||||
- Add new `source_ip_header` config param (#371)
|
|
||||||
|
|
||||||
### Changed
|
### Changed
|
||||||
- Generalise config param `use_default_xmlns_for_complete_multipart` to `use_default_xmlns` so that use default xmlns for all requests (#221)
|
- Generalise config param `use_default_xmlns_for_complete_multipart` to `use_default_xmlns` so that use default xmlns for all requests (#221)
|
||||||
|
@ -40,9 +229,23 @@ This document outlines major changes between releases.
|
||||||
- Use tombstone when delete multipart upload (#275)
|
- Use tombstone when delete multipart upload (#275)
|
||||||
- Support new parameter `cache.accessbox.removing_check_interval` (#305)
|
- Support new parameter `cache.accessbox.removing_check_interval` (#305)
|
||||||
- Use APE rules instead of eACL in container creation (#306)
|
- Use APE rules instead of eACL in container creation (#306)
|
||||||
|
- Rework bucket policy with policy-engine (#261)
|
||||||
|
- Improved object listing speed (#165, #347)
|
||||||
|
- Logging improvement (#300, #318)
|
||||||
|
|
||||||
### Removed
|
### Removed
|
||||||
- Drop sending whitespace characters during complete multipart upload and related config param `kludge.complete_multipart_keepalive` (#227)
|
- Drop sending whitespace characters during complete multipart upload and related config param `kludge.complete_multipart_keepalive` (#227)
|
||||||
|
- Unused legacy minio related code (#299)
|
||||||
|
- Redundant output with journald logging (#298)
|
||||||
|
|
||||||
|
## [0.28.2] - 2024-05-27
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- `anon` user in billing metrics (#321)
|
||||||
|
- Parts are not removed when multipart object removed (#370)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Put request in duration metrics (#280)
|
||||||
|
|
||||||
## [0.28.1] - 2024-01-24
|
## [0.28.1] - 2024-01-24
|
||||||
|
|
||||||
|
@ -160,4 +363,24 @@ To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs
|
||||||
[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/b2148cc3...v0.27.0
|
[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/b2148cc3...v0.27.0
|
||||||
[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.27.0...v0.28.0
|
[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.27.0...v0.28.0
|
||||||
[0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.0...v0.28.1
|
[0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.0...v0.28.1
|
||||||
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.1...master
|
[0.28.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.1...v0.28.2
|
||||||
|
[0.29.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.2...v0.29.0
|
||||||
|
[0.29.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.0...v0.29.1
|
||||||
|
[0.29.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.1...v0.29.2
|
||||||
|
[0.29.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.2...v0.29.3
|
||||||
|
[0.30.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.3...v0.30.0
|
||||||
|
[0.30.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.0...v0.30.1
|
||||||
|
[0.30.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.1...v0.30.2
|
||||||
|
[0.30.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.2...v0.30.3
|
||||||
|
[0.30.4]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.3...v0.30.4
|
||||||
|
[0.30.5]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.4...v0.30.5
|
||||||
|
[0.30.6]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.5...v0.30.6
|
||||||
|
[0.30.7]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.6...v0.30.7
|
||||||
|
[0.30.8]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.7...v0.30.8
|
||||||
|
[0.30.9]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.8...v0.30.9
|
||||||
|
[0.31.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.9...v0.31.0
|
||||||
|
[0.31.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.0...v0.31.1
|
||||||
|
[0.31.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.1...v0.31.2
|
||||||
|
[0.31.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.2...v0.31.3
|
||||||
|
[0.32.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.3...v0.32.0
|
||||||
|
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.0...master
|
3
CODEOWNERS
Normal file
3
CODEOWNERS
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
.* @TrueCloudLab/storage-services-developers @TrueCloudLab/storage-services-committers
|
||||||
|
.forgejo/.* @potyarkin
|
||||||
|
Makefile @potyarkin
|
48
Makefile
48
Makefile
|
@ -3,9 +3,9 @@
|
||||||
# Common variables
|
# Common variables
|
||||||
REPO ?= $(shell go list -m)
|
REPO ?= $(shell go list -m)
|
||||||
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
|
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
|
||||||
GO_VERSION ?= 1.20
|
GO_VERSION ?= 1.22
|
||||||
LINT_VERSION ?= 1.56.1
|
LINT_VERSION ?= 1.60.1
|
||||||
TRUECLOUDLAB_LINT_VERSION ?= 0.0.5
|
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
|
||||||
BINDIR = bin
|
BINDIR = bin
|
||||||
|
|
||||||
METRICS_DUMP_OUT ?= ./metrics-dump.json
|
METRICS_DUMP_OUT ?= ./metrics-dump.json
|
||||||
|
@ -14,15 +14,23 @@ METRICS_DUMP_OUT ?= ./metrics-dump.json
|
||||||
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
|
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
|
||||||
BINS = $(addprefix $(BINDIR)/, $(CMDS))
|
BINS = $(addprefix $(BINDIR)/, $(CMDS))
|
||||||
|
|
||||||
|
GOFLAGS ?=
|
||||||
|
|
||||||
# Variables for docker
|
# Variables for docker
|
||||||
REPO_BASENAME = $(shell basename `go list -m`)
|
REPO_BASENAME = $(shell basename `go list -m`)
|
||||||
HUB_IMAGE ?= "truecloudlab/$(REPO_BASENAME)"
|
HUB_IMAGE ?= "git.frostfs.info/truecloudlab/$(REPO_BASENAME)"
|
||||||
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
|
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
|
||||||
|
|
||||||
OUTPUT_LINT_DIR ?= $(shell pwd)/bin
|
OUTPUT_LINT_DIR ?= $(shell pwd)/bin
|
||||||
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
|
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
|
||||||
TMP_DIR := .cache
|
TMP_DIR := .cache
|
||||||
|
|
||||||
|
# Variables for fuzzing
|
||||||
|
FUZZ_NGFUZZ_DIR ?= ""
|
||||||
|
FUZZ_TIMEOUT ?= 30
|
||||||
|
FUZZ_FUNCTIONS ?= "all"
|
||||||
|
FUZZ_AUX ?= ""
|
||||||
|
|
||||||
.PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean protoc
|
.PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean protoc
|
||||||
|
|
||||||
# .deb package versioning
|
# .deb package versioning
|
||||||
|
@ -38,6 +46,7 @@ all: $(BINS)
|
||||||
$(BINS): $(BINDIR) dep
|
$(BINS): $(BINDIR) dep
|
||||||
@echo "⇒ Build $@"
|
@echo "⇒ Build $@"
|
||||||
CGO_ENABLED=0 \
|
CGO_ENABLED=0 \
|
||||||
|
GOFLAGS=$(GOFLAGS) \
|
||||||
go build -v -trimpath \
|
go build -v -trimpath \
|
||||||
-ldflags "-X $(REPO)/internal/version.Version=$(VERSION)" \
|
-ldflags "-X $(REPO)/internal/version.Version=$(VERSION)" \
|
||||||
-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))
|
-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))
|
||||||
|
@ -64,7 +73,7 @@ docker/%:
|
||||||
-w /src \
|
-w /src \
|
||||||
-u `stat -c "%u:%g" .` \
|
-u `stat -c "%u:%g" .` \
|
||||||
--env HOME=/src \
|
--env HOME=/src \
|
||||||
golang:$(GO_VERSION) make $*,\
|
golang:$(GO_VERSION) make GOFLAGS=$(GOFLAGS) $*,\
|
||||||
@echo "supported docker targets: all $(BINS) lint")
|
@echo "supported docker targets: all $(BINS) lint")
|
||||||
|
|
||||||
# Run tests
|
# Run tests
|
||||||
|
@ -76,6 +85,34 @@ cover:
|
||||||
@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
|
@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
|
||||||
@go tool cover -html=coverage.txt -o coverage.html
|
@go tool cover -html=coverage.txt -o coverage.html
|
||||||
|
|
||||||
|
# Run fuzzing
|
||||||
|
CLANG := $(shell which clang-17 2>/dev/null)
|
||||||
|
.PHONY: check-clang all
|
||||||
|
check-clang:
|
||||||
|
ifeq ($(CLANG),)
|
||||||
|
@echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh "
|
||||||
|
@exit 1
|
||||||
|
endif
|
||||||
|
|
||||||
|
.PHONY: check-ngfuzz all
|
||||||
|
check-ngfuzz:
|
||||||
|
@if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \
|
||||||
|
echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
.PHONY: install-fuzzing-deps
|
||||||
|
install-fuzzing-deps: check-clang check-ngfuzz
|
||||||
|
|
||||||
|
.PHONY: fuzz
|
||||||
|
fuzz: install-fuzzing-deps
|
||||||
|
@START_PATH=$$(pwd); \
|
||||||
|
ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
|
||||||
|
cd $(FUZZ_NGFUZZ_DIR) && \
|
||||||
|
./ngfuzz -clean && \
|
||||||
|
./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
|
||||||
|
./ngfuzz -report
|
||||||
|
|
||||||
# Reformat code
|
# Reformat code
|
||||||
format:
|
format:
|
||||||
@echo "⇒ Processing gofmt check"
|
@echo "⇒ Processing gofmt check"
|
||||||
|
@ -87,6 +124,7 @@ image:
|
||||||
@docker build \
|
@docker build \
|
||||||
--build-arg REPO=$(REPO) \
|
--build-arg REPO=$(REPO) \
|
||||||
--build-arg VERSION=$(VERSION) \
|
--build-arg VERSION=$(VERSION) \
|
||||||
|
--build-arg GOFLAGS=$(GOFLAGS) \
|
||||||
--rm \
|
--rm \
|
||||||
-f .docker/Dockerfile \
|
-f .docker/Dockerfile \
|
||||||
-t $(HUB_IMAGE):$(HUB_TAG) .
|
-t $(HUB_IMAGE):$(HUB_TAG) .
|
||||||
|
|
20
README.md
20
README.md
|
@ -1,5 +1,5 @@
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
|
<img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
|
||||||
</p>
|
</p>
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
|
<a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
|
||||||
|
@ -93,6 +93,24 @@ HTTP/1.1 200 OK
|
||||||
|
|
||||||
Also, you can configure domains using `.env` variables or `yaml` file.
|
Also, you can configure domains using `.env` variables or `yaml` file.
|
||||||
|
|
||||||
|
## Fuzzing
|
||||||
|
To run fuzzing tests use the following command:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ make fuzz
|
||||||
|
```
|
||||||
|
|
||||||
|
This command will install dependencies for the fuzzing process and run existing fuzzing tests.
|
||||||
|
|
||||||
|
You can also use the following arguments:
|
||||||
|
|
||||||
|
```
|
||||||
|
FUZZ_TIMEOUT - time to run each fuzzing test (default 30)
|
||||||
|
FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all")
|
||||||
|
FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug")
|
||||||
|
FUZZ_NGFUZZ_DIR - path to ngfuzz tool
|
||||||
|
````
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
|
|
||||||
- [Configuration](./docs/configuration.md)
|
- [Configuration](./docs/configuration.md)
|
||||||
|
|
26
SECURITY.md
Normal file
26
SECURITY.md
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
# Security Policy
|
||||||
|
|
||||||
|
|
||||||
|
## How To Report a Vulnerability
|
||||||
|
|
||||||
|
If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.
|
||||||
|
|
||||||
|
**Please do not report security vulnerabilities through public issues, discussions, or change requests.**
|
||||||
|
|
||||||
|
Instead, you can report it using one of the following ways:
|
||||||
|
|
||||||
|
* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email
|
||||||
|
|
||||||
|
Please include as much of the information listed below as you can to help us better understand and resolve the issue:
|
||||||
|
|
||||||
|
* The type of issue (e.g., buffer overflow, or cross-site scripting)
|
||||||
|
* Affected version(s)
|
||||||
|
* Impact of the issue, including how an attacker might exploit the issue
|
||||||
|
* Step-by-step instructions to reproduce the issue
|
||||||
|
* The location of the affected source code (tag/branch/commit or direct URL)
|
||||||
|
* Full paths of source file(s) related to the manifestation of the issue
|
||||||
|
* Any special configuration required to reproduce the issue
|
||||||
|
* Any log files that are related to this issue (if possible)
|
||||||
|
* Proof-of-concept or exploit code (if possible)
|
||||||
|
|
||||||
|
This information will help us triage your report more quickly.
|
2
VERSION
2
VERSION
|
@ -1 +1 @@
|
||||||
v0.28.1
|
v0.32.0
|
||||||
|
|
|
@ -2,38 +2,55 @@ package auth
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"crypto"
|
||||||
"crypto/hmac"
|
"crypto/hmac"
|
||||||
|
"crypto/rand"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"mime/multipart"
|
"mime/multipart"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
|
v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
|
||||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
|
||||||
|
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
"github.com/aws/aws-sdk-go-v2/aws"
|
||||||
|
"github.com/aws/aws-sdk-go-v2/credentials"
|
||||||
)
|
)
|
||||||
|
|
||||||
// authorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
|
var (
|
||||||
var authorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)
|
// AuthorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
|
||||||
|
AuthorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)
|
||||||
|
|
||||||
// postPolicyCredentialRegexp -- is regexp for credentials when uploading file using POST with policy.
|
// authorizationFieldV4aRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
|
||||||
var postPolicyCredentialRegexp = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request`)
|
authorizationFieldV4aRegexp = regexp.MustCompile(`AWS4-ECDSA-P256-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)
|
||||||
|
|
||||||
|
// postPolicyCredentialRegexp -- is regexp for credentials when uploading file using POST with policy.
|
||||||
|
postPolicyCredentialRegexp = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request`)
|
||||||
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
Center struct {
|
Center struct {
|
||||||
reg *RegexpSubmatcher
|
reg *RegexpSubmatcher
|
||||||
|
regV4a *RegexpSubmatcher
|
||||||
postReg *RegexpSubmatcher
|
postReg *RegexpSubmatcher
|
||||||
cli tokens.Credentials
|
cli tokens.Credentials
|
||||||
allowedAccessKeyIDPrefixes []string // empty slice means all access key ids are allowed
|
allowedAccessKeyIDPrefixes []string // empty slice means all access key ids are allowed
|
||||||
|
settings CenterSettings
|
||||||
|
}
|
||||||
|
|
||||||
|
CenterSettings interface {
|
||||||
|
AccessBoxContainer() (cid.ID, bool)
|
||||||
}
|
}
|
||||||
|
|
||||||
//nolint:revive
|
//nolint:revive
|
||||||
|
@ -41,23 +58,26 @@ type (
|
||||||
AccessKeyID string
|
AccessKeyID string
|
||||||
Service string
|
Service string
|
||||||
Region string
|
Region string
|
||||||
SignatureV4 string
|
Signature string
|
||||||
SignedFields []string
|
SignedFields []string
|
||||||
Date string
|
Date string
|
||||||
IsPresigned bool
|
IsPresigned bool
|
||||||
Expiration time.Duration
|
Expiration time.Duration
|
||||||
|
Preamble string
|
||||||
|
PayloadHash string
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
accessKeyPartsNum = 2
|
authHeaderPartsNum = 6
|
||||||
authHeaderPartsNum = 6
|
authHeaderV4aPartsNum = 5
|
||||||
maxFormSizeMemory = 50 * 1048576 // 50 MB
|
maxFormSizeMemory = 50 * 1048576 // 50 MB
|
||||||
|
|
||||||
AmzAlgorithm = "X-Amz-Algorithm"
|
AmzAlgorithm = "X-Amz-Algorithm"
|
||||||
AmzCredential = "X-Amz-Credential"
|
AmzCredential = "X-Amz-Credential"
|
||||||
AmzSignature = "X-Amz-Signature"
|
AmzSignature = "X-Amz-Signature"
|
||||||
AmzSignedHeaders = "X-Amz-SignedHeaders"
|
AmzSignedHeaders = "X-Amz-SignedHeaders"
|
||||||
|
AmzRegionSet = "X-Amz-Region-Set"
|
||||||
AmzExpires = "X-Amz-Expires"
|
AmzExpires = "X-Amz-Expires"
|
||||||
AmzDate = "X-Amz-Date"
|
AmzDate = "X-Amz-Date"
|
||||||
AmzContentSHA256 = "X-Amz-Content-Sha256"
|
AmzContentSHA256 = "X-Amz-Content-Sha256"
|
||||||
|
@ -82,47 +102,59 @@ var ContentSHA256HeaderStandardValue = map[string]struct{}{
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates an instance of AuthCenter.
|
// New creates an instance of AuthCenter.
|
||||||
func New(creds tokens.Credentials, prefixes []string) *Center {
|
func New(creds tokens.Credentials, prefixes []string, settings CenterSettings) *Center {
|
||||||
return &Center{
|
return &Center{
|
||||||
cli: creds,
|
cli: creds,
|
||||||
reg: NewRegexpMatcher(authorizationFieldRegexp),
|
reg: NewRegexpMatcher(AuthorizationFieldRegexp),
|
||||||
|
regV4a: NewRegexpMatcher(authorizationFieldV4aRegexp),
|
||||||
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
||||||
allowedAccessKeyIDPrefixes: prefixes,
|
allowedAccessKeyIDPrefixes: prefixes,
|
||||||
|
settings: settings,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Center) parseAuthHeader(header string) (*AuthHeader, error) {
|
const (
|
||||||
submatches := c.reg.GetSubmatches(header)
|
signaturePreambleSigV4 = "AWS4-HMAC-SHA256"
|
||||||
if len(submatches) != authHeaderPartsNum {
|
signaturePreambleSigV4A = "AWS4-ECDSA-P256-SHA256"
|
||||||
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed), header)
|
)
|
||||||
}
|
|
||||||
|
|
||||||
accessKey := strings.Split(submatches["access_key_id"], "0")
|
func (c *Center) parseAuthHeader(authHeader string, headers http.Header) (*AuthHeader, error) {
|
||||||
if len(accessKey) != accessKeyPartsNum {
|
preamble, _, _ := strings.Cut(authHeader, " ")
|
||||||
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID), accessKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
signedFields := strings.Split(submatches["signed_header_fields"], ";")
|
var (
|
||||||
|
submatches map[string]string
|
||||||
|
region string
|
||||||
|
)
|
||||||
|
|
||||||
|
switch preamble {
|
||||||
|
case signaturePreambleSigV4:
|
||||||
|
submatches = c.reg.GetSubmatches(authHeader)
|
||||||
|
if len(submatches) != authHeaderPartsNum {
|
||||||
|
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), authHeader)
|
||||||
|
}
|
||||||
|
region = submatches["region"]
|
||||||
|
case signaturePreambleSigV4A:
|
||||||
|
submatches = c.regV4a.GetSubmatches(authHeader)
|
||||||
|
if len(submatches) != authHeaderV4aPartsNum {
|
||||||
|
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), authHeader)
|
||||||
|
}
|
||||||
|
region = headers.Get(AmzRegionSet)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), authHeader)
|
||||||
|
}
|
||||||
|
|
||||||
return &AuthHeader{
|
return &AuthHeader{
|
||||||
AccessKeyID: submatches["access_key_id"],
|
AccessKeyID: submatches["access_key_id"],
|
||||||
Service: submatches["service"],
|
Service: submatches["service"],
|
||||||
Region: submatches["region"],
|
Region: region,
|
||||||
SignatureV4: submatches["v4_signature"],
|
Signature: submatches["v4_signature"],
|
||||||
SignedFields: signedFields,
|
SignedFields: strings.Split(submatches["signed_header_fields"], ";"),
|
||||||
Date: submatches["date"],
|
Date: submatches["date"],
|
||||||
|
Preamble: preamble,
|
||||||
|
PayloadHash: headers.Get(AmzContentSHA256),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getAddress(accessKeyID string) (oid.Address, error) {
|
|
||||||
var addr oid.Address
|
|
||||||
if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err != nil {
|
|
||||||
return addr, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID), accessKeyID)
|
|
||||||
}
|
|
||||||
|
|
||||||
return addr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func IsStandardContentSHA256(key string) bool {
|
func IsStandardContentSHA256(key string) bool {
|
||||||
_, ok := ContentSHA256HeaderStandardValue[key]
|
_, ok := ContentSHA256HeaderStandardValue[key]
|
||||||
return ok
|
return ok
|
||||||
|
@ -137,7 +169,7 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
||||||
)
|
)
|
||||||
|
|
||||||
queryValues := r.URL.Query()
|
queryValues := r.URL.Query()
|
||||||
if queryValues.Get(AmzAlgorithm) == "AWS4-HMAC-SHA256" {
|
if queryValues.Get(AmzAlgorithm) == signaturePreambleSigV4 {
|
||||||
creds := strings.Split(queryValues.Get(AmzCredential), "/")
|
creds := strings.Split(queryValues.Get(AmzCredential), "/")
|
||||||
if len(creds) != 5 || creds[4] != "aws4_request" {
|
if len(creds) != 5 || creds[4] != "aws4_request" {
|
||||||
return nil, fmt.Errorf("bad X-Amz-Credential")
|
return nil, fmt.Errorf("bad X-Amz-Credential")
|
||||||
|
@ -146,14 +178,37 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
||||||
AccessKeyID: creds[0],
|
AccessKeyID: creds[0],
|
||||||
Service: creds[3],
|
Service: creds[3],
|
||||||
Region: creds[2],
|
Region: creds[2],
|
||||||
SignatureV4: queryValues.Get(AmzSignature),
|
Signature: queryValues.Get(AmzSignature),
|
||||||
SignedFields: strings.Split(queryValues.Get(AmzSignedHeaders), ";"),
|
SignedFields: strings.Split(queryValues.Get(AmzSignedHeaders), ";"),
|
||||||
Date: creds[1],
|
Date: creds[1],
|
||||||
IsPresigned: true,
|
IsPresigned: true,
|
||||||
|
Preamble: signaturePreambleSigV4,
|
||||||
|
PayloadHash: r.Header.Get(AmzContentSHA256),
|
||||||
}
|
}
|
||||||
authHdr.Expiration, err = time.ParseDuration(queryValues.Get(AmzExpires) + "s")
|
authHdr.Expiration, err = time.ParseDuration(queryValues.Get(AmzExpires) + "s")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't parse X-Amz-Expires: %w", err)
|
return nil, fmt.Errorf("%w: couldn't parse X-Amz-Expires %v", apierr.GetAPIError(apierr.ErrMalformedExpires), err)
|
||||||
|
}
|
||||||
|
signatureDateTimeStr = queryValues.Get(AmzDate)
|
||||||
|
} else if queryValues.Get(AmzAlgorithm) == signaturePreambleSigV4A {
|
||||||
|
creds := strings.Split(queryValues.Get(AmzCredential), "/")
|
||||||
|
if len(creds) != 4 || creds[3] != "aws4_request" {
|
||||||
|
return nil, fmt.Errorf("bad X-Amz-Credential")
|
||||||
|
}
|
||||||
|
authHdr = &AuthHeader{
|
||||||
|
AccessKeyID: creds[0],
|
||||||
|
Service: creds[2],
|
||||||
|
Region: queryValues.Get(AmzRegionSet),
|
||||||
|
Signature: queryValues.Get(AmzSignature),
|
||||||
|
SignedFields: strings.Split(queryValues.Get(AmzSignedHeaders), ";"),
|
||||||
|
Date: creds[1],
|
||||||
|
IsPresigned: true,
|
||||||
|
Preamble: signaturePreambleSigV4A,
|
||||||
|
PayloadHash: r.Header.Get(AmzContentSHA256),
|
||||||
|
}
|
||||||
|
authHdr.Expiration, err = time.ParseDuration(queryValues.Get(AmzExpires) + "s")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("%w: couldn't parse X-Amz-Expires %v", apierr.GetAPIError(apierr.ErrMalformedExpires), err)
|
||||||
}
|
}
|
||||||
signatureDateTimeStr = queryValues.Get(AmzDate)
|
signatureDateTimeStr = queryValues.Get(AmzDate)
|
||||||
} else {
|
} else {
|
||||||
|
@ -164,7 +219,7 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("%w: %v", middleware.ErrNoAuthorizationHeader, authHeaderField)
|
return nil, fmt.Errorf("%w: %v", middleware.ErrNoAuthorizationHeader, authHeaderField)
|
||||||
}
|
}
|
||||||
authHdr, err = c.parseAuthHeader(authHeaderField[0])
|
authHdr, err = c.parseAuthHeader(authHeaderField[0], r.Header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -181,14 +236,14 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
addr, err := getAddress(authHdr.AccessKeyID)
|
cnrID, err := c.getAccessBoxContainer(authHdr.AccessKeyID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
box, attrs, err := c.cli.GetBox(r.Context(), addr)
|
box, attrs, err := c.cli.GetBox(r.Context(), cnrID, authHdr.AccessKeyID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("get box '%s': %w", addr, err)
|
return nil, fmt.Errorf("get box by access key '%s': %w", authHdr.AccessKeyID, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = checkFormatHashContentSHA256(r.Header.Get(AmzContentSHA256)); err != nil {
|
if err = checkFormatHashContentSHA256(r.Header.Get(AmzContentSHA256)); err != nil {
|
||||||
|
@ -196,7 +251,7 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
clonedRequest := cloneRequest(r, authHdr)
|
clonedRequest := cloneRequest(r, authHdr)
|
||||||
if err = c.checkSign(authHdr, box, clonedRequest, signatureDateTime); err != nil {
|
if err = c.checkSign(r.Context(), authHdr, box, clonedRequest, signatureDateTime); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -205,7 +260,7 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
||||||
AuthHeaders: &middleware.AuthHeader{
|
AuthHeaders: &middleware.AuthHeader{
|
||||||
AccessKeyID: authHdr.AccessKeyID,
|
AccessKeyID: authHdr.AccessKeyID,
|
||||||
Region: authHdr.Region,
|
Region: authHdr.Region,
|
||||||
SignatureV4: authHdr.SignatureV4,
|
SignatureV4: authHdr.Signature,
|
||||||
},
|
},
|
||||||
Attributes: attrs,
|
Attributes: attrs,
|
||||||
}
|
}
|
||||||
|
@ -216,15 +271,29 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Center) getAccessBoxContainer(accessKeyID string) (cid.ID, error) {
|
||||||
|
var addr oid.Address
|
||||||
|
if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err == nil {
|
||||||
|
return addr.Container(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
cnrID, ok := c.settings.AccessBoxContainer()
|
||||||
|
if ok {
|
||||||
|
return cnrID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return cid.ID{}, fmt.Errorf("%w: unknown container for creds '%s'", apierr.GetAPIError(apierr.ErrInvalidAccessKeyID), accessKeyID)
|
||||||
|
}
|
||||||
|
|
||||||
func checkFormatHashContentSHA256(hash string) error {
|
func checkFormatHashContentSHA256(hash string) error {
|
||||||
if !IsStandardContentSHA256(hash) {
|
if !IsStandardContentSHA256(hash) {
|
||||||
hashBinary, err := hex.DecodeString(hash)
|
hashBinary, err := hex.DecodeString(hash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("%w: decode hash: %s: %s", apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch),
|
return fmt.Errorf("%w: decode hash: %s: %s", apierr.GetAPIError(apierr.ErrContentSHA256Mismatch),
|
||||||
hash, err.Error())
|
hash, err.Error())
|
||||||
}
|
}
|
||||||
if len(hashBinary) != sha256.Size && len(hash) != 0 {
|
if len(hashBinary) != sha256.Size && len(hash) != 0 {
|
||||||
return fmt.Errorf("%w: invalid hash size %d", apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch), len(hashBinary))
|
return fmt.Errorf("%w: invalid hash size %d", apierr.GetAPIError(apierr.ErrContentSHA256Mismatch), len(hashBinary))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -242,12 +311,12 @@ func (c Center) checkAccessKeyID(accessKeyID string) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Errorf("%w: accesskeyID prefix isn't allowed", apiErrors.GetAPIError(apiErrors.ErrAccessDenied))
|
return fmt.Errorf("%w: accesskeyID prefix isn't allowed", apierr.GetAPIError(apierr.ErrAccessDenied))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
|
func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
|
||||||
if err := r.ParseMultipartForm(maxFormSizeMemory); err != nil {
|
if err := r.ParseMultipartForm(maxFormSizeMemory); err != nil {
|
||||||
return nil, fmt.Errorf("%w: parse multipart form with max size %d", apiErrors.GetAPIError(apiErrors.ErrInvalidArgument), maxFormSizeMemory)
|
return nil, fmt.Errorf("%w: parse multipart form with max size %d", apierr.GetAPIError(apierr.ErrInvalidArgument), maxFormSizeMemory)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := prepareForm(r.MultipartForm); err != nil {
|
if err := prepareForm(r.MultipartForm); err != nil {
|
||||||
|
@ -262,7 +331,7 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
|
||||||
creds := MultipartFormValue(r, "x-amz-credential")
|
creds := MultipartFormValue(r, "x-amz-credential")
|
||||||
submatches := c.postReg.GetSubmatches(creds)
|
submatches := c.postReg.GetSubmatches(creds)
|
||||||
if len(submatches) != 4 {
|
if len(submatches) != 4 {
|
||||||
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed), creds)
|
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), creds)
|
||||||
}
|
}
|
||||||
|
|
||||||
signatureDateTime, err := time.Parse("20060102T150405Z", MultipartFormValue(r, "x-amz-date"))
|
signatureDateTime, err := time.Parse("20060102T150405Z", MultipartFormValue(r, "x-amz-date"))
|
||||||
|
@ -270,27 +339,37 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
|
||||||
return nil, fmt.Errorf("failed to parse x-amz-date field: %w", err)
|
return nil, fmt.Errorf("failed to parse x-amz-date field: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
addr, err := getAddress(submatches["access_key_id"])
|
accessKeyID := submatches["access_key_id"]
|
||||||
|
|
||||||
|
cnrID, err := c.getAccessBoxContainer(accessKeyID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
box, attrs, err := c.cli.GetBox(r.Context(), addr)
|
box, attrs, err := c.cli.GetBox(r.Context(), cnrID, accessKeyID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("get box '%s': %w", addr, err)
|
return nil, fmt.Errorf("get box by accessKeyID '%s': %w", accessKeyID, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
secret := box.Gate.SecretKey
|
secret := box.Gate.SecretKey
|
||||||
service, region := submatches["service"], submatches["region"]
|
service, region := submatches["service"], submatches["region"]
|
||||||
|
|
||||||
signature := signStr(secret, service, region, signatureDateTime, policy)
|
signature := SignStr(secret, service, region, signatureDateTime, policy)
|
||||||
reqSignature := MultipartFormValue(r, "x-amz-signature")
|
reqSignature := MultipartFormValue(r, "x-amz-signature")
|
||||||
if signature != reqSignature {
|
if signature != reqSignature {
|
||||||
return nil, fmt.Errorf("%w: %s != %s", apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch),
|
return nil, fmt.Errorf("%w: %s != %s", apierr.GetAPIError(apierr.ErrSignatureDoesNotMatch),
|
||||||
reqSignature, signature)
|
reqSignature, signature)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &middleware.Box{AccessBox: box, Attributes: attrs}, nil
|
return &middleware.Box{
|
||||||
|
AccessBox: box,
|
||||||
|
AuthHeaders: &middleware.AuthHeader{
|
||||||
|
AccessKeyID: accessKeyID,
|
||||||
|
Region: region,
|
||||||
|
SignatureV4: signature,
|
||||||
|
},
|
||||||
|
Attributes: attrs,
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
|
func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
|
||||||
|
@ -314,47 +393,117 @@ func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
|
||||||
return otherRequest
|
return otherRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
|
func (c *Center) checkSign(ctx context.Context, authHeader *AuthHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
|
||||||
awsCreds := credentials.NewStaticCredentials(authHeader.AccessKeyID, box.Gate.SecretKey, "")
|
|
||||||
signer := v4.NewSigner(awsCreds)
|
|
||||||
signer.DisableURIPathEscaping = true
|
|
||||||
|
|
||||||
var signature string
|
var signature string
|
||||||
if authHeader.IsPresigned {
|
|
||||||
now := time.Now()
|
|
||||||
if signatureDateTime.Add(authHeader.Expiration).Before(now) {
|
|
||||||
return fmt.Errorf("%w: expired: now %s, signature %s", apiErrors.GetAPIError(apiErrors.ErrExpiredPresignRequest),
|
|
||||||
now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
|
|
||||||
}
|
|
||||||
if now.Before(signatureDateTime) {
|
|
||||||
return fmt.Errorf("%w: signature time from the future: now %s, signature %s", apiErrors.GetAPIError(apiErrors.ErrBadRequest),
|
|
||||||
now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
|
|
||||||
}
|
|
||||||
if _, err := signer.Presign(request, nil, authHeader.Service, authHeader.Region, authHeader.Expiration, signatureDateTime); err != nil {
|
|
||||||
return fmt.Errorf("failed to pre-sign temporary HTTP request: %w", err)
|
|
||||||
}
|
|
||||||
signature = request.URL.Query().Get(AmzSignature)
|
|
||||||
} else {
|
|
||||||
if _, err := signer.Sign(request, nil, authHeader.Service, authHeader.Region, signatureDateTime); err != nil {
|
|
||||||
return fmt.Errorf("failed to sign temporary HTTP request: %w", err)
|
|
||||||
}
|
|
||||||
signature = c.reg.GetSubmatches(request.Header.Get(AuthorizationHdr))["v4_signature"]
|
|
||||||
}
|
|
||||||
|
|
||||||
if authHeader.SignatureV4 != signature {
|
switch authHeader.Preamble {
|
||||||
return fmt.Errorf("%w: %s != %s: headers %v", apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch),
|
case signaturePreambleSigV4:
|
||||||
authHeader.SignatureV4, signature, authHeader.SignedFields)
|
creds := aws.Credentials{
|
||||||
|
AccessKeyID: authHeader.AccessKeyID,
|
||||||
|
SecretAccessKey: box.Gate.SecretKey,
|
||||||
|
}
|
||||||
|
signer := v4.NewSigner(func(options *v4.SignerOptions) {
|
||||||
|
options.DisableURIPathEscaping = true
|
||||||
|
})
|
||||||
|
|
||||||
|
if authHeader.IsPresigned {
|
||||||
|
if err := checkPresignedDate(authHeader, signatureDateTime); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
signedURI, _, err := signer.PresignHTTP(ctx, creds, request, authHeader.PayloadHash, authHeader.Service, authHeader.Region, signatureDateTime)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to pre-sign temporary HTTP request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
u, err := url.ParseRequestURI(signedURI)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
signature = u.Query().Get(AmzSignature)
|
||||||
|
} else {
|
||||||
|
if err := signer.SignHTTP(ctx, creds, request, authHeader.PayloadHash, authHeader.Service, authHeader.Region, signatureDateTime); err != nil {
|
||||||
|
return fmt.Errorf("failed to sign temporary HTTP request: %w", err)
|
||||||
|
}
|
||||||
|
signature = c.reg.GetSubmatches(request.Header.Get(AuthorizationHdr))["v4_signature"]
|
||||||
|
}
|
||||||
|
if authHeader.Signature != signature {
|
||||||
|
return fmt.Errorf("%w: %s != %s: headers %v", apierr.GetAPIError(apierr.ErrSignatureDoesNotMatch),
|
||||||
|
authHeader.Signature, signature, authHeader.SignedFields)
|
||||||
|
}
|
||||||
|
|
||||||
|
case signaturePreambleSigV4A:
|
||||||
|
signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
|
||||||
|
options.DisableURIPathEscaping = true
|
||||||
|
})
|
||||||
|
|
||||||
|
credAdapter := v4a.SymmetricCredentialAdaptor{
|
||||||
|
SymmetricProvider: credentials.NewStaticCredentialsProvider(authHeader.AccessKeyID, box.Gate.SecretKey, ""),
|
||||||
|
}
|
||||||
|
|
||||||
|
creds, err := credAdapter.RetrievePrivateKey(request.Context())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to derive assymetric key from credentials: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !authHeader.IsPresigned {
|
||||||
|
return signer.VerifySignature(creds, request, authHeader.PayloadHash, authHeader.Service,
|
||||||
|
strings.Split(authHeader.Region, ","), signatureDateTime, authHeader.Signature)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = checkPresignedDate(authHeader, signatureDateTime); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return signer.VerifyPresigned(creds, request, authHeader.PayloadHash, authHeader.Service,
|
||||||
|
strings.Split(authHeader.Region, ","), signatureDateTime, authHeader.Signature)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid preamble: %s", authHeader.Preamble)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func signStr(secret, service, region string, t time.Time, strToSign string) string {
|
func checkPresignedDate(authHeader *AuthHeader, signatureDateTime time.Time) error {
|
||||||
|
now := time.Now()
|
||||||
|
if signatureDateTime.Add(authHeader.Expiration).Before(now) {
|
||||||
|
return fmt.Errorf("%w: expired: now %s, signature %s", apierr.GetAPIError(apierr.ErrExpiredPresignRequest),
|
||||||
|
now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
|
||||||
|
}
|
||||||
|
if now.Before(signatureDateTime) {
|
||||||
|
return fmt.Errorf("%w: signature time from the future: now %s, signature %s", apierr.GetAPIError(apierr.ErrBadRequest),
|
||||||
|
now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func SignStr(secret, service, region string, t time.Time, strToSign string) string {
|
||||||
creds := deriveKey(secret, service, region, t)
|
creds := deriveKey(secret, service, region, t)
|
||||||
signature := hmacSHA256(creds, []byte(strToSign))
|
signature := hmacSHA256(creds, []byte(strToSign))
|
||||||
return hex.EncodeToString(signature)
|
return hex.EncodeToString(signature)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func SignStrV4A(ctx context.Context, cred aws.Credentials, strToSign string) (string, error) {
|
||||||
|
credAdapter := v4a.SymmetricCredentialAdaptor{
|
||||||
|
SymmetricProvider: credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, ""),
|
||||||
|
}
|
||||||
|
|
||||||
|
creds, err := credAdapter.RetrievePrivateKey(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
hash := sha256.New()
|
||||||
|
hash.Write([]byte(strToSign))
|
||||||
|
|
||||||
|
sig, err := creds.PrivateKey.Sign(rand.Reader, hash.Sum(nil), crypto.SHA256)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return hex.EncodeToString(sig), nil
|
||||||
|
}
|
||||||
|
|
||||||
func deriveKey(secret, service, region string, t time.Time) []byte {
|
func deriveKey(secret, service, region string, t time.Time) []byte {
|
||||||
hmacDate := hmacSHA256([]byte("AWS4"+secret), []byte(t.UTC().Format("20060102")))
|
hmacDate := hmacSHA256([]byte("AWS4"+secret), []byte(t.UTC().Format("20060102")))
|
||||||
hmacRegion := hmacSHA256(hmacDate, []byte(region))
|
hmacRegion := hmacSHA256(hmacDate, []byte(region))
|
||||||
|
|
93
api/auth/center_fuzz_test.go
Normal file
93
api/auth/center_fuzz_test.go
Normal file
|
@ -0,0 +1,93 @@
|
||||||
|
//go:build gofuzz
|
||||||
|
// +build gofuzz
|
||||||
|
|
||||||
|
package auth
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"github.com/aws/aws-sdk-go-v2/aws"
|
||||||
|
utils "github.com/trailofbits/go-fuzz-utils"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
fuzzSuccessExitCode = 0
|
||||||
|
fuzzFailExitCode = -1
|
||||||
|
)
|
||||||
|
|
||||||
|
func InitFuzzAuthenticate() {
|
||||||
|
}
|
||||||
|
|
||||||
|
func DoFuzzAuthenticate(input []byte) int {
|
||||||
|
// FUZZER INIT
|
||||||
|
if len(input) < 100 {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
tp, err := utils.NewTypeProvider(input)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
var accessKeyAddr oid.Address
|
||||||
|
err = tp.Fill(accessKeyAddr)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
|
||||||
|
secretKey, err := tp.GetString()
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secretKey}
|
||||||
|
|
||||||
|
reqData := RequestData{
|
||||||
|
Method: "GET",
|
||||||
|
Endpoint: "http://localhost:8084",
|
||||||
|
Bucket: "my-bucket",
|
||||||
|
Object: "@obj/name",
|
||||||
|
}
|
||||||
|
presignData := PresignData{
|
||||||
|
Service: "s3",
|
||||||
|
Region: "spb",
|
||||||
|
Lifetime: 10 * time.Minute,
|
||||||
|
SignTime: time.Now().UTC(),
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := PresignRequest(context.Background(), awsCreds, reqData, presignData, zap.NewNop())
|
||||||
|
if req == nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
expBox := &accessbox.Box{
|
||||||
|
Gate: &accessbox.GateData{
|
||||||
|
SecretKey: secretKey,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
mock := newTokensFrostfsMock()
|
||||||
|
mock.addBox(accessKeyAddr, expBox)
|
||||||
|
|
||||||
|
c := &Center{
|
||||||
|
cli: mock,
|
||||||
|
reg: NewRegexpMatcher(AuthorizationFieldRegexp),
|
||||||
|
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _ = c.Authenticate(req)
|
||||||
|
|
||||||
|
return fuzzSuccessExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
func FuzzAuthenticate(f *testing.F) {
|
||||||
|
f.Fuzz(func(t *testing.T, data []byte) {
|
||||||
|
DoFuzzAuthenticate(data)
|
||||||
|
})
|
||||||
|
}
|
|
@ -1,19 +1,53 @@
|
||||||
package auth
|
package auth
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
|
||||||
|
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
||||||
|
frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
|
||||||
|
"github.com/aws/aws-sdk-go-v2/aws"
|
||||||
|
"github.com/aws/aws-sdk-go-v2/credentials"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
"go.uber.org/zap/zaptest"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type centerSettingsMock struct {
|
||||||
|
accessBoxContainer *cid.ID
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *centerSettingsMock) AccessBoxContainer() (cid.ID, bool) {
|
||||||
|
if c.accessBoxContainer == nil {
|
||||||
|
return cid.ID{}, false
|
||||||
|
}
|
||||||
|
return *c.accessBoxContainer, true
|
||||||
|
}
|
||||||
|
|
||||||
func TestAuthHeaderParse(t *testing.T) {
|
func TestAuthHeaderParse(t *testing.T) {
|
||||||
defaultHeader := "AWS4-HMAC-SHA256 Credential=oid0cid/20210809/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=2811ccb9e242f41426738fb1f"
|
defaultHeader := "AWS4-HMAC-SHA256 Credential=oid0cid/20210809/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=2811ccb9e242f41426738fb1f"
|
||||||
|
|
||||||
center := &Center{
|
center := &Center{
|
||||||
reg: NewRegexpMatcher(authorizationFieldRegexp),
|
reg: NewRegexpMatcher(AuthorizationFieldRegexp),
|
||||||
|
settings: ¢erSettingsMock{},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range []struct {
|
for _, tc := range []struct {
|
||||||
|
@ -28,9 +62,10 @@ func TestAuthHeaderParse(t *testing.T) {
|
||||||
AccessKeyID: "oid0cid",
|
AccessKeyID: "oid0cid",
|
||||||
Service: "s3",
|
Service: "s3",
|
||||||
Region: "us-east-1",
|
Region: "us-east-1",
|
||||||
SignatureV4: "2811ccb9e242f41426738fb1f",
|
Signature: "2811ccb9e242f41426738fb1f",
|
||||||
SignedFields: []string{"host", "x-amz-content-sha256", "x-amz-date"},
|
SignedFields: []string{"host", "x-amz-content-sha256", "x-amz-date"},
|
||||||
Date: "20210809",
|
Date: "20210809",
|
||||||
|
Preamble: signaturePreambleSigV4,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -38,55 +73,13 @@ func TestAuthHeaderParse(t *testing.T) {
|
||||||
err: errors.GetAPIError(errors.ErrAuthorizationHeaderMalformed),
|
err: errors.GetAPIError(errors.ErrAuthorizationHeaderMalformed),
|
||||||
expected: nil,
|
expected: nil,
|
||||||
},
|
},
|
||||||
{
|
|
||||||
header: strings.ReplaceAll(defaultHeader, "oid0cid", "oidcid"),
|
|
||||||
err: errors.GetAPIError(errors.ErrInvalidAccessKeyID),
|
|
||||||
expected: nil,
|
|
||||||
},
|
|
||||||
} {
|
} {
|
||||||
authHeader, err := center.parseAuthHeader(tc.header)
|
authHeader, err := center.parseAuthHeader(tc.header, nil)
|
||||||
require.ErrorIs(t, err, tc.err, tc.header)
|
require.ErrorIs(t, err, tc.err, tc.header)
|
||||||
require.Equal(t, tc.expected, authHeader, tc.header)
|
require.Equal(t, tc.expected, authHeader, tc.header)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAuthHeaderGetAddress(t *testing.T) {
|
|
||||||
defaulErr := errors.GetAPIError(errors.ErrInvalidAccessKeyID)
|
|
||||||
|
|
||||||
for _, tc := range []struct {
|
|
||||||
authHeader *AuthHeader
|
|
||||||
err error
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
authHeader: &AuthHeader{
|
|
||||||
AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJM0HrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
|
|
||||||
},
|
|
||||||
err: nil,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
authHeader: &AuthHeader{
|
|
||||||
AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJMHrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
|
|
||||||
},
|
|
||||||
err: defaulErr,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
authHeader: &AuthHeader{
|
|
||||||
AccessKeyID: "oid0cid",
|
|
||||||
},
|
|
||||||
err: defaulErr,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
authHeader: &AuthHeader{
|
|
||||||
AccessKeyID: "oidcid",
|
|
||||||
},
|
|
||||||
err: defaulErr,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
_, err := getAddress(tc.authHeader.AccessKeyID)
|
|
||||||
require.ErrorIs(t, err, tc.err, tc.authHeader.AccessKeyID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSignature(t *testing.T) {
|
func TestSignature(t *testing.T) {
|
||||||
secret := "66be461c3cd429941c55daf42fad2b8153e5a2016ba89c9494d97677cc9d3872"
|
secret := "66be461c3cd429941c55daf42fad2b8153e5a2016ba89c9494d97677cc9d3872"
|
||||||
strToSign := "eyAiZXhwaXJhdGlvbiI6ICIyMDE1LTEyLTMwVDEyOjAwOjAwLjAwMFoiLAogICJjb25kaXRpb25zIjogWwogICAgeyJidWNrZXQiOiAiYWNsIn0sCiAgICBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS8iXSwKICAgIHsic3VjY2Vzc19hY3Rpb25fcmVkaXJlY3QiOiAiaHR0cDovL2xvY2FsaG9zdDo4MDg0L2FjbCJ9LAogICAgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLAogICAgeyJ4LWFtei1tZXRhLXV1aWQiOiAiMTQzNjUxMjM2NTEyNzQifSwKICAgIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLAoKICAgIHsiWC1BbXotQ3JlZGVudGlhbCI6ICI4Vmk0MVBIbjVGMXNzY2J4OUhqMXdmMUU2aERUYURpNndxOGhxTU05NllKdTA1QzVDeUVkVlFoV1E2aVZGekFpTkxXaTlFc3BiUTE5ZDRuR3pTYnZVZm10TS8yMDE1MTIyOS91cy1lYXN0LTEvczMvYXdzNF9yZXF1ZXN0In0sCiAgICB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sCiAgICB7IlgtQW16LURhdGUiOiAiMjAxNTEyMjlUMDAwMDAwWiIgfSwKICAgIHsieC1pZ25vcmUtdG1wIjogInNvbWV0aGluZyIgfQogIF0KfQ=="
|
strToSign := "eyAiZXhwaXJhdGlvbiI6ICIyMDE1LTEyLTMwVDEyOjAwOjAwLjAwMFoiLAogICJjb25kaXRpb25zIjogWwogICAgeyJidWNrZXQiOiAiYWNsIn0sCiAgICBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS8iXSwKICAgIHsic3VjY2Vzc19hY3Rpb25fcmVkaXJlY3QiOiAiaHR0cDovL2xvY2FsaG9zdDo4MDg0L2FjbCJ9LAogICAgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLAogICAgeyJ4LWFtei1tZXRhLXV1aWQiOiAiMTQzNjUxMjM2NTEyNzQifSwKICAgIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLAoKICAgIHsiWC1BbXotQ3JlZGVudGlhbCI6ICI4Vmk0MVBIbjVGMXNzY2J4OUhqMXdmMUU2aERUYURpNndxOGhxTU05NllKdTA1QzVDeUVkVlFoV1E2aVZGekFpTkxXaTlFc3BiUTE5ZDRuR3pTYnZVZm10TS8yMDE1MTIyOS91cy1lYXN0LTEvczMvYXdzNF9yZXF1ZXN0In0sCiAgICB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sCiAgICB7IlgtQW16LURhdGUiOiAiMjAxNTEyMjlUMDAwMDAwWiIgfSwKICAgIHsieC1pZ25vcmUtdG1wIjogInNvbWV0aGluZyIgfQogIF0KfQ=="
|
||||||
|
@ -96,10 +89,95 @@ func TestSignature(t *testing.T) {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
signature := signStr(secret, "s3", "us-east-1", signTime, strToSign)
|
signature := SignStr(secret, "s3", "us-east-1", signTime, strToSign)
|
||||||
require.Equal(t, "dfbe886241d9e369cf4b329ca0f15eb27306c97aa1022cc0bb5a914c4ef87634", signature)
|
require.Equal(t, "dfbe886241d9e369cf4b329ca0f15eb27306c97aa1022cc0bb5a914c4ef87634", signature)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestSignatureV4A(t *testing.T) {
|
||||||
|
accessKeyID := "2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1"
|
||||||
|
secretKey := "00637f53f842573aaa06c2164c598973cd986880987111416cf71f1619def537"
|
||||||
|
|
||||||
|
signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
|
||||||
|
options.DisableURIPathEscaping = true
|
||||||
|
options.Logger = zaptest.NewLogger(t)
|
||||||
|
options.LogSigning = true
|
||||||
|
})
|
||||||
|
|
||||||
|
credAdapter := v4a.SymmetricCredentialAdaptor{
|
||||||
|
SymmetricProvider: credentials.NewStaticCredentialsProvider(accessKeyID, secretKey, ""),
|
||||||
|
}
|
||||||
|
|
||||||
|
bodyStr := `
|
||||||
|
1b;chunk-signature=3045022100b63692a1b20759bdabd342011823427a8952df75c93174d98ad043abca8052e002201695228a91ba986171b8d0ad20856d3d94ca3614d0a90a50a531ba8e52447b9b**
|
||||||
|
Testing with the {sdk-java}
|
||||||
|
0;chunk-signature=30440220455885a2d4e9f705256ca6b0a5a22f7f784780ccbd1c0a371e5db3059c91745b022073259dd44746cbd63261d628a04d25be5a32a974c077c5c2d83c8157fb323b9f****
|
||||||
|
|
||||||
|
`
|
||||||
|
body := bytes.NewBufferString(bodyStr)
|
||||||
|
|
||||||
|
req, err := http.NewRequest("PUT", "http://localhost:8084/test/tmp", body)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Amz-Sdk-Invocation-Id", "ca3a3cde-7d26-fce6-ed9c-82f7a0573824")
|
||||||
|
req.Header.Set("Amz-Sdk-Request", "attempt=2; max=2")
|
||||||
|
req.Header.Set("Authorization", "AWS4-ECDSA-P256-SHA256 Credential=2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1/20240904/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-region-set, Signature=30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7")
|
||||||
|
req.Header.Set("Content-Length", "360")
|
||||||
|
req.Header.Set("Content-Type", "text/plain; charset=UTF-8")
|
||||||
|
req.Header.Set("X-Amz-Content-Sha256", "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD")
|
||||||
|
req.Header.Set("X-Amz-Date", "20240904T133253Z")
|
||||||
|
req.Header.Set("X-Amz-Decoded-Content-Length", "27")
|
||||||
|
req.Header.Set("X-Amz-Region-Set", "us-east-1")
|
||||||
|
|
||||||
|
service := "s3"
|
||||||
|
regionSet := []string{"us-east-1"}
|
||||||
|
signature := "30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7"
|
||||||
|
signingTime, err := time.Parse("20060102T150405Z", "20240904T133253Z")
|
||||||
|
require.NoError(t, err)
|
||||||
|
creds, err := credAdapter.RetrievePrivateKey(req.Context())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = signer.VerifySignature(creds, req, "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD", service, regionSet, signingTime, signature)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSignatureV4(t *testing.T) {
|
||||||
|
signer := v4.NewSigner(func(options *v4.SignerOptions) {
|
||||||
|
options.DisableURIPathEscaping = true
|
||||||
|
options.Logger = zaptest.NewLogger(t)
|
||||||
|
options.LogSigning = true
|
||||||
|
})
|
||||||
|
|
||||||
|
creds := aws.Credentials{
|
||||||
|
AccessKeyID: "9CBEGH8T9XfLin2pg7LG8ZxBH1PnZc1yoioViKngrUnu0CbC2mcjpcw9t4Y7AS6zsF5cJGkDhXAx5hxFDKwfZzgj7",
|
||||||
|
SecretAccessKey: "8742218da7f905de24f633f44efe02f82c6d2a317ed6f99592627215d17816e3",
|
||||||
|
}
|
||||||
|
|
||||||
|
bodyStr := `tmp2
|
||||||
|
`
|
||||||
|
body := bytes.NewBufferString(bodyStr)
|
||||||
|
|
||||||
|
req, err := http.NewRequest("PUT", "http://localhost:8084/main/tmp2", body)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Authorization", "AWS4-HMAC-SHA256 Credential=9CBEGH8T9XfLin2pg7LG8ZxBH1PnZc1yoioViKngrUnu0CbC2mcjpcw9t4Y7AS6zsF5cJGkDhXAx5hxFDKwfZzgj7/20241210/ru/s3/aws4_request, SignedHeaders=content-md5;host;x-amz-content-sha256;x-amz-date, Signature=945664a5bccfd37a1167ca5e718e2b883f68a7ccf7f1044768e7fe58b737b7ed")
|
||||||
|
req.Header.Set("Content-Length", "5")
|
||||||
|
req.Header.Set("User-Agent", "aws-cli/2.13.2 Python/3.11.4 Linux/6.4.5-x64v1-xanmod1 exe/x86_64.debian.11 prompt/off command/s3api.put-object")
|
||||||
|
req.Header.Set("Content-MD5", "DstU4KxdzBj5jTGltfyqgA==")
|
||||||
|
req.Header.Set("Expect", "101-continue")
|
||||||
|
req.Header.Set("X-Amz-Content-Sha256", "1f9b7417ee5445c41dbe904c3651eb0ba1c12fecff16c1bccd8df3db6e390b5f")
|
||||||
|
req.Header.Set("X-Amz-Date", "20241210T114611Z")
|
||||||
|
|
||||||
|
service := "s3"
|
||||||
|
region := "ru"
|
||||||
|
signature := "945664a5bccfd37a1167ca5e718e2b883f68a7ccf7f1044768e7fe58b737b7ed"
|
||||||
|
signingTime, err := time.Parse("20060102T150405Z", "20241210T114611Z")
|
||||||
|
require.NoError(t, err)
|
||||||
|
cloned := cloneRequest(req, &AuthHeader{SignedFields: []string{"content-md5", "host", "x-amz-content-sha256", "x-amz-date"}})
|
||||||
|
|
||||||
|
err = signer.SignHTTP(cloned.Context(), creds, cloned, "1f9b7417ee5445c41dbe904c3651eb0ba1c12fecff16c1bccd8df3db6e390b5f", service, region, signingTime)
|
||||||
|
require.NoError(t, err)
|
||||||
|
signatureComputed := NewRegexpMatcher(AuthorizationFieldRegexp).GetSubmatches(cloned.Header.Get(AuthorizationHdr))["v4_signature"]
|
||||||
|
require.Equal(t, signature, signatureComputed, "signature mismatched")
|
||||||
|
}
|
||||||
|
|
||||||
func TestCheckFormatContentSHA256(t *testing.T) {
|
func TestCheckFormatContentSHA256(t *testing.T) {
|
||||||
defaultErr := errors.GetAPIError(errors.ErrContentSHA256Mismatch)
|
defaultErr := errors.GetAPIError(errors.ErrContentSHA256Mismatch)
|
||||||
|
|
||||||
|
@ -123,6 +201,11 @@ func TestCheckFormatContentSHA256(t *testing.T) {
|
||||||
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f7s",
|
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f7s",
|
||||||
error: defaultErr,
|
error: defaultErr,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "invalid hash format: hash size",
|
||||||
|
hash: "5aadb45520dcd8726b2822a7a78bb53d794f557199d5d4abdedd2c55a4bd6ca73607605c558de3db80c8e86c3196484566163ed1327e82e8b6757d1932113cb8",
|
||||||
|
error: defaultErr,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
name: "unsigned payload",
|
name: "unsigned payload",
|
||||||
hash: "UNSIGNED-PAYLOAD",
|
hash: "UNSIGNED-PAYLOAD",
|
||||||
|
@ -145,3 +228,483 @@ func TestCheckFormatContentSHA256(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type frostFSMock struct {
|
||||||
|
objects map[string]*object.Object
|
||||||
|
}
|
||||||
|
|
||||||
|
func newFrostFSMock() *frostFSMock {
|
||||||
|
return &frostFSMock{
|
||||||
|
objects: map[string]*object.Object{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *frostFSMock) GetCredsObject(_ context.Context, prm tokens.PrmGetCredsObject) (*object.Object, error) {
|
||||||
|
obj, ok := f.objects[prm.AccessKeyID]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
return obj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *frostFSMock) CreateObject(context.Context, tokens.PrmObjectCreate) (oid.ID, error) {
|
||||||
|
return oid.ID{}, fmt.Errorf("the mock method is not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuthenticate(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
key, err := keys.NewPrivateKey()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
cfg := &cache.Config{
|
||||||
|
Size: 10,
|
||||||
|
Lifetime: 24 * time.Hour,
|
||||||
|
Logger: zaptest.NewLogger(t),
|
||||||
|
}
|
||||||
|
|
||||||
|
gateData := []*accessbox.GateData{{
|
||||||
|
BearerToken: &bearer.Token{},
|
||||||
|
GateKey: key.PublicKey(),
|
||||||
|
}}
|
||||||
|
|
||||||
|
accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"), false)
|
||||||
|
require.NoError(t, err)
|
||||||
|
data, err := accessBox.Marshal()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
var obj object.Object
|
||||||
|
obj.SetPayload(data)
|
||||||
|
addr := oidtest.Address()
|
||||||
|
obj.SetContainerID(addr.Container())
|
||||||
|
obj.SetID(addr.Object())
|
||||||
|
|
||||||
|
accessKeyID := getAccessKeyID(addr)
|
||||||
|
|
||||||
|
frostfs := newFrostFSMock()
|
||||||
|
frostfs.objects[accessKeyID] = &obj
|
||||||
|
|
||||||
|
awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secret.SecretKey}
|
||||||
|
defaultSigner := v4.NewSigner()
|
||||||
|
|
||||||
|
service, region := "s3", "default"
|
||||||
|
invalidValue := "invalid-value"
|
||||||
|
|
||||||
|
bigConfig := tokens.Config{
|
||||||
|
FrostFS: frostfs,
|
||||||
|
Key: key,
|
||||||
|
CacheConfig: cfg,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range []struct {
|
||||||
|
name string
|
||||||
|
prefixes []string
|
||||||
|
request *http.Request
|
||||||
|
err bool
|
||||||
|
errCode errors.ErrorCode
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "valid sign",
|
||||||
|
prefixes: []string{addr.Container().String()},
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
|
||||||
|
require.NoError(t, err)
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "no authorization header",
|
||||||
|
request: func() *http.Request {
|
||||||
|
return httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid authorization header",
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
r.Header.Set(AuthorizationHdr, invalidValue)
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
errCode: errors.ErrAuthorizationHeaderMalformed,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid access key id format",
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
cred := aws.Credentials{AccessKeyID: addr.Object().String(), SecretAccessKey: secret.SecretKey}
|
||||||
|
err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
|
||||||
|
require.NoError(t, err)
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
errCode: errors.ErrInvalidAccessKeyID,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "not allowed access key id",
|
||||||
|
prefixes: []string{addr.Object().String()},
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
|
||||||
|
require.NoError(t, err)
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
errCode: errors.ErrAccessDenied,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid access key id value",
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
cred := aws.Credentials{AccessKeyID: accessKeyID[:len(accessKeyID)-4], SecretAccessKey: secret.SecretKey}
|
||||||
|
err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
|
||||||
|
require.NoError(t, err)
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
errCode: errors.ErrInvalidAccessKeyID,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "unknown access key id",
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
cred := aws.Credentials{AccessKeyID: addr.Object().String() + "0" + addr.Container().String(), SecretAccessKey: secret.SecretKey}
|
||||||
|
err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
|
||||||
|
require.NoError(t, err)
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid signature",
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
cred := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: "secret"}
|
||||||
|
err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
|
||||||
|
require.NoError(t, err)
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
errCode: errors.ErrSignatureDoesNotMatch,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid signature - AmzDate",
|
||||||
|
prefixes: []string{addr.Container().String()},
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
|
||||||
|
r.Header.Set(AmzDate, invalidValue)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid AmzContentSHA256",
|
||||||
|
prefixes: []string{addr.Container().String()},
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
|
||||||
|
r.Header.Set(AmzContentSHA256, invalidValue)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "valid presign",
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
r.Header.Set(AmzExpires, "60")
|
||||||
|
signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
|
||||||
|
require.NoError(t, err)
|
||||||
|
r.URL, err = url.ParseRequestURI(signedURI)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "presign, bad X-Amz-Credential",
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
query := url.Values{
|
||||||
|
AmzAlgorithm: []string{"AWS4-HMAC-SHA256"},
|
||||||
|
AmzCredential: []string{invalidValue},
|
||||||
|
}
|
||||||
|
r.URL.RawQuery = query.Encode()
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "presign, bad X-Amz-Expires",
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
r.Header.Set(AmzExpires, invalidValue)
|
||||||
|
signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now())
|
||||||
|
require.NoError(t, err)
|
||||||
|
r.URL, err = url.ParseRequestURI(signedURI)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
errCode: errors.ErrMalformedExpires,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "presign, expired",
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
r.Header.Set(AmzExpires, "60")
|
||||||
|
signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now().Add(-time.Minute))
|
||||||
|
require.NoError(t, err)
|
||||||
|
r.URL, err = url.ParseRequestURI(signedURI)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
errCode: errors.ErrExpiredPresignRequest,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "presign, signature from future",
|
||||||
|
request: func() *http.Request {
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
r.Header.Set(AmzExpires, "60")
|
||||||
|
signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now().Add(time.Minute))
|
||||||
|
require.NoError(t, err)
|
||||||
|
r.URL, err = url.ParseRequestURI(signedURI)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return r
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
errCode: errors.ErrBadRequest,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
creds := tokens.New(bigConfig)
|
||||||
|
cntr := New(creds, tc.prefixes, ¢erSettingsMock{})
|
||||||
|
box, err := cntr.Authenticate(tc.request)
|
||||||
|
|
||||||
|
if tc.err {
|
||||||
|
require.Error(t, err)
|
||||||
|
if tc.errCode > 0 {
|
||||||
|
err = frosterr.UnwrapErr(err)
|
||||||
|
require.Equal(t, errors.GetAPIError(tc.errCode), err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, accessKeyID, box.AuthHeaders.AccessKeyID)
|
||||||
|
require.Equal(t, region, box.AuthHeaders.Region)
|
||||||
|
require.Equal(t, secret.SecretKey, box.AccessBox.Gate.SecretKey)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHTTPPostAuthenticate(t *testing.T) {
|
||||||
|
const (
|
||||||
|
policyBase64 = "eyJleHBpcmF0aW9uIjogIjIwMjUtMTItMDFUMTI6MDA6MDAuMDAwWiIsImNvbmRpdGlvbnMiOiBbCiBbInN0YXJ0cy13aXRoIiwgIiR4LWFtei1jcmVkZW50aWFsIiwgIiJdLAogWyJzdGFydHMtd2l0aCIsICIkeC1hbXotZGF0ZSIsICIiXQpdfQ=="
|
||||||
|
invalidValue = "invalid-value"
|
||||||
|
defaultFieldName = "file"
|
||||||
|
service = "s3"
|
||||||
|
region = "default"
|
||||||
|
)
|
||||||
|
|
||||||
|
key, err := keys.NewPrivateKey()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
cfg := &cache.Config{
|
||||||
|
Size: 10,
|
||||||
|
Lifetime: 24 * time.Hour,
|
||||||
|
Logger: zaptest.NewLogger(t),
|
||||||
|
}
|
||||||
|
|
||||||
|
gateData := []*accessbox.GateData{{
|
||||||
|
BearerToken: &bearer.Token{},
|
||||||
|
GateKey: key.PublicKey(),
|
||||||
|
}}
|
||||||
|
|
||||||
|
accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"), false)
|
||||||
|
require.NoError(t, err)
|
||||||
|
data, err := accessBox.Marshal()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
var obj object.Object
|
||||||
|
obj.SetPayload(data)
|
||||||
|
addr := oidtest.Address()
|
||||||
|
obj.SetContainerID(addr.Container())
|
||||||
|
obj.SetID(addr.Object())
|
||||||
|
|
||||||
|
accessKeyID := getAccessKeyID(addr)
|
||||||
|
|
||||||
|
frostfs := newFrostFSMock()
|
||||||
|
frostfs.objects[accessKeyID] = &obj
|
||||||
|
|
||||||
|
invalidAccessKeyID := oidtest.Address().String() + "0" + oidtest.Address().Object().String()
|
||||||
|
|
||||||
|
timeToSign := time.Now()
|
||||||
|
timeToSignStr := timeToSign.Format("20060102T150405Z")
|
||||||
|
|
||||||
|
bigConfig := tokens.Config{
|
||||||
|
FrostFS: frostfs,
|
||||||
|
Key: key,
|
||||||
|
CacheConfig: cfg,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range []struct {
|
||||||
|
name string
|
||||||
|
prefixes []string
|
||||||
|
request *http.Request
|
||||||
|
err bool
|
||||||
|
errCode errors.ErrorCode
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "HTTP POST valid",
|
||||||
|
request: func() *http.Request {
|
||||||
|
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
|
||||||
|
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
||||||
|
|
||||||
|
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
|
||||||
|
}(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "HTTP POST valid with custom field name",
|
||||||
|
request: func() *http.Request {
|
||||||
|
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
|
||||||
|
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
||||||
|
|
||||||
|
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, "files")
|
||||||
|
}(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "HTTP POST valid with field name with a capital letter",
|
||||||
|
request: func() *http.Request {
|
||||||
|
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
|
||||||
|
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
||||||
|
|
||||||
|
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, "File")
|
||||||
|
}(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "HTTP POST invalid multipart form",
|
||||||
|
request: func() *http.Request {
|
||||||
|
req := httptest.NewRequest(http.MethodPost, "/", nil)
|
||||||
|
req.Header.Set(ContentTypeHdr, "multipart/form-data")
|
||||||
|
|
||||||
|
return req
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
errCode: errors.ErrInvalidArgument,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "HTTP POST invalid signature date time",
|
||||||
|
request: func() *http.Request {
|
||||||
|
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
|
||||||
|
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
||||||
|
|
||||||
|
return getRequestWithMultipartForm(t, policyBase64, creds, invalidValue, sign, defaultFieldName)
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "HTTP POST invalid creds",
|
||||||
|
request: func() *http.Request {
|
||||||
|
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
||||||
|
|
||||||
|
return getRequestWithMultipartForm(t, policyBase64, invalidValue, timeToSignStr, sign, defaultFieldName)
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
errCode: errors.ErrAuthorizationHeaderMalformed,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "HTTP POST missing policy",
|
||||||
|
request: func() *http.Request {
|
||||||
|
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
|
||||||
|
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
||||||
|
|
||||||
|
return getRequestWithMultipartForm(t, "", creds, timeToSignStr, sign, defaultFieldName)
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "HTTP POST invalid accessKeyId",
|
||||||
|
request: func() *http.Request {
|
||||||
|
creds := getCredsStr(invalidValue, timeToSignStr, region, service)
|
||||||
|
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
||||||
|
|
||||||
|
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "HTTP POST invalid accessKeyId - a non-existent box",
|
||||||
|
request: func() *http.Request {
|
||||||
|
creds := getCredsStr(invalidAccessKeyID, timeToSignStr, region, service)
|
||||||
|
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
|
||||||
|
|
||||||
|
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "HTTP POST invalid signature",
|
||||||
|
request: func() *http.Request {
|
||||||
|
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
|
||||||
|
sign := SignStr(secret.SecretKey, service, region, timeToSign, invalidValue)
|
||||||
|
|
||||||
|
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
|
||||||
|
}(),
|
||||||
|
err: true,
|
||||||
|
errCode: errors.ErrSignatureDoesNotMatch,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
creds := tokens.New(bigConfig)
|
||||||
|
cntr := New(creds, tc.prefixes, ¢erSettingsMock{})
|
||||||
|
box, err := cntr.Authenticate(tc.request)
|
||||||
|
|
||||||
|
if tc.err {
|
||||||
|
require.Error(t, err)
|
||||||
|
if tc.errCode > 0 {
|
||||||
|
err = frosterr.UnwrapErr(err)
|
||||||
|
require.Equal(t, errors.GetAPIError(tc.errCode), err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, secret.SecretKey, box.AccessBox.Gate.SecretKey)
|
||||||
|
require.Equal(t, accessKeyID, box.AuthHeaders.AccessKeyID)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getCredsStr(accessKeyID, timeToSign, region, service string) string {
|
||||||
|
return accessKeyID + "/" + timeToSign + "/" + region + "/" + service + "/aws4_request"
|
||||||
|
}
|
||||||
|
|
||||||
|
func getRequestWithMultipartForm(t *testing.T, policy, creds, date, sign, fieldName string) *http.Request {
|
||||||
|
body := &bytes.Buffer{}
|
||||||
|
writer := multipart.NewWriter(body)
|
||||||
|
defer writer.Close()
|
||||||
|
|
||||||
|
err := writer.WriteField("policy", policy)
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = writer.WriteField(AmzCredential, creds)
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = writer.WriteField(AmzDate, date)
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = writer.WriteField(AmzSignature, sign)
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = writer.CreateFormFile(fieldName, "test.txt")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
req := httptest.NewRequest(http.MethodPost, "/", body)
|
||||||
|
req.Header.Set(ContentTypeHdr, writer.FormDataContentType())
|
||||||
|
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
func getAccessKeyID(addr oid.Address) string {
|
||||||
|
return strings.ReplaceAll(addr.EncodeToString(), "/", "0")
|
||||||
|
}
|
||||||
|
|
|
@ -1,14 +1,20 @@
|
||||||
package auth
|
package auth
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/smithy/encoding/httpbinding"
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
|
||||||
|
"github.com/aws/aws-sdk-go-v2/aws"
|
||||||
|
"github.com/aws/aws-sdk-go-v2/credentials"
|
||||||
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
type RequestData struct {
|
type RequestData struct {
|
||||||
|
@ -23,25 +29,74 @@ type PresignData struct {
|
||||||
Region string
|
Region string
|
||||||
Lifetime time.Duration
|
Lifetime time.Duration
|
||||||
SignTime time.Time
|
SignTime time.Time
|
||||||
|
Headers map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
// PresignRequest forms pre-signed request to access objects without aws credentials.
|
// PresignRequest forms pre-signed request to access objects without aws credentials.
|
||||||
func PresignRequest(creds *credentials.Credentials, reqData RequestData, presignData PresignData) (*http.Request, error) {
|
func PresignRequest(ctx context.Context, creds aws.Credentials, reqData RequestData, presignData PresignData, log *zap.Logger) (*http.Request, error) {
|
||||||
urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, rest.EscapePath(reqData.Bucket, false), rest.EscapePath(reqData.Object, false))
|
urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, httpbinding.EscapePath(reqData.Bucket, false), httpbinding.EscapePath(reqData.Object, false))
|
||||||
req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil)
|
req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to create new request: %w", err)
|
return nil, fmt.Errorf("failed to create new request: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for k, v := range presignData.Headers {
|
||||||
|
req.Header.Set(k, v) // maybe we should filter system header (or keep responsibility on caller)
|
||||||
|
}
|
||||||
req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))
|
req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))
|
||||||
req.Header.Set(ContentTypeHdr, "text/plain")
|
req.Header.Set(AmzExpires, strconv.FormatFloat(presignData.Lifetime.Round(time.Second).Seconds(), 'f', 0, 64))
|
||||||
|
|
||||||
signer := v4.NewSigner(creds)
|
signer := v4.NewSigner(func(options *v4.SignerOptions) {
|
||||||
signer.DisableURIPathEscaping = true
|
options.DisableURIPathEscaping = true
|
||||||
|
options.LogSigning = true
|
||||||
|
options.Logger = log
|
||||||
|
})
|
||||||
|
|
||||||
if _, err = signer.Presign(req, nil, presignData.Service, presignData.Region, presignData.Lifetime, presignData.SignTime); err != nil {
|
signedURI, _, err := signer.PresignHTTP(ctx, creds, req, presignData.Headers[AmzContentSHA256], presignData.Service, presignData.Region, presignData.SignTime)
|
||||||
|
if err != nil {
|
||||||
return nil, fmt.Errorf("presign: %w", err)
|
return nil, fmt.Errorf("presign: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if req.URL, err = url.ParseRequestURI(signedURI); err != nil {
|
||||||
|
return nil, fmt.Errorf("parse signed URI: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
return req, nil
|
return req, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PresignRequestV4a forms pre-signed request to access objects without aws credentials.
|
||||||
|
func PresignRequestV4a(cred aws.Credentials, reqData RequestData, presignData PresignData, log *zap.Logger) (*http.Request, error) {
|
||||||
|
urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, httpbinding.EscapePath(reqData.Bucket, false), httpbinding.EscapePath(reqData.Object, false))
|
||||||
|
req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create new request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range presignData.Headers {
|
||||||
|
req.Header.Set(k, v) // maybe we should filter system header (or keep responsibility on caller)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))
|
||||||
|
req.Header.Set(AmzExpires, strconv.FormatFloat(presignData.Lifetime.Round(time.Second).Seconds(), 'f', 0, 64))
|
||||||
|
|
||||||
|
signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
|
||||||
|
options.DisableURIPathEscaping = true
|
||||||
|
options.LogSigning = true
|
||||||
|
options.Logger = log
|
||||||
|
})
|
||||||
|
|
||||||
|
credAdapter := v4a.SymmetricCredentialAdaptor{
|
||||||
|
SymmetricProvider: credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, ""),
|
||||||
|
}
|
||||||
|
|
||||||
|
creds, err := credAdapter.RetrievePrivateKey(req.Context())
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to derive assymetric key from credentials: %w", err)
|
||||||
|
}
|
||||||
|
presignedURL, _, err := signer.PresignHTTP(req.Context(), creds, req, presignData.Headers[AmzContentSHA256], presignData.Service, []string{presignData.Region}, presignData.SignTime)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("presign: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return http.NewRequest(reqData.Method, presignedURL, nil)
|
||||||
|
}
|
||||||
|
|
|
@ -2,18 +2,23 @@ package auth
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
"github.com/aws/aws-sdk-go-v2/aws"
|
||||||
|
credentialsv2 "github.com/aws/aws-sdk-go-v2/credentials"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
"go.uber.org/zap/zaptest"
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ tokens.Credentials = (*credentialsMock)(nil)
|
var _ tokens.Credentials = (*credentialsMock)(nil)
|
||||||
|
@ -29,11 +34,11 @@ func newTokensFrostfsMock() *credentialsMock {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m credentialsMock) addBox(addr oid.Address, box *accessbox.Box) {
|
func (m credentialsMock) addBox(addr oid.Address, box *accessbox.Box) {
|
||||||
m.boxes[addr.String()] = box
|
m.boxes[getAccessKeyID(addr)] = box
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m credentialsMock) GetBox(_ context.Context, addr oid.Address) (*accessbox.Box, []object.Attribute, error) {
|
func (m credentialsMock) GetBox(_ context.Context, _ cid.ID, accessKeyID string) (*accessbox.Box, []object.Attribute, error) {
|
||||||
box, ok := m.boxes[addr.String()]
|
box, ok := m.boxes[accessKeyID]
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, nil, &apistatus.ObjectNotFound{}
|
return nil, nil, &apistatus.ObjectNotFound{}
|
||||||
}
|
}
|
||||||
|
@ -41,22 +46,24 @@ func (m credentialsMock) GetBox(_ context.Context, addr oid.Address) (*accessbox
|
||||||
return box, nil, nil
|
return box, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m credentialsMock) Put(context.Context, cid.ID, tokens.CredentialsParam) (oid.Address, error) {
|
func (m credentialsMock) Put(context.Context, tokens.CredentialsParam) (oid.Address, error) {
|
||||||
return oid.Address{}, nil
|
return oid.Address{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m credentialsMock) Update(context.Context, oid.Address, tokens.CredentialsParam) (oid.Address, error) {
|
func (m credentialsMock) Update(context.Context, tokens.CredentialsParam) (oid.Address, error) {
|
||||||
return oid.Address{}, nil
|
return oid.Address{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCheckSign(t *testing.T) {
|
func TestCheckSign(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
var accessKeyAddr oid.Address
|
var accessKeyAddr oid.Address
|
||||||
err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
|
err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
|
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
|
||||||
secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
|
secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
|
||||||
awsCreds := credentials.NewStaticCredentials(accessKeyID, secretKey, "")
|
awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secretKey}
|
||||||
|
|
||||||
reqData := RequestData{
|
reqData := RequestData{
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
|
@ -69,9 +76,13 @@ func TestCheckSign(t *testing.T) {
|
||||||
Region: "spb",
|
Region: "spb",
|
||||||
Lifetime: 10 * time.Minute,
|
Lifetime: 10 * time.Minute,
|
||||||
SignTime: time.Now().UTC(),
|
SignTime: time.Now().UTC(),
|
||||||
|
Headers: map[string]string{
|
||||||
|
ContentTypeHdr: "text/plain",
|
||||||
|
AmzContentSHA256: UnsignedPayload,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
req, err := PresignRequest(awsCreds, reqData, presignData)
|
req, err := PresignRequest(ctx, awsCreds, reqData, presignData, zaptest.NewLogger(t))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
expBox := &accessbox.Box{
|
expBox := &accessbox.Box{
|
||||||
|
@ -83,12 +94,109 @@ func TestCheckSign(t *testing.T) {
|
||||||
mock := newTokensFrostfsMock()
|
mock := newTokensFrostfsMock()
|
||||||
mock.addBox(accessKeyAddr, expBox)
|
mock.addBox(accessKeyAddr, expBox)
|
||||||
|
|
||||||
|
c := &Center{
|
||||||
|
cli: mock,
|
||||||
|
reg: NewRegexpMatcher(AuthorizationFieldRegexp),
|
||||||
|
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
||||||
|
settings: ¢erSettingsMock{},
|
||||||
|
}
|
||||||
|
box, err := c.Authenticate(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.EqualValues(t, expBox, box.AccessBox)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCheckSignV4a(t *testing.T) {
|
||||||
|
var accessKeyAddr oid.Address
|
||||||
|
err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
|
||||||
|
secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
|
||||||
|
awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secretKey}
|
||||||
|
|
||||||
|
reqData := RequestData{
|
||||||
|
Method: "GET",
|
||||||
|
Endpoint: "http://localhost:8084",
|
||||||
|
Bucket: "my-bucket",
|
||||||
|
Object: "@obj/name",
|
||||||
|
}
|
||||||
|
presignData := PresignData{
|
||||||
|
Service: "s3",
|
||||||
|
Region: "spb",
|
||||||
|
Lifetime: 10 * time.Minute,
|
||||||
|
SignTime: time.Now().UTC(),
|
||||||
|
Headers: map[string]string{
|
||||||
|
ContentTypeHdr: "text/plain",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := PresignRequestV4a(awsCreds, reqData, presignData, zaptest.NewLogger(t))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
req.Header.Set(ContentTypeHdr, "text/plain")
|
||||||
|
|
||||||
|
expBox := &accessbox.Box{
|
||||||
|
Gate: &accessbox.GateData{
|
||||||
|
SecretKey: secretKey,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
mock := newTokensFrostfsMock()
|
||||||
|
mock.addBox(accessKeyAddr, expBox)
|
||||||
|
|
||||||
c := &Center{
|
c := &Center{
|
||||||
cli: mock,
|
cli: mock,
|
||||||
reg: NewRegexpMatcher(authorizationFieldRegexp),
|
regV4a: NewRegexpMatcher(authorizationFieldV4aRegexp),
|
||||||
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
||||||
}
|
}
|
||||||
box, err := c.Authenticate(req)
|
box, err := c.Authenticate(req)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.EqualValues(t, expBox, box.AccessBox)
|
require.EqualValues(t, expBox, box.AccessBox)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPresignRequestV4a(t *testing.T) {
|
||||||
|
var accessKeyAddr oid.Address
|
||||||
|
err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
|
||||||
|
secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
|
||||||
|
|
||||||
|
signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
|
||||||
|
options.DisableURIPathEscaping = true
|
||||||
|
options.LogSigning = true
|
||||||
|
options.Logger = zaptest.NewLogger(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
credAdapter := v4a.SymmetricCredentialAdaptor{
|
||||||
|
SymmetricProvider: credentialsv2.NewStaticCredentialsProvider(accessKeyID, secretKey, ""),
|
||||||
|
}
|
||||||
|
|
||||||
|
creds, err := credAdapter.RetrievePrivateKey(context.TODO())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
signingTime := time.Now()
|
||||||
|
service := "s3"
|
||||||
|
regionSet := []string{"spb"}
|
||||||
|
|
||||||
|
req, err := http.NewRequest("GET", "http://localhost:8084/bucket/object", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set(AmzExpires, "600")
|
||||||
|
|
||||||
|
presignedURL, hdr, err := signer.PresignHTTP(req.Context(), creds, req, "", service, regionSet, signingTime)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
fmt.Println(presignedURL)
|
||||||
|
fmt.Println(hdr)
|
||||||
|
|
||||||
|
signature := req.URL.Query().Get(AmzSignature)
|
||||||
|
|
||||||
|
r, err := http.NewRequest("GET", presignedURL, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
query := r.URL.Query()
|
||||||
|
query.Del(AmzSignature)
|
||||||
|
r.URL.RawQuery = query.Encode()
|
||||||
|
|
||||||
|
err = signer.VerifyPresigned(creds, r, "", service, regionSet, signingTime, signature)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
37
api/auth/signer/smithy/encoding/httpbinding/path_replace.go
Normal file
37
api/auth/signer/smithy/encoding/httpbinding/path_replace.go
Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
// This file is part of https://github.com/aws/smithy-go/blob/f0c6adfdec6e40bb8bb2920a40d016943b4ad762/encoding/httpbinding/path_replace.go
|
||||||
|
|
||||||
|
package httpbinding
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EscapePath escapes part of a URL path in Amazon style.
|
||||||
|
func EscapePath(path string, encodeSep bool) string {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
for i := 0; i < len(path); i++ {
|
||||||
|
c := path[i]
|
||||||
|
if noEscape[c] || (c == '/' && !encodeSep) {
|
||||||
|
buf.WriteByte(c)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(&buf, "%%%02X", c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
var noEscape [256]bool
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
for i := 0; i < len(noEscape); i++ {
|
||||||
|
// AWS expects every character except these to be escaped
|
||||||
|
noEscape[i] = (i >= 'A' && i <= 'Z') ||
|
||||||
|
(i >= 'a' && i <= 'z') ||
|
||||||
|
(i >= '0' && i <= '9') ||
|
||||||
|
i == '-' ||
|
||||||
|
i == '.' ||
|
||||||
|
i == '_' ||
|
||||||
|
i == '~'
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,87 +0,0 @@
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// validator houses a set of rule needed for validation of a
|
|
||||||
// string value.
|
|
||||||
type rules []rule
|
|
||||||
|
|
||||||
// rule interface allows for more flexible rules and just simply
|
|
||||||
// checks whether or not a value adheres to that rule.
|
|
||||||
type rule interface {
|
|
||||||
IsValid(value string) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsValid will iterate through all rules and see if any rules
|
|
||||||
// apply to the value and supports nested rules.
|
|
||||||
func (r rules) IsValid(value string) bool {
|
|
||||||
for _, rule := range r {
|
|
||||||
if rule.IsValid(value) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// mapRule generic rule for maps.
|
|
||||||
type mapRule map[string]struct{}
|
|
||||||
|
|
||||||
// IsValid for the map rule satisfies whether it exists in the map.
|
|
||||||
func (m mapRule) IsValid(value string) bool {
|
|
||||||
_, ok := m[value]
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// whitelist is a generic rule for whitelisting.
|
|
||||||
type whitelist struct {
|
|
||||||
rule
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsValid for whitelist checks if the value is within the whitelist.
|
|
||||||
func (w whitelist) IsValid(value string) bool {
|
|
||||||
return w.rule.IsValid(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// blacklist is a generic rule for blacklisting.
|
|
||||||
type blacklist struct {
|
|
||||||
rule
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsValid for whitelist checks if the value is within the whitelist.
|
|
||||||
func (b blacklist) IsValid(value string) bool {
|
|
||||||
return !b.rule.IsValid(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
type patterns []string
|
|
||||||
|
|
||||||
// IsValid for patterns checks each pattern and returns if a match has
|
|
||||||
// been found.
|
|
||||||
func (p patterns) IsValid(value string) bool {
|
|
||||||
for _, pattern := range p {
|
|
||||||
if HasPrefixFold(value, pattern) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
|
|
||||||
// under Unicode case-folding.
|
|
||||||
func HasPrefixFold(s, prefix string) bool {
|
|
||||||
return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
|
|
||||||
}
|
|
||||||
|
|
||||||
// inclusiveRules rules allow for rules to depend on one another.
|
|
||||||
type inclusiveRules []rule
|
|
||||||
|
|
||||||
// IsValid will return true if all rules are true.
|
|
||||||
func (r inclusiveRules) IsValid(value string) bool {
|
|
||||||
for _, rule := range r {
|
|
||||||
if !rule.IsValid(value) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
|
@ -1,7 +0,0 @@
|
||||||
package v4
|
|
||||||
|
|
||||||
// WithUnsignedPayload will enable and set the UnsignedPayload field to
|
|
||||||
// true of the signer.
|
|
||||||
func WithUnsignedPayload(v4 *Signer) {
|
|
||||||
v4.UnsignedPayload = true
|
|
||||||
}
|
|
|
@ -1,14 +0,0 @@
|
||||||
//go:build go1.7
|
|
||||||
// +build go1.7
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
)
|
|
||||||
|
|
||||||
func requestContext(r *http.Request) aws.Context {
|
|
||||||
return r.Context()
|
|
||||||
}
|
|
|
@ -1,63 +0,0 @@
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/hex"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
)
|
|
||||||
|
|
||||||
type credentialValueProvider interface {
|
|
||||||
Get() (credentials.Value, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StreamSigner implements signing of event stream encoded payloads.
|
|
||||||
type StreamSigner struct {
|
|
||||||
region string
|
|
||||||
service string
|
|
||||||
|
|
||||||
credentials credentialValueProvider
|
|
||||||
|
|
||||||
prevSig []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages.
|
|
||||||
func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner {
|
|
||||||
return &StreamSigner{
|
|
||||||
region: region,
|
|
||||||
service: service,
|
|
||||||
credentials: credentials,
|
|
||||||
prevSig: seedSignature,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetSignature takes an event stream encoded headers and payload and returns a signature.
|
|
||||||
func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) {
|
|
||||||
credValue, err := s.credentials.Get()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date)
|
|
||||||
|
|
||||||
keyPath := buildSigningScope(s.region, s.service, date)
|
|
||||||
|
|
||||||
stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date)
|
|
||||||
|
|
||||||
signature := hmacSHA256(sigKey, []byte(stringToSign))
|
|
||||||
s.prevSig = signature
|
|
||||||
|
|
||||||
return signature, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string {
|
|
||||||
return strings.Join([]string{
|
|
||||||
"AWS4-HMAC-SHA256-PAYLOAD",
|
|
||||||
formatTime(date),
|
|
||||||
scope,
|
|
||||||
hex.EncodeToString(prevSig),
|
|
||||||
hex.EncodeToString(hashSHA256(headers)),
|
|
||||||
hex.EncodeToString(hashSHA256(payload)),
|
|
||||||
}, "\n")
|
|
||||||
}
|
|
|
@ -1,25 +0,0 @@
|
||||||
//go:build go1.5
|
|
||||||
// +build go1.5
|
|
||||||
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func getURIPath(u *url.URL) string {
|
|
||||||
var uri string
|
|
||||||
|
|
||||||
if len(u.Opaque) > 0 {
|
|
||||||
uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
|
|
||||||
} else {
|
|
||||||
uri = u.EscapedPath()
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(uri) == 0 {
|
|
||||||
uri = "/"
|
|
||||||
}
|
|
||||||
|
|
||||||
return uri
|
|
||||||
}
|
|
|
@ -1,858 +0,0 @@
|
||||||
// Package v4 implements signing for AWS V4 signer
|
|
||||||
//
|
|
||||||
// Provides request signing for request that need to be signed with
|
|
||||||
// AWS V4 Signatures.
|
|
||||||
//
|
|
||||||
// # Standalone Signer
|
|
||||||
//
|
|
||||||
// Generally using the signer outside of the SDK should not require any additional
|
|
||||||
// logic when using Go v1.5 or higher. The signer does this by taking advantage
|
|
||||||
// of the URL.EscapedPath method. If your request URI requires additional escaping
|
|
||||||
// you many need to use the URL.Opaque to define what the raw URI should be sent
|
|
||||||
// to the service as.
|
|
||||||
//
|
|
||||||
// The signer will first check the URL.Opaque field, and use its value if set.
|
|
||||||
// The signer does require the URL.Opaque field to be set in the form of:
|
|
||||||
//
|
|
||||||
// "//<hostname>/<path>"
|
|
||||||
//
|
|
||||||
// // e.g.
|
|
||||||
// "//example.com/some/path"
|
|
||||||
//
|
|
||||||
// The leading "//" and hostname are required or the URL.Opaque escaping will
|
|
||||||
// not work correctly.
|
|
||||||
//
|
|
||||||
// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
|
|
||||||
// method and using the returned value. If you're using Go v1.4 you must set
|
|
||||||
// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
|
|
||||||
// Go v1.5 the signer will fallback to URL.Path.
|
|
||||||
//
|
|
||||||
// AWS v4 signature validation requires that the canonical string's URI path
|
|
||||||
// element must be the URI escaped form of the HTTP request's path.
|
|
||||||
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
|
|
||||||
//
|
|
||||||
// The Go HTTP client will perform escaping automatically on the request. Some
|
|
||||||
// of these escaping may cause signature validation errors because the HTTP
|
|
||||||
// request differs from the URI path or query that the signature was generated.
|
|
||||||
// https://golang.org/pkg/net/url/#URL.EscapedPath
|
|
||||||
//
|
|
||||||
// Because of this, it is recommended that when using the signer outside of the
|
|
||||||
// SDK that explicitly escaping the request prior to being signed is preferable,
|
|
||||||
// and will help prevent signature validation errors. This can be done by setting
|
|
||||||
// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
|
|
||||||
// call URL.EscapedPath() if Opaque is not set.
|
|
||||||
//
|
|
||||||
// If signing a request intended for HTTP2 server, and you're using Go 1.6.2
|
|
||||||
// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
|
|
||||||
// request URL. https://github.com/golang/go/issues/16847 points to a bug in
|
|
||||||
// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
|
|
||||||
// message. URL.Opaque generally will force Go to make requests with absolute URL.
|
|
||||||
// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
|
|
||||||
// or url.EscapedPath will ignore the RawPath escaping.
|
|
||||||
//
|
|
||||||
// Test `TestStandaloneSign` provides a complete example of using the signer
|
|
||||||
// outside of the SDK and pre-escaping the URI path.
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
authorizationHeader = "Authorization"
|
|
||||||
authHeaderSignatureElem = "Signature="
|
|
||||||
signatureQueryKey = "X-Amz-Signature"
|
|
||||||
|
|
||||||
authHeaderPrefix = "AWS4-HMAC-SHA256"
|
|
||||||
timeFormat = "20060102T150405Z"
|
|
||||||
shortTimeFormat = "20060102"
|
|
||||||
awsV4Request = "aws4_request"
|
|
||||||
|
|
||||||
// emptyStringSHA256 is a SHA256 of an empty string.
|
|
||||||
emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
|
|
||||||
)
|
|
||||||
|
|
||||||
var ignoredPresignHeaders = rules{
|
|
||||||
blacklist{
|
|
||||||
mapRule{
|
|
||||||
authorizationHeader: struct{}{},
|
|
||||||
"User-Agent": struct{}{},
|
|
||||||
"X-Amzn-Trace-Id": struct{}{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// drop User-Agent header to be compatible with aws sdk java v1.
|
|
||||||
var ignoredHeaders = rules{
|
|
||||||
blacklist{
|
|
||||||
mapRule{
|
|
||||||
authorizationHeader: struct{}{},
|
|
||||||
"X-Amzn-Trace-Id": struct{}{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// requiredSignedHeaders is a whitelist for build canonical headers.
|
|
||||||
var requiredSignedHeaders = rules{
|
|
||||||
whitelist{
|
|
||||||
mapRule{
|
|
||||||
"Cache-Control": struct{}{},
|
|
||||||
"Content-Disposition": struct{}{},
|
|
||||||
"Content-Encoding": struct{}{},
|
|
||||||
"Content-Language": struct{}{},
|
|
||||||
"Content-Md5": struct{}{},
|
|
||||||
"Content-Type": struct{}{},
|
|
||||||
"Expires": struct{}{},
|
|
||||||
"If-Match": struct{}{},
|
|
||||||
"If-Modified-Since": struct{}{},
|
|
||||||
"If-None-Match": struct{}{},
|
|
||||||
"If-Unmodified-Since": struct{}{},
|
|
||||||
"Range": struct{}{},
|
|
||||||
"X-Amz-Acl": struct{}{},
|
|
||||||
"X-Amz-Copy-Source": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-If-Match": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-If-None-Match": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-Range": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
|
||||||
"X-Amz-Grant-Full-control": struct{}{},
|
|
||||||
"X-Amz-Grant-Read": struct{}{},
|
|
||||||
"X-Amz-Grant-Read-Acp": struct{}{},
|
|
||||||
"X-Amz-Grant-Write": struct{}{},
|
|
||||||
"X-Amz-Grant-Write-Acp": struct{}{},
|
|
||||||
"X-Amz-Metadata-Directive": struct{}{},
|
|
||||||
"X-Amz-Mfa": struct{}{},
|
|
||||||
"X-Amz-Request-Payer": struct{}{},
|
|
||||||
"X-Amz-Server-Side-Encryption": struct{}{},
|
|
||||||
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
|
|
||||||
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
|
||||||
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
|
|
||||||
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
|
||||||
"X-Amz-Storage-Class": struct{}{},
|
|
||||||
"X-Amz-Tagging": struct{}{},
|
|
||||||
"X-Amz-Website-Redirect-Location": struct{}{},
|
|
||||||
"X-Amz-Content-Sha256": struct{}{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
patterns{"X-Amz-Meta-"},
|
|
||||||
}
|
|
||||||
|
|
||||||
// allowedHoisting is a whitelist for build query headers. The boolean value
|
|
||||||
// represents whether or not it is a pattern.
|
|
||||||
var allowedQueryHoisting = inclusiveRules{
|
|
||||||
blacklist{requiredSignedHeaders},
|
|
||||||
patterns{"X-Amz-"},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Signer applies AWS v4 signing to given request. Use this to sign requests
|
|
||||||
// that need to be signed with AWS V4 Signatures.
|
|
||||||
type Signer struct {
	// The authentication credentials the request will be signed against.
	// This value must be set to sign requests.
	Credentials *credentials.Credentials

	// Sets the log level the signer should use when reporting information to
	// the logger. If the logger is nil nothing will be logged. See
	// aws.LogLevelType for more information on available logging levels.
	//
	// By default nothing will be logged.
	Debug aws.LogLevelType

	// The logger logging information will be written to. If the logger
	// is nil, nothing will be logged.
	Logger aws.Logger

	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
	// request header to the request's query string. This is most commonly used
	// with pre-signed requests preventing headers from being added to the
	// request's query string.
	DisableHeaderHoisting bool

	// Disables the automatic escaping of the URI path of the request for the
	// signature's canonical string's path. For services that do not need additional
	// escaping then use this to disable the signer escaping the path.
	//
	// S3 is an example of a service that does not need additional escaping.
	//
	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
	DisableURIPathEscaping bool

	// Disables the automatic setting of the HTTP request's Body field with the
	// io.ReadSeeker passed in to the signer. This is useful if you're using a
	// custom wrapper around the body for the io.ReadSeeker and want to preserve
	// the Body value on the Request.Body.
	//
	// This does run the risk of signing a request with a body that will not be
	// sent in the request. Need to ensure that the underlying data of the Body
	// values are the same.
	DisableRequestBodyOverwrite bool

	// currentTimeFn returns the time value which represents the current time.
	// This value should only be used for testing. If it is nil the default
	// time.Now will be used.
	currentTimeFn func() time.Time

	// UnsignedPayload will prevent signing of the payload. This will only
	// work for services that have support for this.
	UnsignedPayload bool
}
|
|
||||||
|
|
||||||
// NewSigner returns a Signer pointer configured with the credentials and optional
|
|
||||||
// option values provided. If not options are provided the Signer will use its
|
|
||||||
// default configuration.
|
|
||||||
func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
|
|
||||||
v4 := &Signer{
|
|
||||||
Credentials: credentials,
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, option := range options {
|
|
||||||
option(v4)
|
|
||||||
}
|
|
||||||
|
|
||||||
return v4
|
|
||||||
}
|
|
||||||
|
|
||||||
// signingCtx holds all intermediate state for signing a single request:
// the inputs (request, body, time, scope) and the derived artifacts
// (canonical string, string to sign, final signature).
type signingCtx struct {
	ServiceName      string
	Region           string
	Request          *http.Request
	Body             io.ReadSeeker
	Query            url.Values
	Time             time.Time
	ExpireTime       time.Duration
	SignedHeaderVals http.Header

	DisableURIPathEscaping bool

	// credValues are the resolved credentials the request is signed with.
	credValues credentials.Value
	// isPresign selects query-string signing instead of header signing.
	isPresign bool
	// unsignedPayload skips hashing the body ("UNSIGNED-PAYLOAD").
	unsignedPayload bool

	// Derived values, populated in order by build().
	bodyDigest       string
	signedHeaders    string
	canonicalHeaders string
	canonicalString  string
	credentialString string
	stringToSign     string
	signature        string
}
|
|
||||||
|
|
||||||
// Sign signs AWS v4 requests with the provided body, service name, region the
// request is made to, and time the request is signed at. The signTime allows
// you to specify that a request is signed for the future, and cannot be
// used until then.
//
// Returns the list of HTTP headers that were included in the signature, or an
// error if signing failed. For header-signed requests this value is generally
// not needed, as the full request context is captured by the http.Request.
//
// Sign will set the request's Body to be the `body` parameter passed in
// (wrapped in an io.ReadCloser if needed); a nil body sets Body to nil.
// The request's ContentLength is never modified.
//
// Sign differs from Presign in that it signs the request using HTTP header
// values, intended for http.Request values that will not be shared, or are
// shared in a way that the header values will not be lost.
//
// The request's body is an io.ReadSeeker so its SHA256 can be computed. To
// bypass the hash computation, set the "X-Amz-Content-Sha256" header with a
// precomputed value; the signer only computes the hash when that header is
// empty.
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
	return v4.signWithBody(r, body, service, region, 0, false, signTime)
}
|
|
||||||
|
|
||||||
// Presign signs AWS v4 requests with the provided body, service name, region
// the request is made to, and time the request is signed at. The signTime
// allows you to specify that a request is signed for the future, and cannot
// be used until then.
//
// Returns the list of HTTP headers that were included in the signature, or an
// error if signing failed. For presigned requests these headers and values
// must be included on the HTTP request when it is made — share them with the
// party the presigned request is distributed to.
//
// Presign differs from Sign in that it signs the request using the query
// string instead of header values, so the presigned URL can be shared with
// third parties or distributed with minimal dependencies.
//
// The exp value is the duration the signed request remains valid after the
// signing time.
//
// The request's body is an io.ReadSeeker so its SHA256 can be computed. To
// bypass the hash computation, set the "X-Amz-Content-Sha256" header with a
// precomputed value; the signer only computes the hash when that header is
// empty.
//
// Presigning an S3 request will not compute the body's SHA256 hash by
// default, since the common use case is sharing PUT/GET capability. To
// include the body's SHA256 in the signature, set the "X-Amz-Content-Sha256"
// HTTP header explicitly.
func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
	return v4.signWithBody(r, body, service, region, exp, true, signTime)
}
|
|
||||||
|
|
||||||
// signWithBody is the shared implementation behind Sign and Presign. It
// builds a signing context, resolves credentials, computes the signature,
// and applies it as either the Authorization header (sign) or query-string
// parameters (presign). Returns the headers included in the signature.
func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
	currentTimeFn := v4.currentTimeFn
	if currentTimeFn == nil {
		currentTimeFn = time.Now
	}

	ctx := &signingCtx{
		Request:                r,
		Body:                   body,
		Query:                  r.URL.Query(),
		Time:                   signTime,
		ExpireTime:             exp,
		isPresign:              isPresign,
		ServiceName:            service,
		Region:                 region,
		DisableURIPathEscaping: v4.DisableURIPathEscaping,
		unsignedPayload:        v4.UnsignedPayload,
	}

	// Canonical requests require query parameter values in sorted order.
	for key := range ctx.Query {
		sort.Strings(ctx.Query[key])
	}

	if ctx.isRequestSigned() {
		// Re-signing a previously signed request: refresh the timestamp and
		// strip any stale presign query parameters before signing again.
		ctx.Time = currentTimeFn()
		ctx.handlePresignRemoval()
	}

	var err error
	ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r))
	if err != nil {
		return http.Header{}, err
	}

	ctx.sanitizeHostForHeader()
	ctx.assignAmzQueryValues()
	if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
		return nil, err
	}

	// If the request is not presigned the body should be attached to it. This
	// prevents the confusion of wanting to send a signed request without
	// the body the request was signed for attached.
	if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
		var reader io.ReadCloser
		if body != nil {
			var ok bool
			if reader, ok = body.(io.ReadCloser); !ok {
				reader = io.NopCloser(body)
			}
		}
		r.Body = reader
	}

	if v4.Debug.Matches(aws.LogDebugWithSigning) {
		v4.logSigningInfo(ctx)
	}

	return ctx.SignedHeaderVals, nil
}
|
|
||||||
|
|
||||||
// sanitizeHostForHeader removes default port numbers from the request host
// so the signed Host header matches what net/http sends on the wire.
func (ctx *signingCtx) sanitizeHostForHeader() {
	request.SanitizeHostForHeader(ctx.Request)
}
|
|
||||||
|
|
||||||
// handlePresignRemoval strips a stale presign signature from the query
// string before the request is signed again. No-op for header-signed
// requests.
func (ctx *signingCtx) handlePresignRemoval() {
	if !ctx.isPresign {
		return
	}

	// The existing signature for this request is stale. It is invalid and
	// must be removed so the request can be re-signed.
	ctx.removePresign()

	// Update the request's query string to ensure the values stay in
	// sync in the case retrieving the new credentials fails.
	ctx.Request.URL.RawQuery = ctx.Query.Encode()
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) assignAmzQueryValues() {
|
|
||||||
if ctx.isPresign {
|
|
||||||
ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
|
|
||||||
if ctx.credValues.SessionToken != "" {
|
|
||||||
ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
|
|
||||||
} else {
|
|
||||||
ctx.Query.Del("X-Amz-Security-Token")
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if ctx.credValues.SessionToken != "" {
|
|
||||||
ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignRequestHandler is a named request handler the SDK will use to sign
// service client requests with the V4 signature.
var SignRequestHandler = request.NamedHandler{
	Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
}
|
|
||||||
|
|
||||||
// SignSDKRequest signs an AWS request with the V4 signature. This
// request handler should only be used with the SDK's built in service client's
// API operation requests.
//
// This function should not be used on its own, but in conjunction with
// an AWS service client's API operation call. To sign a standalone request
// not created by a service client's API operation method use the "Sign" or
// "Presign" functions of the "Signer" type.
//
// If the credentials of the request's config are set to
// credentials.AnonymousCredentials the request will not be signed.
func SignSDKRequest(req *request.Request) {
	SignSDKRequestWithCurrentTime(req, time.Now)
}
|
|
||||||
|
|
||||||
// BuildNamedHandler will build a generic handler for signing.
|
|
||||||
func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
|
|
||||||
return request.NamedHandler{
|
|
||||||
Name: name,
|
|
||||||
Fn: func(req *request.Request) {
|
|
||||||
SignSDKRequestWithCurrentTime(req, time.Now, opts...)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
// function passed in. Behaves the same as SignSDKRequest with the exception
// the request is signed with the value returned by the current time function.
func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
	// If the request does not need to be signed ignore the signing of the
	// request if the AnonymousCredentials object is used.
	if req.Config.Credentials == credentials.AnonymousCredentials {
		return
	}

	// Prefer the client's explicit signing region, fall back to the
	// configured region.
	region := req.ClientInfo.SigningRegion
	if region == "" {
		region = aws.StringValue(req.Config.Region)
	}

	// Prefer the client's explicit signing name, fall back to the service
	// name.
	name := req.ClientInfo.SigningName
	if name == "" {
		name = req.ClientInfo.ServiceName
	}

	v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
		v4.Debug = req.Config.LogLevel.Value()
		v4.Logger = req.Config.Logger
		v4.DisableHeaderHoisting = req.NotHoist
		v4.currentTimeFn = curTimeFn
		if name == "s3" {
			// S3 service should not have any escaping applied.
			v4.DisableURIPathEscaping = true
		}
		// Prevents setting the HTTPRequest's Body. Since the Body could be
		// wrapped in a custom io.Closer that we do not want to be stomped
		// on top of by the signer.
		v4.DisableRequestBodyOverwrite = true
	})

	// Caller-provided options are applied last so they can override the
	// defaults above.
	for _, opt := range opts {
		opt(v4)
	}

	curTime := curTimeFn()
	signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
		name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
	)
	if err != nil {
		req.Error = err
		req.SignedHeaderVals = nil
		return
	}

	req.SignedHeaderVals = signedHeaders
	req.LastSignedAt = curTime
}
|
|
||||||
|
|
||||||
// logSignInfoMsg is the debug template used to dump the canonical request
// string and the string to sign when signing-level debug logging is enabled.
const logSignInfoMsg = `DEBUG: Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`

// logSignedURLMsg is appended to logSignInfoMsg for presigned requests to
// show the fully signed URL.
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
|
|
||||||
|
|
||||||
func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
|
|
||||||
signedURLMsg := ""
|
|
||||||
if ctx.isPresign {
|
|
||||||
signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
|
|
||||||
}
|
|
||||||
msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
|
|
||||||
v4.Logger.Log(msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// build assembles all signing artifacts in dependency order: timestamp and
// credential scope, body digest, canonical headers, canonical string, string
// to sign, and finally the signature. For presigned requests the signature is
// appended to the query string; otherwise the Authorization header is set.
func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
	ctx.buildTime()             // no depends
	ctx.buildCredentialString() // no depends

	if err := ctx.buildBodyDigest(); err != nil {
		return err
	}

	unsignedHeaders := ctx.Request.Header
	if ctx.isPresign {
		if !disableHeaderHoisting {
			// Hoist eligible headers into the query string so they become
			// part of the presigned URL instead of required request headers.
			var urlValues url.Values
			urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
			for k := range urlValues {
				ctx.Query[k] = urlValues[k]
			}
		}
	}

	if ctx.isPresign {
		ctx.buildCanonicalHeaders(ignoredPresignHeaders, unsignedHeaders)
	} else {
		ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
	}
	ctx.buildCanonicalString() // depends on canon headers / signed headers
	ctx.buildStringToSign()    // depends on canon string
	ctx.buildSignature()       // depends on string to sign

	if ctx.isPresign {
		ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature
	} else {
		parts := []string{
			authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
			"SignedHeaders=" + ctx.signedHeaders,
			authHeaderSignatureElem + ctx.signature,
		}
		ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", "))
	}

	return nil
}
|
|
||||||
|
|
||||||
// GetSignedRequestSignature attempts to extract the signature of the request.
|
|
||||||
// Returning an error if the request is unsigned, or unable to extract the
|
|
||||||
// signature.
|
|
||||||
func GetSignedRequestSignature(r *http.Request) ([]byte, error) {
|
|
||||||
if auth := r.Header.Get(authorizationHeader); len(auth) != 0 {
|
|
||||||
ps := strings.Split(auth, ", ")
|
|
||||||
for _, p := range ps {
|
|
||||||
if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 {
|
|
||||||
sig := p[len(authHeaderSignatureElem):]
|
|
||||||
if len(sig) == 0 {
|
|
||||||
return nil, fmt.Errorf("invalid request signature authorization header")
|
|
||||||
}
|
|
||||||
return hex.DecodeString(sig)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 {
|
|
||||||
return hex.DecodeString(sig)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("request not signed")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) buildTime() {
|
|
||||||
if ctx.isPresign {
|
|
||||||
duration := int64(ctx.ExpireTime / time.Second)
|
|
||||||
ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time))
|
|
||||||
ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
|
|
||||||
} else {
|
|
||||||
ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildCredentialString computes the credential scope
// (date/region/service/aws4_request) and, for presigned requests, publishes
// it in the X-Amz-Credential query parameter.
func (ctx *signingCtx) buildCredentialString() {
	ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time)

	if ctx.isPresign {
		ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
	}
}
|
|
||||||
|
|
||||||
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
|
|
||||||
query := url.Values{}
|
|
||||||
unsignedHeaders := http.Header{}
|
|
||||||
for k, h := range header {
|
|
||||||
if r.IsValid(k) {
|
|
||||||
query[k] = h
|
|
||||||
} else {
|
|
||||||
unsignedHeaders[k] = h
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return query, unsignedHeaders
|
|
||||||
}
|
|
||||||
// buildCanonicalHeaders computes the canonical header block and the signed
// header list from the request headers, skipping any header the rule set
// marks as ignored. The "host" header is always signed. Results are stored
// on the context and, for presigned requests, mirrored into the
// X-Amz-SignedHeaders query parameter.
func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
	var headers []string
	headers = append(headers, "host")
	for k, v := range header {
		if !r.IsValid(k) {
			continue // ignored header
		}
		if ctx.SignedHeaderVals == nil {
			ctx.SignedHeaderVals = make(http.Header)
		}

		lowerCaseKey := strings.ToLower(k)
		if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
			// Header keys differing only in case collapse to one canonical
			// key; include the additional values.
			ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
			continue
		}

		headers = append(headers, lowerCaseKey)
		ctx.SignedHeaderVals[lowerCaseKey] = v
	}
	// The signed header list must be sorted for a stable signature.
	sort.Strings(headers)

	ctx.signedHeaders = strings.Join(headers, ";")

	if ctx.isPresign {
		ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
	}

	headerValues := make([]string, len(headers))
	for i, k := range headers {
		if k == "host" {
			// Prefer the explicit Host field over the URL host, matching
			// what net/http sends on the wire.
			if ctx.Request.Host != "" {
				headerValues[i] = "host:" + ctx.Request.Host
			} else {
				headerValues[i] = "host:" + ctx.Request.URL.Host
			}
		} else {
			headerValues[i] = k + ":" +
				strings.Join(ctx.SignedHeaderVals[k], ",")
		}
	}
	// SigV4 requires sequential spaces inside header values be collapsed.
	stripExcessSpaces(headerValues)
	ctx.canonicalHeaders = strings.Join(headerValues, "\n")
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) buildCanonicalString() {
|
|
||||||
ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
|
|
||||||
|
|
||||||
uri := getURIPath(ctx.Request.URL)
|
|
||||||
|
|
||||||
if !ctx.DisableURIPathEscaping {
|
|
||||||
uri = rest.EscapePath(uri, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.canonicalString = strings.Join([]string{
|
|
||||||
ctx.Request.Method,
|
|
||||||
uri,
|
|
||||||
ctx.Request.URL.RawQuery,
|
|
||||||
ctx.canonicalHeaders + "\n",
|
|
||||||
ctx.signedHeaders,
|
|
||||||
ctx.bodyDigest,
|
|
||||||
}, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) buildStringToSign() {
|
|
||||||
ctx.stringToSign = strings.Join([]string{
|
|
||||||
authHeaderPrefix,
|
|
||||||
formatTime(ctx.Time),
|
|
||||||
ctx.credentialString,
|
|
||||||
hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))),
|
|
||||||
}, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) buildSignature() {
|
|
||||||
creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time)
|
|
||||||
signature := hmacSHA256(creds, []byte(ctx.stringToSign))
|
|
||||||
ctx.signature = hex.EncodeToString(signature)
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildBodyDigest resolves the payload hash used in the canonical request.
// A caller-supplied X-Amz-Content-Sha256 header wins; otherwise the digest
// is "UNSIGNED-PAYLOAD" (unsigned payload or S3 presign), the empty-string
// SHA256 for a nil body, or the computed SHA256 of the body. The digest is
// also set as the X-Amz-Content-Sha256 header for services that require it.
func (ctx *signingCtx) buildBodyDigest() error {
	hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
	if hash == "" {
		// Services that require the payload hash header even when the
		// payload itself is unsigned.
		includeSHA256Header := ctx.unsignedPayload ||
			ctx.ServiceName == "s3" ||
			ctx.ServiceName == "glacier"

		s3Presign := ctx.isPresign && ctx.ServiceName == "s3"

		if ctx.unsignedPayload || s3Presign {
			hash = "UNSIGNED-PAYLOAD"
			// S3 presigned URLs must not carry the header; the value lives
			// in the signature only.
			includeSHA256Header = !s3Presign
		} else if ctx.Body == nil {
			hash = emptyStringSHA256
		} else {
			if !aws.IsReaderSeekable(ctx.Body) {
				return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
			}
			hashBytes, err := makeSha256Reader(ctx.Body)
			if err != nil {
				return err
			}
			hash = hex.EncodeToString(hashBytes)
		}

		if includeSHA256Header {
			ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
		}
	}
	ctx.bodyDigest = hash

	return nil
}
|
|
||||||
|
|
||||||
// isRequestSigned returns if the request is currently signed or presigned.
|
|
||||||
func (ctx *signingCtx) isRequestSigned() bool {
|
|
||||||
if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if ctx.Request.Header.Get("Authorization") != "" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// unsign removes signing flags for both signed and presigned requests.
|
|
||||||
func (ctx *signingCtx) removePresign() {
|
|
||||||
ctx.Query.Del("X-Amz-Algorithm")
|
|
||||||
ctx.Query.Del("X-Amz-Signature")
|
|
||||||
ctx.Query.Del("X-Amz-Security-Token")
|
|
||||||
ctx.Query.Del("X-Amz-Date")
|
|
||||||
ctx.Query.Del("X-Amz-Expires")
|
|
||||||
ctx.Query.Del("X-Amz-Credential")
|
|
||||||
ctx.Query.Del("X-Amz-SignedHeaders")
|
|
||||||
}
|
|
||||||
|
|
||||||
// hmacSHA256 returns the HMAC-SHA256 of data under the given key.
func hmacSHA256(key []byte, data []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(data) // hash.Hash.Write never returns an error
	return mac.Sum(nil)
}
|
|
||||||
|
|
||||||
// hashSHA256 returns the SHA256 digest of data.
func hashSHA256(data []byte) []byte {
	sum := sha256.Sum256(data)
	return sum[:]
}
|
|
||||||
|
|
||||||
// makeSha256Reader computes the SHA256 of the reader's remaining content and
// seeks back to the original position afterwards, so the body can still be
// sent. The named err return lets the deferred seek surface its failure.
func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) {
	hash := sha256.New()
	start, err := reader.Seek(0, io.SeekCurrent)
	if err != nil {
		return nil, err
	}
	defer func() {
		// ensure error is returned if unable to seek back to start of payload.
		_, err = reader.Seek(start, io.SeekStart)
	}()

	// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
	// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
	size, err := aws.SeekerLen(reader)
	if err != nil {
		_, _ = io.Copy(hash, reader)
	} else {
		_, _ = io.CopyN(hash, reader, size)
	}

	return hash.Sum(nil), nil
}
|
|
||||||
|
|
||||||
const doubleSpace = "  "

// stripExcessSpaces rewrites each string in vals so it has no leading or
// trailing spaces and no runs of multiple side-by-side spaces. Only the
// space character is collapsed; tabs and other whitespace are preserved.
func stripExcessSpaces(vals []string) {
	for i, s := range vals {
		s = strings.Trim(s, " ")

		// Fast path: nothing internal to collapse.
		if !strings.Contains(s, doubleSpace) {
			vals[i] = s
			continue
		}

		var b strings.Builder
		b.Grow(len(s))
		prevSpace := false
		for p := 0; p < len(s); p++ {
			c := s[p]
			if c == ' ' {
				// Keep only the first space of a run.
				if !prevSpace {
					b.WriteByte(c)
				}
				prevSpace = true
				continue
			}
			prevSpace = false
			b.WriteByte(c)
		}
		vals[i] = b.String()
	}
}
|
|
||||||
|
|
||||||
func buildSigningScope(region, service string, dt time.Time) string {
|
|
||||||
return strings.Join([]string{
|
|
||||||
formatShortTime(dt),
|
|
||||||
region,
|
|
||||||
service,
|
|
||||||
awsV4Request,
|
|
||||||
}, "/")
|
|
||||||
}
|
|
||||||
|
|
||||||
func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte {
|
|
||||||
hmacDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt)))
|
|
||||||
hmacRegion := hmacSHA256(hmacDate, []byte(region))
|
|
||||||
hmacService := hmacSHA256(hmacRegion, []byte(service))
|
|
||||||
signingKey := hmacSHA256(hmacService, []byte(awsV4Request))
|
|
||||||
return signingKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatShortTime renders the UTC date in the short (date-only) SigV4 scope
// format.
func formatShortTime(dt time.Time) string {
	return dt.UTC().Format(shortTimeFormat)
}
|
|
||||||
|
|
||||||
// formatTime renders the UTC timestamp in the full SigV4 X-Amz-Date format.
func formatTime(dt time.Time) string {
	return dt.UTC().Format(timeFormat)
}
|
|
144
api/auth/signer/v4asdk2/credentials.go
Normal file
144
api/auth/signer/v4asdk2/credentials.go
Normal file
|
@ -0,0 +1,144 @@
|
||||||
|
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/credentials.go
|
||||||
|
// with changes:
|
||||||
|
// * use `time.Now()` instead of `sdk.NowTime()`
|
||||||
|
|
||||||
|
package v4a
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go-v2/aws"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Credentials is Context, ECDSA, and Optional Session Token that can be used
// to sign requests using SigV4a.
type Credentials struct {
	Context      string
	PrivateKey   *ecdsa.PrivateKey
	SessionToken string

	// CanExpire marks whether Expires is meaningful; Expires is the time
	// the credentials will expire.
	CanExpire bool
	Expires   time.Time
}
|
||||||
|
|
||||||
|
// Expired returns if the credentials have expired.
|
||||||
|
func (v Credentials) Expired() bool {
|
||||||
|
if v.CanExpire {
|
||||||
|
return !v.Expires.After(time.Now())
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasKeys returns if the credentials keys are set.
|
||||||
|
func (v Credentials) HasKeys() bool {
|
||||||
|
return len(v.Context) > 0 && v.PrivateKey != nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SymmetricCredentialAdaptor wraps a SigV4 AccessKey/SecretKey provider and adapts the credentials
// to a ECDSA PrivateKey for signing with SiV4a.
type SymmetricCredentialAdaptor struct {
	SymmetricProvider aws.CredentialsProvider

	// asymmetric caches the derived *Credentials; m serializes derivation.
	asymmetric atomic.Value
	m          sync.Mutex
}
|
||||||
|
|
||||||
|
// Retrieve retrieves symmetric credentials from the underlying provider,
// and invalidates the cached asymmetric credentials when the access key
// they were derived from has rotated.
func (s *SymmetricCredentialAdaptor) Retrieve(ctx context.Context) (aws.Credentials, error) {
	symCreds, err := s.retrieveFromSymmetricProvider(ctx)
	if err != nil {
		return aws.Credentials{}, err
	}

	if asymCreds := s.getCreds(); asymCreds == nil {
		return symCreds, nil
	}

	s.m.Lock()
	defer s.m.Unlock()

	// Re-check under the lock: another goroutine may have cleared or
	// replaced the cache meanwhile.
	asymCreds := s.getCreds()
	if asymCreds == nil {
		return symCreds, nil
	}

	// if the context does not match the access key id clear it
	if asymCreds.Context != symCreds.AccessKeyID {
		// A typed-nil store marks the cache as invalid for getCreds.
		s.asymmetric.Store((*Credentials)(nil))
	}

	return symCreds, nil
}
|
||||||
|
|
||||||
|
// RetrievePrivateKey returns credentials suitable for SigV4a signing
|
||||||
|
func (s *SymmetricCredentialAdaptor) RetrievePrivateKey(ctx context.Context) (Credentials, error) {
|
||||||
|
if asymCreds := s.getCreds(); asymCreds != nil {
|
||||||
|
return *asymCreds, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
s.m.Lock()
|
||||||
|
defer s.m.Unlock()
|
||||||
|
|
||||||
|
if asymCreds := s.getCreds(); asymCreds != nil {
|
||||||
|
return *asymCreds, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
symmetricCreds, err := s.retrieveFromSymmetricProvider(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return Credentials{}, fmt.Errorf("failed to retrieve symmetric credentials: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
privateKey, err := deriveKeyFromAccessKeyPair(symmetricCreds.AccessKeyID, symmetricCreds.SecretAccessKey)
|
||||||
|
if err != nil {
|
||||||
|
return Credentials{}, fmt.Errorf("failed to derive assymetric key from credentials")
|
||||||
|
}
|
||||||
|
|
||||||
|
creds := Credentials{
|
||||||
|
Context: symmetricCreds.AccessKeyID,
|
||||||
|
PrivateKey: privateKey,
|
||||||
|
SessionToken: symmetricCreds.SessionToken,
|
||||||
|
CanExpire: symmetricCreds.CanExpire,
|
||||||
|
Expires: symmetricCreds.Expires,
|
||||||
|
}
|
||||||
|
|
||||||
|
s.asymmetric.Store(&creds)
|
||||||
|
|
||||||
|
return creds, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SymmetricCredentialAdaptor) getCreds() *Credentials {
|
||||||
|
v := s.asymmetric.Load()
|
||||||
|
|
||||||
|
if v == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
c := v.(*Credentials)
|
||||||
|
if c != nil && c.HasKeys() && !c.Expired() {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SymmetricCredentialAdaptor) retrieveFromSymmetricProvider(ctx context.Context) (aws.Credentials, error) {
|
||||||
|
credentials, err := s.SymmetricProvider.Retrieve(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return aws.Credentials{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return credentials, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CredentialsProvider is the interface for a provider to retrieve credentials
// to sign requests with.
type CredentialsProvider interface {
	RetrievePrivateKey(context.Context) (Credentials, error)
}
|
79
api/auth/signer/v4asdk2/credentials_test.go
Normal file
79
api/auth/signer/v4asdk2/credentials_test.go
Normal file
|
@ -0,0 +1,79 @@
|
||||||
|
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/credentials_test.go
|
||||||
|
|
||||||
|
package v4a
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go-v2/aws"
|
||||||
|
)
|
||||||
|
|
||||||
|
// rotatingCredsProvider is a test credentials provider whose returned keys
// embed count (so rotation can be simulated by incrementing it) and which
// fails once the fail channel is closed.
type rotatingCredsProvider struct {
	count int
	fail  chan struct{}
}
|
||||||
|
|
||||||
|
func (r *rotatingCredsProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
|
||||||
|
select {
|
||||||
|
case <-r.fail:
|
||||||
|
return aws.Credentials{}, fmt.Errorf("rotatingCredsProvider error")
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
credentials := aws.Credentials{
|
||||||
|
AccessKeyID: fmt.Sprintf("ACCESS_KEY_ID_%d", r.count),
|
||||||
|
SecretAccessKey: fmt.Sprintf("SECRET_ACCESS_KEY_%d", r.count),
|
||||||
|
SessionToken: fmt.Sprintf("SESSION_TOKEN_%d", r.count),
|
||||||
|
}
|
||||||
|
return credentials, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestSymmetricCredentialAdaptor exercises the caching behavior of the
// adaptor: asymmetric credentials are derived lazily, cached while the
// underlying symmetric credentials are unchanged, and invalidated when
// the symmetric credentials rotate or the provider fails.
func TestSymmetricCredentialAdaptor(t *testing.T) {
	provider := &rotatingCredsProvider{
		count: 0,
		fail:  make(chan struct{}),
	}

	adaptor := &SymmetricCredentialAdaptor{SymmetricProvider: provider}

	if symCreds, err := adaptor.Retrieve(context.Background()); err != nil {
		t.Fatalf("expect no error, got %v", err)
	} else if !symCreds.HasKeys() {
		t.Fatalf("expect symmetric credentials to have keys")
	}

	// Plain Retrieve must not populate the asymmetric cache.
	if load := adaptor.asymmetric.Load(); load != nil {
		t.Errorf("expect asymmetric credentials to be nil")
	}

	if asymCreds, err := adaptor.RetrievePrivateKey(context.Background()); err != nil {
		t.Fatalf("expect no error, got %v", err)
	} else if !asymCreds.HasKeys() {
		t.Fatalf("expect asymmetric credentials to have keys")
	}

	if _, err := adaptor.Retrieve(context.Background()); err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	// The symmetric credentials did not change, so the derived asymmetric
	// credentials should still be cached.
	if load := adaptor.asymmetric.Load(); load.(*Credentials) == nil {
		t.Errorf("expect asymmetric credentials to be not nil")
	}

	// Rotate the symmetric credentials.
	provider.count++

	if _, err := adaptor.Retrieve(context.Background()); err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	// Rotation should have invalidated the cached asymmetric credentials.
	if load := adaptor.asymmetric.Load(); load.(*Credentials) != nil {
		t.Errorf("expect asymmetric credentials to be nil")
	}

	close(provider.fail) // All requests to the original provider will now fail from this point-on.
	_, err := adaptor.Retrieve(context.Background())
	if err == nil {
		t.Error("expect error, got nil")
	}
}
|
32
api/auth/signer/v4asdk2/internal/crypto/compare.go
Normal file
32
api/auth/signer/v4asdk2/internal/crypto/compare.go
Normal file
|
@ -0,0 +1,32 @@
|
||||||
|
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/compare.go
|
||||||
|
|
||||||
|
package crypto
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison
// if the two byte slices assuming they represent a big-endian number.
//
//	error if len(x) != len(y)
//	-1 if x < y
//	0 if x == y
//	+1 if x > y
func ConstantTimeByteCompare(x, y []byte) (int, error) {
	if len(x) != len(y) {
		return 0, fmt.Errorf("slice lengths do not match")
	}

	// xLarger/yLarger latch which operand won at the most significant
	// differing byte; both stay 0 if the slices are equal.
	xLarger, yLarger := 0, 0

	for i := 0; i < len(x); i++ {
		xByte, yByte := int(x[i]), int(y[i])

		// Arithmetic right shift extracts the sign bit of the difference:
		// x == 1 iff xByte < yByte, y == 1 iff xByte > yByte.
		x := ((yByte - xByte) >> 8) & 1
		y := ((xByte - yByte) >> 8) & 1

		// Record a win only if the other side has not already won at a
		// more significant byte (branch-free, so timing is data-independent).
		xLarger |= x &^ yLarger
		yLarger |= y &^ xLarger
	}

	return xLarger - yLarger, nil
}
|
62
api/auth/signer/v4asdk2/internal/crypto/compare_test.go
Normal file
62
api/auth/signer/v4asdk2/internal/crypto/compare_test.go
Normal file
|
@ -0,0 +1,62 @@
|
||||||
|
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/compare_test.go
|
||||||
|
|
||||||
|
package crypto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"math/big"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestConstantTimeByteCompare covers equal, smaller, larger, nil-vs-empty,
// and mismatched-length inputs of ConstantTimeByteCompare.
func TestConstantTimeByteCompare(t *testing.T) {
	cases := []struct {
		x, y      []byte
		r         int // expected comparison result when no error is expected
		expectErr bool
	}{
		{x: []byte{}, y: []byte{}, r: 0},
		{x: []byte{40}, y: []byte{30}, r: 1},
		{x: []byte{30}, y: []byte{40}, r: -1},
		{x: []byte{60, 40, 30, 10, 20}, y: []byte{50, 30, 20, 0, 10}, r: 1},
		{x: []byte{50, 30, 20, 0, 10}, y: []byte{60, 40, 30, 10, 20}, r: -1},
		{x: nil, y: []byte{}, r: 0},
		{x: []byte{}, y: nil, r: 0},
		{x: []byte{}, y: []byte{10}, expectErr: true},
		{x: []byte{10}, y: []byte{}, expectErr: true},
		{x: []byte{10, 20}, y: []byte{10}, expectErr: true},
	}

	for _, tt := range cases {
		compare, err := ConstantTimeByteCompare(tt.x, tt.y)
		if (err != nil) != tt.expectErr {
			t.Fatalf("expectErr=%v, got %v", tt.expectErr, err)
		}
		if e, a := tt.r, compare; e != a {
			t.Errorf("expect %v, got %v", e, a)
		}
	}
}
|
||||||
|
|
||||||
|
// BenchmarkConstantTimeCompare measures ConstantTimeByteCompare on two
// small big-endian values.
func BenchmarkConstantTimeCompare(b *testing.B) {
	x, y := big.NewInt(1023), big.NewInt(1024)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ConstantTimeByteCompare(x.Bytes(), y.Bytes())
	}
}

// BenchmarkCompare provides a variable-time bytes.Compare baseline for the
// same inputs.
func BenchmarkCompare(b *testing.B) {
	x, y := big.NewInt(1023).Bytes(), big.NewInt(1024).Bytes()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bytes.Compare(x, y)
	}
}
|
||||||
|
|
||||||
|
// mustBigInt parses s as a base-16 integer, panicking on invalid input.
// Intended only for test fixtures with known-good values.
func mustBigInt(s string) *big.Int {
	v, ok := new(big.Int).SetString(s, 16)
	if !ok {
		panic("can't parse as big.Int")
	}
	return v
}
|
115
api/auth/signer/v4asdk2/internal/crypto/ecc.go
Normal file
115
api/auth/signer/v4asdk2/internal/crypto/ecc.go
Normal file
|
@ -0,0 +1,115 @@
|
||||||
|
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/ecc.go
|
||||||
|
|
||||||
|
package crypto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/elliptic"
|
||||||
|
"crypto/hmac"
|
||||||
|
"encoding/asn1"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ecdsaSignature is the ASN.1 SEQUENCE layout of an ECDSA signature,
// used for decoding with encoding/asn1 in VerifySignature.
type ecdsaSignature struct {
	R, S *big.Int
}
|
||||||
|
|
||||||
|
// ECDSAKey takes the given elliptic curve, and private key (d) byte slice
|
||||||
|
// and returns the private ECDSA key.
|
||||||
|
func ECDSAKey(curve elliptic.Curve, d []byte) *ecdsa.PrivateKey {
|
||||||
|
return ECDSAKeyFromPoint(curve, (&big.Int{}).SetBytes(d))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ECDSAKeyFromPoint takes the given elliptic curve and point and returns the
|
||||||
|
// private and public keypair
|
||||||
|
func ECDSAKeyFromPoint(curve elliptic.Curve, d *big.Int) *ecdsa.PrivateKey {
|
||||||
|
pX, pY := curve.ScalarBaseMult(d.Bytes())
|
||||||
|
|
||||||
|
privKey := &ecdsa.PrivateKey{
|
||||||
|
PublicKey: ecdsa.PublicKey{
|
||||||
|
Curve: curve,
|
||||||
|
X: pX,
|
||||||
|
Y: pY,
|
||||||
|
},
|
||||||
|
D: d,
|
||||||
|
}
|
||||||
|
|
||||||
|
return privKey
|
||||||
|
}
|
||||||
|
|
||||||
|
// ECDSAPublicKey takes the provide curve and (x, y) coordinates and returns
|
||||||
|
// *ecdsa.PublicKey. Returns an error if the given points are not on the curve.
|
||||||
|
func ECDSAPublicKey(curve elliptic.Curve, x, y []byte) (*ecdsa.PublicKey, error) {
|
||||||
|
xPoint := (&big.Int{}).SetBytes(x)
|
||||||
|
yPoint := (&big.Int{}).SetBytes(y)
|
||||||
|
|
||||||
|
if !curve.IsOnCurve(xPoint, yPoint) {
|
||||||
|
return nil, fmt.Errorf("point(%v, %v) is not on the given curve", xPoint.String(), yPoint.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ecdsa.PublicKey{
|
||||||
|
Curve: curve,
|
||||||
|
X: xPoint,
|
||||||
|
Y: yPoint,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifySignature takes the provided public key, hash, and asn1 encoded signature and returns
|
||||||
|
// whether the given signature is valid.
|
||||||
|
func VerifySignature(key *ecdsa.PublicKey, hash []byte, signature []byte) (bool, error) {
|
||||||
|
var ecdsaSignature ecdsaSignature
|
||||||
|
|
||||||
|
_, err := asn1.Unmarshal(signature, &ecdsaSignature)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ecdsa.Verify(key, hash, ecdsaSignature.R, ecdsaSignature.S), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HMACKeyDerivation provides an implementation of a NIST-800-108 of a KDF (Key Derivation Function) in Counter Mode.
// For the purposes of this implementation HMAC is used as the PRF (Pseudorandom function), where the value of
// `r` is defined as a 4 byte counter.
//
// hash selects the PRF digest, bitLen is the requested output length in
// bits, and label/context form the KDF's fixed input string.
func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, context []byte) ([]byte, error) {
	// verify that we won't overflow the counter
	// n = number of PRF blocks needed to produce bitLen bits of output.
	n := int64(math.Ceil((float64(bitLen) / 8) / float64(hash().Size())))
	if n > 0x7FFFFFFF {
		return nil, fmt.Errorf("unable to derive key of size %d using 32-bit counter", bitLen)
	}

	// verify the requested bit length is not larger then the length encoding size
	if int64(bitLen) > 0x7FFFFFFF {
		return nil, fmt.Errorf("bitLen is greater than 32-bits")
	}

	// Fixed input string: label || 0x00 || context || [L]_4 (big-endian bit length).
	fixedInput := bytes.NewBuffer(nil)
	fixedInput.Write(label)
	fixedInput.WriteByte(0x00)
	fixedInput.Write(context)
	if err := binary.Write(fixedInput, binary.BigEndian, int32(bitLen)); err != nil {
		return nil, fmt.Errorf("failed to write bit length to fixed input string: %v", err)
	}

	var output []byte

	h := hmac.New(hash, key)

	// Counter mode: K(i) = PRF(key, [i]_4 || fixed input), i = 1..n.
	for i := int64(1); i <= n; i++ {
		h.Reset()
		if err := binary.Write(h, binary.BigEndian, int32(i)); err != nil {
			return nil, err
		}
		_, err := h.Write(fixedInput.Bytes())
		if err != nil {
			return nil, err
		}
		output = append(output, h.Sum(nil)...)
	}

	// Truncate to the requested byte length.
	return output[:bitLen/8], nil
}
|
279
api/auth/signer/v4asdk2/internal/crypto/ecc_test.go
Normal file
279
api/auth/signer/v4asdk2/internal/crypto/ecc_test.go
Normal file
|
@ -0,0 +1,279 @@
|
||||||
|
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/ecc_test.go
|
||||||
|
|
||||||
|
package crypto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/elliptic"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"io"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestECDSAPublicKeyDerivation_P256 checks key derivation against a known
// P-256 (d, x, y) vector.
func TestECDSAPublicKeyDerivation_P256(t *testing.T) {
	d := []byte{
		0xc9, 0x80, 0x68, 0x98, 0xa0, 0x33, 0x49, 0x16, 0xc8, 0x60, 0x74, 0x88, 0x80, 0xa5, 0x41, 0xf0,
		0x93, 0xb5, 0x79, 0xa9, 0xb1, 0xf3, 0x29, 0x34, 0xd8, 0x6c, 0x36, 0x3c, 0x39, 0x80, 0x03, 0x57,
	}

	x := []byte{
		0xd0, 0x72, 0x0d, 0xc6, 0x91, 0xaa, 0x80, 0x09, 0x6b, 0xa3, 0x2f, 0xed, 0x1c, 0xb9, 0x7c, 0x2b,
		0x62, 0x06, 0x90, 0xd0, 0x6d, 0xe0, 0x31, 0x7b, 0x86, 0x18, 0xd5, 0xce, 0x65, 0xeb, 0x72, 0x8f,
	}

	y := []byte{
		0x96, 0x81, 0xb5, 0x17, 0xb1, 0xcd, 0xa1, 0x7d, 0x0d, 0x83, 0xd3, 0x35, 0xd9, 0xc4, 0xa8, 0xa9,
		0xa9, 0xb0, 0xb1, 0xb3, 0xc7, 0x10, 0x6d, 0x8f, 0x3c, 0x72, 0xbc, 0x50, 0x93, 0xdc, 0x27, 0x5f,
	}

	testKeyDerivation(t, elliptic.P256(), d, x, y)
}

// TestECDSAPublicKeyDerivation_P384 checks key derivation against a known
// P-384 (d, x, y) vector.
func TestECDSAPublicKeyDerivation_P384(t *testing.T) {
	d := []byte{
		0x53, 0x94, 0xf7, 0x97, 0x3e, 0xa8, 0x68, 0xc5, 0x2b, 0xf3, 0xff, 0x8d, 0x8c, 0xee, 0xb4, 0xdb,
		0x90, 0xa6, 0x83, 0x65, 0x3b, 0x12, 0x48, 0x5d, 0x5f, 0x62, 0x7c, 0x3c, 0xe5, 0xab, 0xd8, 0x97,
		0x8f, 0xc9, 0x67, 0x3d, 0x14, 0xa7, 0x1d, 0x92, 0x57, 0x47, 0x93, 0x16, 0x62, 0x49, 0x3c, 0x37,
	}

	x := []byte{
		0xfd, 0x3c, 0x84, 0xe5, 0x68, 0x9b, 0xed, 0x27, 0x0e, 0x60, 0x1b, 0x3d, 0x80, 0xf9, 0x0d, 0x67,
		0xa9, 0xae, 0x45, 0x1c, 0xce, 0x89, 0x0f, 0x53, 0xe5, 0x83, 0x22, 0x9a, 0xd0, 0xe2, 0xee, 0x64,
		0x56, 0x11, 0xfa, 0x99, 0x36, 0xdf, 0xa4, 0x53, 0x06, 0xec, 0x18, 0x06, 0x67, 0x74, 0xaa, 0x24,
	}

	y := []byte{
		0xb8, 0x3c, 0xa4, 0x12, 0x6c, 0xfc, 0x4c, 0x4d, 0x1d, 0x18, 0xa4, 0xb6, 0xc2, 0x1c, 0x7f, 0x69,
		0x9d, 0x51, 0x23, 0xdd, 0x9c, 0x24, 0xf6, 0x6f, 0x83, 0x38, 0x46, 0xee, 0xb5, 0x82, 0x96, 0x19,
		0x6b, 0x42, 0xec, 0x06, 0x42, 0x5d, 0xb5, 0xb7, 0x0a, 0x4b, 0x81, 0xb7, 0xfc, 0xf7, 0x05, 0xa0,
	}

	testKeyDerivation(t, elliptic.P384(), d, x, y)
}
|
||||||
|
|
||||||
|
// TestECDSAKnownSigningValue_P256 runs a sign/verify round trip with a
// fixed P-256 private scalar.
func TestECDSAKnownSigningValue_P256(t *testing.T) {
	d := []byte{
		0x51, 0x9b, 0x42, 0x3d, 0x71, 0x5f, 0x8b, 0x58, 0x1f, 0x4f, 0xa8, 0xee, 0x59, 0xf4, 0x77, 0x1a,
		0x5b, 0x44, 0xc8, 0x13, 0x0b, 0x4e, 0x3e, 0xac, 0xca, 0x54, 0xa5, 0x6d, 0xda, 0x72, 0xb4, 0x64,
	}

	testKnownSigningValue(t, elliptic.P256(), d)
}

// TestECDSAKnownSigningValue_P384 runs a sign/verify round trip with a
// fixed P-384 private scalar.
func TestECDSAKnownSigningValue_P384(t *testing.T) {
	d := []byte{
		0x53, 0x94, 0xf7, 0x97, 0x3e, 0xa8, 0x68, 0xc5, 0x2b, 0xf3, 0xff, 0x8d, 0x8c, 0xee, 0xb4, 0xdb,
		0x90, 0xa6, 0x83, 0x65, 0x3b, 0x12, 0x48, 0x5d, 0x5f, 0x62, 0x7c, 0x3c, 0xe5, 0xab, 0xd8, 0x97,
		0x8f, 0xc9, 0x67, 0x3d, 0x14, 0xa7, 0x1d, 0x92, 0x57, 0x47, 0x93, 0x16, 0x62, 0x49, 0x3c, 0x37,
	}

	testKnownSigningValue(t, elliptic.P384(), d)
}
|
||||||
|
|
||||||
|
func testKeyDerivation(t *testing.T, curve elliptic.Curve, d, expectedX, expectedY []byte) {
|
||||||
|
privKey := ECDSAKey(curve, d)
|
||||||
|
|
||||||
|
if e, a := d, privKey.D.Bytes(); bytes.Compare(e, a) != 0 {
|
||||||
|
t.Errorf("expected % x, got % x", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
if e, a := expectedX, privKey.X.Bytes(); bytes.Compare(e, a) != 0 {
|
||||||
|
t.Errorf("expected % x, got % x", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
if e, a := expectedY, privKey.Y.Bytes(); bytes.Compare(e, a) != 0 {
|
||||||
|
t.Errorf("expected % x, got % x", e, a)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// testKnownSigningValue signs a fixed message with the key derived from d
// and verifies the signature with VerifySignature.
func testKnownSigningValue(t *testing.T, curve elliptic.Curve, d []byte) {
	signingKey := ECDSAKey(curve, d)

	message := []byte{
		0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
		0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
		0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
		0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
		0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
		0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
		0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
		0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
	}

	sha256Hash := sha256.New()
	_, err := io.Copy(sha256Hash, bytes.NewReader(message))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	msgHash := sha256Hash.Sum(nil)
	msgSignature, err := signingKey.Sign(rand.Reader, msgHash, crypto.SHA256)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	verified, err := VerifySignature(&signingKey.PublicKey, msgHash, msgSignature)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	if !verified {
		t.Fatalf("failed to verify message msgSignature")
	}
}
|
||||||
|
|
||||||
|
// TestECDSAInvalidSignature_P256 verifies a corrupted signature fails on P-256.
func TestECDSAInvalidSignature_P256(t *testing.T) {
	testInvalidSignature(t, elliptic.P256())
}

// TestECDSAInvalidSignature_P384 verifies a corrupted signature fails on P-384.
func TestECDSAInvalidSignature_P384(t *testing.T) {
	testInvalidSignature(t, elliptic.P384())
}

// TestECDSAGenKeySignature_P256 runs a sign/verify round trip with a fresh P-256 key.
func TestECDSAGenKeySignature_P256(t *testing.T) {
	testGenKeySignature(t, elliptic.P256())
}

// TestECDSAGenKeySignature_P384 runs a sign/verify round trip with a fresh P-384 key.
func TestECDSAGenKeySignature_P384(t *testing.T) {
	testGenKeySignature(t, elliptic.P384())
}
|
||||||
|
|
||||||
|
// testInvalidSignature signs a fixed message, corrupts one signature byte,
// and asserts that verification reports the signature as invalid.
func testInvalidSignature(t *testing.T, curve elliptic.Curve) {
	privateKey, err := ecdsa.GenerateKey(curve, rand.Reader)
	if err != nil {
		t.Fatalf("failed to generate key: %v", err)
	}

	message := []byte{
		0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
		0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
		0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
		0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
		0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
		0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
		0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
		0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
	}

	sha256Hash := sha256.New()
	_, err = io.Copy(sha256Hash, bytes.NewReader(message))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	msgHash := sha256Hash.Sum(nil)
	msgSignature, err := privateKey.Sign(rand.Reader, msgHash, crypto.SHA256)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	// Corrupt one byte of the signature: a zero byte becomes 0x0a,
	// any other value is cleared to zero.
	byteToFlip := 15
	switch msgSignature[byteToFlip] {
	case 0:
		msgSignature[byteToFlip] = 0x0a
	default:
		msgSignature[byteToFlip] &^= msgSignature[byteToFlip]
	}

	verified, err := VerifySignature(&privateKey.PublicKey, msgHash, msgSignature)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	if verified {
		t.Fatalf("expected message verification to fail")
	}
}
|
||||||
|
|
||||||
|
func testGenKeySignature(t *testing.T, curve elliptic.Curve) {
|
||||||
|
privateKey, err := ecdsa.GenerateKey(curve, rand.Reader)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to generate key: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
message := []byte{
|
||||||
|
0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
|
||||||
|
0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
|
||||||
|
0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
|
||||||
|
0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
|
||||||
|
0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
|
||||||
|
0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
|
||||||
|
0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
|
||||||
|
0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
|
||||||
|
}
|
||||||
|
|
||||||
|
sha256Hash := sha256.New()
|
||||||
|
_, err = io.Copy(sha256Hash, bytes.NewReader(message))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
msgHash := sha256Hash.Sum(nil)
|
||||||
|
msgSignature, err := privateKey.Sign(rand.Reader, msgHash, crypto.SHA256)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
verified, err := VerifySignature(&privateKey.PublicKey, msgHash, msgSignature)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !verified {
|
||||||
|
t.Fatalf("expected message verification to fail")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestECDSASignatureFormat verifies a fixed ASN.1-encoded signature
// against a fixed public key and message, pinning the expected wire format.
func TestECDSASignatureFormat(t *testing.T) {
	asn1Signature := []byte{
		0x30, 0x45, 0x02, 0x21, 0x00, 0xd7, 0xc5, 0xb9, 0x9e, 0x0b, 0xb1, 0x1a, 0x1f, 0x32, 0xda, 0x66, 0xe0, 0xff,
		0x59, 0xb7, 0x8a, 0x5e, 0xb3, 0x94, 0x9c, 0x23, 0xb3, 0xfc, 0x1f, 0x18, 0xcc, 0xf6, 0x61, 0x67, 0x8b, 0xf1,
		0xc1, 0x02, 0x20, 0x26, 0x4d, 0x8b, 0x7c, 0xaa, 0x52, 0x4c, 0xc0, 0x2e, 0x5f, 0xf6, 0x7e, 0x24, 0x82, 0xe5,
		0xfb, 0xcb, 0xc7, 0x9b, 0x83, 0x0d, 0x19, 0x7e, 0x7a, 0x40, 0x37, 0x87, 0xdd, 0x1c, 0x93, 0x13, 0xc4,
	}

	x := []byte{
		0x1c, 0xcb, 0xe9, 0x1c, 0x07, 0x5f, 0xc7, 0xf4, 0xf0, 0x33, 0xbf, 0xa2, 0x48, 0xdb, 0x8f, 0xcc,
		0xd3, 0x56, 0x5d, 0xe9, 0x4b, 0xbf, 0xb1, 0x2f, 0x3c, 0x59, 0xff, 0x46, 0xc2, 0x71, 0xbf, 0x83,
	}

	y := []byte{
		0xce, 0x40, 0x14, 0xc6, 0x88, 0x11, 0xf9, 0xa2, 0x1a, 0x1f, 0xdb, 0x2c, 0x0e, 0x61, 0x13, 0xe0,
		0x6d, 0xb7, 0xca, 0x93, 0xb7, 0x40, 0x4e, 0x78, 0xdc, 0x7c, 0xcd, 0x5c, 0xa8, 0x9a, 0x4c, 0xa9,
	}

	publicKey, err := ECDSAPublicKey(elliptic.P256(), x, y)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	message := []byte{
		0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
		0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
		0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
		0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
		0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
		0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
		0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
		0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
	}

	hash := sha256.New()
	_, err = io.Copy(hash, bytes.NewReader(message))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	msgHash := hash.Sum(nil)

	verifySignature, err := VerifySignature(publicKey, msgHash, asn1Signature)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	if !verifySignature {
		t.Fatalf("failed to verify signature")
	}
}
|
38
api/auth/signer/v4asdk2/internal/v4/const.go
Normal file
38
api/auth/signer/v4asdk2/internal/v4/const.go
Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/const.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
const (
	// EmptyStringSHA256 is the hex encoded sha256 value of an empty string.
	EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`

	// UnsignedPayload indicates that the request payload body is unsigned.
	UnsignedPayload = "UNSIGNED-PAYLOAD"

	// AmzAlgorithmKey indicates the signing algorithm.
	AmzAlgorithmKey = "X-Amz-Algorithm"

	// AmzSecurityTokenKey indicates the security token to be used with temporary credentials.
	AmzSecurityTokenKey = "X-Amz-Security-Token"

	// AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'.
	AmzDateKey = "X-Amz-Date"

	// AmzCredentialKey is the access key ID and credential scope.
	AmzCredentialKey = "X-Amz-Credential"

	// AmzSignedHeadersKey is the set of headers signed for the request.
	AmzSignedHeadersKey = "X-Amz-SignedHeaders"

	// AmzSignatureKey is the query parameter to store the SigV4 signature.
	AmzSignatureKey = "X-Amz-Signature"

	// TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
	// (Go reference-time layout).
	TimeFormat = "20060102T150405Z"

	// ShortTimeFormat is the shortened time format used in the credential scope.
	ShortTimeFormat = "20060102"

	// ContentSHAKey is the SHA256 of request body.
	ContentSHAKey = "X-Amz-Content-Sha256"
)
|
90
api/auth/signer/v4asdk2/internal/v4/header_rules.go
Normal file
90
api/auth/signer/v4asdk2/internal/v4/header_rules.go
Normal file
|
@ -0,0 +1,90 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/header_rules.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Rules houses a set of Rule needed for validation of a
// string value.
type Rules []Rule

// Rule interface allows for more flexible rules and just simply
// checks whether or not a value adheres to that Rule.
type Rule interface {
	IsValid(value string) bool
}

// IsValid reports whether any rule in the set accepts the value;
// nested rule sets are supported.
func (r Rules) IsValid(value string) bool {
	for i := range r {
		if r[i].IsValid(value) {
			return true
		}
	}
	return false
}

// MapRule generic Rule for maps.
type MapRule map[string]struct{}

// IsValid reports whether the value is a key of the map.
func (m MapRule) IsValid(value string) bool {
	_, present := m[value]
	return present
}

// AllowList is a generic Rule for whitelisting.
type AllowList struct {
	Rule
}

// IsValid reports whether the wrapped rule accepts the value.
func (w AllowList) IsValid(value string) bool {
	return w.Rule.IsValid(value)
}

// DenyList is a generic Rule for blacklisting.
type DenyList struct {
	Rule
}

// IsValid reports whether the wrapped rule rejects the value.
func (b DenyList) IsValid(value string) bool {
	return !b.Rule.IsValid(value)
}

// Patterns is a list of strings to match against.
type Patterns []string

// IsValid reports whether the value starts, case-insensitively,
// with any of the listed prefixes.
func (p Patterns) IsValid(value string) bool {
	for _, prefix := range p {
		if HasPrefixFold(value, prefix) {
			return true
		}
	}
	return false
}

// InclusiveRules rules allow for rules to depend on one another.
type InclusiveRules []Rule

// IsValid returns true only if every rule accepts the value.
func (r InclusiveRules) IsValid(value string) bool {
	for _, rule := range r {
		if !rule.IsValid(value) {
			return false
		}
	}
	return true
}

// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
// under Unicode case-folding.
func HasPrefixFold(s, prefix string) bool {
	if len(s) < len(prefix) {
		return false
	}
	return strings.EqualFold(s[:len(prefix)], prefix)
}
|
83
api/auth/signer/v4asdk2/internal/v4/headers.go
Normal file
83
api/auth/signer/v4asdk2/internal/v4/headers.go
Normal file
|
@ -0,0 +1,83 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/header.go
|
||||||
|
// with changes:
|
||||||
|
// * drop User-Agent header from ignored
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
// IgnoredPresignedHeaders is a list of headers that are ignored during signing.
// The MapRule is used as a set: presence of the key means the header is excluded.
var IgnoredPresignedHeaders = Rules{
	DenyList{
		MapRule{
			"Authorization":   struct{}{},
			"User-Agent":      struct{}{},
			"X-Amzn-Trace-Id": struct{}{},
		},
	},
}
|
||||||
|
|
||||||
|
// IgnoredHeaders is a list of headers that are ignored during signing
// drop User-Agent header to be compatible with aws sdk java v1.
var IgnoredHeaders = Rules{
	DenyList{
		MapRule{
			"Authorization": struct{}{},
			// User-Agent is intentionally NOT ignored here (unlike upstream
			// and IgnoredPresignedHeaders), so it participates in the signature.
			//"User-Agent":    struct{}{},
			"X-Amzn-Trace-Id": struct{}{},
		},
	},
}
|
||||||
|
|
||||||
|
// RequiredSignedHeaders is a whitelist for Build canonical headers.
// Headers in this set must always be included in the signature; in addition,
// any header matching the X-Amz-Meta- prefix (case-insensitively) is signed.
var RequiredSignedHeaders = Rules{
	AllowList{
		MapRule{
			"Cache-Control":                         struct{}{},
			"Content-Disposition":                   struct{}{},
			"Content-Encoding":                      struct{}{},
			"Content-Language":                      struct{}{},
			"Content-Md5":                           struct{}{},
			"Content-Type":                          struct{}{},
			"Expires":                               struct{}{},
			"If-Match":                              struct{}{},
			"If-Modified-Since":                     struct{}{},
			"If-None-Match":                         struct{}{},
			"If-Unmodified-Since":                   struct{}{},
			"Range":                                 struct{}{},
			"X-Amz-Acl":                             struct{}{},
			"X-Amz-Copy-Source":                     struct{}{},
			"X-Amz-Copy-Source-If-Match":            struct{}{},
			"X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
			"X-Amz-Copy-Source-If-None-Match":       struct{}{},
			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
			"X-Amz-Copy-Source-Range":               struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
			"X-Amz-Grant-Full-control":                                    struct{}{},
			"X-Amz-Grant-Read":                                            struct{}{},
			"X-Amz-Grant-Read-Acp":                                        struct{}{},
			"X-Amz-Grant-Write":                                           struct{}{},
			"X-Amz-Grant-Write-Acp":                                       struct{}{},
			"X-Amz-Metadata-Directive":                                    struct{}{},
			"X-Amz-Mfa":                                                   struct{}{},
			"X-Amz-Request-Payer":                                         struct{}{},
			"X-Amz-Server-Side-Encryption":                                struct{}{},
			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":                 struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Algorithm":             struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key":                   struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":               struct{}{},
			"X-Amz-Storage-Class":                                         struct{}{},
			"X-Amz-Website-Redirect-Location":                             struct{}{},
			"X-Amz-Content-Sha256":                                        struct{}{},
			"X-Amz-Tagging":                                               struct{}{},
		},
	},
	Patterns{"X-Amz-Meta-"},
}
|
||||||
|
|
||||||
|
// AllowedQueryHoisting is an InclusiveRules set for headers that Build may
// hoist from the request headers into the query string when presigning.
// A header qualifies when it is NOT in RequiredSignedHeaders (the DenyList)
// and matches the "X-Amz-" prefix pattern.
var AllowedQueryHoisting = InclusiveRules{
	DenyList{RequiredSignedHeaders},
	Patterns{"X-Amz-"},
}
|
15
api/auth/signer/v4asdk2/internal/v4/hmac.go
Normal file
15
api/auth/signer/v4asdk2/internal/v4/hmac.go
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/hmac.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/sha256"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HMACSHA256 computes a HMAC-SHA256 of data given the provided key.
|
||||||
|
func HMACSHA256(key []byte, data []byte) []byte {
|
||||||
|
hash := hmac.New(sha256.New, key)
|
||||||
|
hash.Write(data)
|
||||||
|
return hash.Sum(nil)
|
||||||
|
}
|
77
api/auth/signer/v4asdk2/internal/v4/host.go
Normal file
77
api/auth/signer/v4asdk2/internal/v4/host.go
Normal file
|
@ -0,0 +1,77 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/host.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SanitizeHostForHeader removes default port from host and updates request.Host
|
||||||
|
func SanitizeHostForHeader(r *http.Request) {
|
||||||
|
host := getHost(r)
|
||||||
|
port := portOnly(host)
|
||||||
|
if port != "" && isDefaultPort(r.URL.Scheme, port) {
|
||||||
|
r.Host = stripPort(host)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns host from request
|
||||||
|
func getHost(r *http.Request) string {
|
||||||
|
if r.Host != "" {
|
||||||
|
return r.Host
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.URL.Host
|
||||||
|
}
|
||||||
|
|
||||||
|
// stripPort returns hostport without any port number.
//
// If hostport is an IPv6 literal with a port number, stripPort returns the
// IPv6 literal without the square brackets. IPv6 literals may include a zone
// identifier.
//
// Copied from the Go 1.8 standard library (net/url).
func stripPort(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	if colon < 0 {
		return hostport // no port present
	}
	if bracket := strings.IndexByte(hostport, ']'); bracket >= 0 {
		// IPv6 literal: everything inside the brackets.
		return strings.TrimPrefix(hostport[:bracket], "[")
	}
	return hostport[:colon]
}
|
||||||
|
|
||||||
|
// portOnly returns the port part of hostport, without the leading colon.
// If hostport doesn't contain a port, portOnly returns an empty string.
//
// Copied from the Go 1.8 standard library (net/url).
func portOnly(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	if colon < 0 {
		return ""
	}
	if i := strings.Index(hostport, "]:"); i >= 0 {
		// IPv6 literal with a port.
		return hostport[i+2:]
	}
	if strings.IndexByte(hostport, ']') >= 0 {
		// IPv6 literal without a port; the colon found was inside brackets.
		return ""
	}
	return hostport[colon+1:]
}
|
||||||
|
|
||||||
|
// isDefaultPort reports whether port is the standard port for scheme
// (80 for HTTP, 443 for HTTPS). An empty port counts as default.
func isDefaultPort(scheme, port string) bool {
	if port == "" {
		return true
	}

	switch strings.ToLower(scheme) {
	case "http":
		return port == "80"
	case "https":
		return port == "443"
	}

	return false
}
|
38
api/auth/signer/v4asdk2/internal/v4/time.go
Normal file
38
api/auth/signer/v4asdk2/internal/v4/time.go
Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/time.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing.
|
||||||
|
type SigningTime struct {
|
||||||
|
time.Time
|
||||||
|
timeFormat string
|
||||||
|
shortTimeFormat string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSigningTime creates a new SigningTime given a time.Time
|
||||||
|
func NewSigningTime(t time.Time) SigningTime {
|
||||||
|
return SigningTime{
|
||||||
|
Time: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeFormat provides a time formatted in the X-Amz-Date format.
|
||||||
|
func (m *SigningTime) TimeFormat() string {
|
||||||
|
return m.format(&m.timeFormat, TimeFormat)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ShortTimeFormat provides a time formatted of 20060102.
|
||||||
|
func (m *SigningTime) ShortTimeFormat() string {
|
||||||
|
return m.format(&m.shortTimeFormat, ShortTimeFormat)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SigningTime) format(target *string, format string) string {
|
||||||
|
if len(*target) > 0 {
|
||||||
|
return *target
|
||||||
|
}
|
||||||
|
v := m.Time.Format(format)
|
||||||
|
*target = v
|
||||||
|
return v
|
||||||
|
}
|
66
api/auth/signer/v4asdk2/internal/v4/util.go
Normal file
66
api/auth/signer/v4asdk2/internal/v4/util.go
Normal file
|
@ -0,0 +1,66 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/util.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// doubleSpace is the two-space sequence the fast-path check looks for; if it
// is absent, the string needs no inner-space collapsing. NOTE(review): the
// rendered source showed a single space here, which is almost certainly an
// artifact of whitespace collapsing — a one-space constant would defeat the
// early return below (upstream aws-sdk-go-v2 uses two spaces).
const doubleSpace = "  "

// StripExcessSpaces trims leading and trailing spaces and rewrites runs of
// multiple side-by-side spaces into a single space. Only the ASCII space
// character (0x20) is affected; tabs and other whitespace pass through.
func StripExcessSpaces(str string) string {
	var j, k, l, m, spaces int

	// Trim trailing spaces.
	for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
	}

	// Trim leading spaces.
	for k = 0; k < j && str[k] == ' '; k++ {
	}
	str = str[k : j+1]

	// Fast path: nothing to collapse.
	j = strings.Index(str, doubleSpace)
	if j < 0 {
		return str
	}

	// Compact in a copy: m tracks the write position, spaces counts the
	// current run so only its first space is kept.
	buf := []byte(str)
	for k, m, l = j, j, len(buf); k < l; k++ {
		if buf[k] == ' ' {
			if spaces == 0 {
				// First space of a run.
				buf[m] = buf[k]
				m++
			}
			spaces++
		} else {
			// End of a run of spaces.
			spaces = 0
			buf[m] = buf[k]
			m++
		}
	}

	return string(buf[:m])
}
|
||||||
|
|
||||||
|
// GetURIPath returns the escaped URI component from the provided URL
|
||||||
|
func GetURIPath(u *url.URL) string {
|
||||||
|
var uri string
|
||||||
|
|
||||||
|
if len(u.Opaque) > 0 {
|
||||||
|
uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
|
||||||
|
} else {
|
||||||
|
uri = u.EscapedPath()
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(uri) == 0 {
|
||||||
|
uri = "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
return uri
|
||||||
|
}
|
77
api/auth/signer/v4asdk2/internal/v4/util_test.go
Normal file
77
api/auth/signer/v4asdk2/internal/v4/util_test.go
Normal file
|
@ -0,0 +1,77 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/tuil_test.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestStripExcessHeaders(t *testing.T) {
|
||||||
|
vals := []string{
|
||||||
|
"",
|
||||||
|
"123",
|
||||||
|
"1 2 3",
|
||||||
|
"1 2 3 ",
|
||||||
|
" 1 2 3",
|
||||||
|
"1 2 3",
|
||||||
|
"1 23",
|
||||||
|
"1 2 3",
|
||||||
|
"1 2 ",
|
||||||
|
" 1 2 ",
|
||||||
|
"12 3",
|
||||||
|
"12 3 1",
|
||||||
|
"12 3 1",
|
||||||
|
"12 3 1abc123",
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := []string{
|
||||||
|
"",
|
||||||
|
"123",
|
||||||
|
"1 2 3",
|
||||||
|
"1 2 3",
|
||||||
|
"1 2 3",
|
||||||
|
"1 2 3",
|
||||||
|
"1 23",
|
||||||
|
"1 2 3",
|
||||||
|
"1 2",
|
||||||
|
"1 2",
|
||||||
|
"12 3",
|
||||||
|
"12 3 1",
|
||||||
|
"12 3 1",
|
||||||
|
"12 3 1abc123",
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len(vals); i++ {
|
||||||
|
r := StripExcessSpaces(vals[i])
|
||||||
|
if e, a := expected[i], r; e != a {
|
||||||
|
t.Errorf("%d, expect %v, got %v", i, e, a)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// stripExcessSpaceCases is benchmark input for StripExcessSpaces covering a
// realistic Authorization header plus leading/trailing runs, inner runs, and
// strings that need no rewriting.
//
// NOTE(review): repeated-space runs were reconstructed; the rendered diff
// had collapsed them, which would have made the benchmark exercise only the
// no-op fast path.
var stripExcessSpaceCases = []string{
	`AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`,
	`123   321   123   321`,
	`   123   321   123   321   `,
	`123   321   123   321`,
	"123",
	"1 2 3",
	"  1 2 3",
	"1  2 3",
	"1  23",
	"1  2  3",
	"1  2  ",
	" 1  2  ",
	"12   3",
	"12   3   1",
	"12          3     1",
	"12     3     1abc123",
}
|
||||||
|
|
||||||
|
func BenchmarkStripExcessSpaces(b *testing.B) {
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
for _, v := range stripExcessSpaceCases {
|
||||||
|
StripExcessSpaces(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
98
api/auth/signer/v4asdk2/stream.go
Normal file
98
api/auth/signer/v4asdk2/stream.go
Normal file
|
@ -0,0 +1,98 @@
|
||||||
|
// This file is adopting https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go for sigv4a.
|
||||||
|
|
||||||
|
package v4a
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
signerCrypto "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/crypto"
|
||||||
|
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/v4"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EventStreamSigner is an AWS EventStream protocol signer.
type EventStreamSigner interface {
	GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error)
}

// StreamSignerOptions is the configuration options for StreamSigner.
type StreamSignerOptions struct{}

// StreamSigner implements Signature Version 4 (SigV4) signing of event
// stream encoded payloads.
type StreamSigner struct {
	options StreamSignerOptions

	// credentials holds the derived key material used to sign/verify chunks.
	credentials Credentials
	service     string

	// prevSignature chains each chunk's signature to the one before it;
	// seeded with the request's seed signature in NewStreamSigner.
	prevSignature []byte
}
|
||||||
|
|
||||||
|
// NewStreamSigner returns a new AWS EventStream protocol signer.
|
||||||
|
func NewStreamSigner(credentials Credentials, service string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner {
|
||||||
|
o := StreamSignerOptions{}
|
||||||
|
|
||||||
|
for _, fn := range optFns {
|
||||||
|
fn(&o)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &StreamSigner{
|
||||||
|
options: o,
|
||||||
|
credentials: credentials,
|
||||||
|
service: service,
|
||||||
|
prevSignature: seedSignature,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifySignature checks that signature matches the expected SigV4A event
// stream signature for the given headers/payload chunk at signingTime. On
// success the verified signature becomes the chain seed for the next chunk.
func (s *StreamSigner) VerifySignature(headers, payload []byte, signingTime time.Time, signature []byte, optFns ...func(*StreamSignerOptions)) error {
	options := s.options

	for _, fn := range optFns {
		fn(&options)
	}

	prevSignature := s.prevSignature

	st := v4Internal.NewSigningTime(signingTime)

	scope := buildCredentialScope(st, s.service)

	stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st)

	// Verify the ECDSA signature over SHA-256(stringToSign) with the public
	// half of the derived signing key.
	ok, err := signerCrypto.VerifySignature(&s.credentials.PrivateKey.PublicKey, makeHash(sha256.New(), []byte(stringToSign)), signature)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("v4a: invalid signature")
	}

	// Chain: the next chunk signs over this chunk's signature.
	s.prevSignature = signature

	return nil
}
|
||||||
|
|
||||||
|
func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
|
||||||
|
hash := sha256.New()
|
||||||
|
return strings.Join([]string{
|
||||||
|
"AWS4-ECDSA-P256-SHA256-PAYLOAD",
|
||||||
|
signingTime.TimeFormat(),
|
||||||
|
credentialScope,
|
||||||
|
hex.EncodeToString(previousSignature),
|
||||||
|
hex.EncodeToString(makeHash(hash, headers)),
|
||||||
|
hex.EncodeToString(makeHash(hash, payload)),
|
||||||
|
}, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildCredentialScope(st v4Internal.SigningTime, service string) string {
|
||||||
|
return strings.Join([]string{
|
||||||
|
st.Format(shortTimeFormat),
|
||||||
|
service,
|
||||||
|
"aws4_request",
|
||||||
|
}, "/")
|
||||||
|
|
||||||
|
}
|
591
api/auth/signer/v4asdk2/v4a.go
Normal file
591
api/auth/signer/v4asdk2/v4a.go
Normal file
|
@ -0,0 +1,591 @@
|
||||||
|
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/v4a.go
|
||||||
|
// with changes:
|
||||||
|
// * adding exported VerifySignature methods
|
||||||
|
// * using different ignore headers for sing/presign requests
|
||||||
|
// * don't duplicate content-length as signed header
|
||||||
|
// * use copy of smithy-go encoding/httpbinding package
|
||||||
|
// * use zap.Logger instead of smithy-go/logging
|
||||||
|
|
||||||
|
package v4a
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/elliptic"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"math/big"
|
||||||
|
"net/http"
|
||||||
|
"net/textproto"
|
||||||
|
"net/url"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/smithy/encoding/httpbinding"
|
||||||
|
signerCrypto "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/crypto"
|
||||||
|
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/v4"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// AmzRegionSetKey represents the region set header used for sigv4a.
	AmzRegionSetKey     = "X-Amz-Region-Set"
	amzAlgorithmKey     = v4Internal.AmzAlgorithmKey
	amzSecurityTokenKey = v4Internal.AmzSecurityTokenKey
	amzDateKey          = v4Internal.AmzDateKey
	amzCredentialKey    = v4Internal.AmzCredentialKey
	amzSignedHeadersKey = v4Internal.AmzSignedHeadersKey
	authorizationHeader = "Authorization"

	// signingAlgorithm is the SigV4A algorithm identifier placed in the
	// Authorization header / X-Amz-Algorithm query parameter.
	signingAlgorithm = "AWS4-ECDSA-P256-SHA256"

	// timeFormat/shortTimeFormat are the X-Amz-Date and credential-scope
	// date layouts respectively.
	timeFormat      = "20060102T150405Z"
	shortTimeFormat = "20060102"

	// EmptyStringSHA256 is a hex encoded SHA-256 hash of an empty string.
	EmptyStringSHA256 = v4Internal.EmptyStringSHA256

	// Version of signing v4a
	Version = "SigV4A"
)

var (
	// p256 is the NIST P-256 curve; set once in init.
	p256 elliptic.Curve
	// nMinusTwoP256 is the curve order N minus two — the upper bound for
	// candidate scalars in deriveKeyFromAccessKeyPair; set once in init.
	nMinusTwoP256 *big.Int

	// one is added to the derived candidate to shift it into [1, N-1].
	one = new(big.Int).SetInt64(1)
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Ensure the elliptic curve parameters are initialized on package import rather then on first usage
|
||||||
|
p256 = elliptic.P256()
|
||||||
|
|
||||||
|
nMinusTwoP256 = new(big.Int).SetBytes(p256.Params().N.Bytes())
|
||||||
|
nMinusTwoP256 = nMinusTwoP256.Sub(nMinusTwoP256, new(big.Int).SetInt64(2))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignerOptions is the SigV4a signing options for constructing a Signer.
type SignerOptions struct {
	// Logger receives signing diagnostics when LogSigning is set.
	Logger     *zap.Logger
	LogSigning bool

	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
	// request header to the request's query string. This is most commonly used
	// with pre-signed requests preventing headers from being added to the
	// request's query string.
	DisableHeaderHoisting bool

	// Disables the automatic escaping of the URI path of the request for the
	// signature's canonical string's path. For services that do not need additional
	// escaping then use this to disable the signer escaping the path.
	//
	// S3 is an example of a service that does not need additional escaping.
	//
	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
	DisableURIPathEscaping bool
}

// Signer is a SigV4a HTTP signing implementation
type Signer struct {
	options SignerOptions
}
|
||||||
|
|
||||||
|
// NewSigner constructs a SigV4a Signer.
|
||||||
|
func NewSigner(optFns ...func(*SignerOptions)) *Signer {
|
||||||
|
options := SignerOptions{}
|
||||||
|
|
||||||
|
for _, fn := range optFns {
|
||||||
|
fn(&options)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Signer{options: options}
|
||||||
|
}
|
||||||
|
|
||||||
|
// deriveKeyFromAccessKeyPair derives a NIST P-256 PrivateKey from the given
// IAM AccessKey and SecretKey pair.
//
// Based on FIPS.186-4 Appendix B.4.2
func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, error) {
	params := p256.Params()
	bitLen := params.BitSize // Testing random candidates does not require an additional 64 bits
	counter := 0x01

	// Scratch buffer reused on every derivation round: accessKey followed by
	// a single counter byte.
	buffer := make([]byte, 1+len(accessKey)) // 1 byte counter + len(accessKey)
	kdfContext := bytes.NewBuffer(buffer)

	// KDF input key: "AWS4A" || secretKey.
	inputKey := append([]byte("AWS4A"), []byte(secretKey)...)

	d := new(big.Int)
	for {
		kdfContext.Reset()
		kdfContext.WriteString(accessKey)
		kdfContext.WriteByte(byte(counter))

		key, err := signerCrypto.HMACKeyDerivation(sha256.New, bitLen, inputKey, []byte(signingAlgorithm), kdfContext.Bytes())
		if err != nil {
			return nil, err
		}

		// Check key first before calling SetBytes if key key is in fact a valid candidate.
		// This ensures the byte slice is the correct length (32-bytes) to compare in constant-time
		cmp, err := signerCrypto.ConstantTimeByteCompare(key, nMinusTwoP256.Bytes())
		if err != nil {
			return nil, err
		}
		if cmp == -1 {
			// Candidate is in [0, N-2]; accept it.
			d.SetBytes(key)
			break
		}

		// Candidate out of range: bump the one-byte counter and re-derive,
		// per the B.4.2 retry procedure.
		counter++
		if counter > 0xFF {
			return nil, fmt.Errorf("exhausted single byte external counter")
		}
	}
	// Shift the accepted candidate from [0, N-2] into [1, N-1].
	d = d.Add(d, one)

	priv := new(ecdsa.PrivateKey)
	priv.PublicKey.Curve = p256
	priv.D = d
	priv.PublicKey.X, priv.PublicKey.Y = p256.ScalarBaseMult(d.Bytes())

	return priv, nil
}
|
||||||
|
|
||||||
|
// httpSigner carries the per-request state needed to produce (or reproduce,
// for verification) a SigV4A signature for a single HTTP request.
type httpSigner struct {
	Request     *http.Request
	ServiceName string
	RegionSet   []string
	Time        time.Time
	Credentials Credentials
	// IsPreSign selects query-string signing instead of the Authorization header.
	IsPreSign bool

	Logger *zap.Logger
	Debug  bool

	// PayloadHash is the hex encoded SHA-256 hash of the request payload
	// If len(PayloadHash) == 0 the signer will attempt to send the request
	// as an unsigned payload. Note: Unsigned payloads only work for a subset of services.
	PayloadHash string

	DisableHeaderHoisting  bool
	DisableURIPathEscaping bool
}
|
||||||
|
|
||||||
|
// SignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and signs using SigV4a.
|
||||||
|
// The passed in request will be modified in place.
|
||||||
|
func (s *Signer) SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) error {
|
||||||
|
options := s.options
|
||||||
|
for _, fn := range optFns {
|
||||||
|
fn(&options)
|
||||||
|
}
|
||||||
|
|
||||||
|
signer := &httpSigner{
|
||||||
|
Request: r,
|
||||||
|
PayloadHash: payloadHash,
|
||||||
|
ServiceName: service,
|
||||||
|
RegionSet: regionSet,
|
||||||
|
Credentials: credentials,
|
||||||
|
Time: signingTime.UTC(),
|
||||||
|
DisableHeaderHoisting: options.DisableHeaderHoisting,
|
||||||
|
DisableURIPathEscaping: options.DisableURIPathEscaping,
|
||||||
|
Logger: options.Logger,
|
||||||
|
}
|
||||||
|
|
||||||
|
signedRequest, err := signer.Build()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
logHTTPSigningInfo(ctx, options, signedRequest)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifySignature checks sigv4a for a regular (Authorization-header) request.
func (s *Signer) VerifySignature(credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, signature string, optFns ...func(*SignerOptions)) error {
	return s.verifySignature(credentials, r, payloadHash, service, regionSet, signingTime, signature, false, optFns...)
}

// VerifyPresigned checks sigv4a for a presigned (query-string signed) request.
func (s *Signer) VerifyPresigned(credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, signature string, optFns ...func(*SignerOptions)) error {
	return s.verifySignature(credentials, r, payloadHash, service, regionSet, signingTime, signature, true, optFns...)
}
|
||||||
|
|
||||||
|
// verifySignature rebuilds the SigV4A string-to-sign for r (as a regular or
// presigned request, per isPresigned) and verifies the provided hex-encoded
// ECDSA signature against it using the public half of the derived key.
func (s *Signer) verifySignature(credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, signature string, isPresigned bool, optFns ...func(*SignerOptions)) error {
	options := s.options
	for _, fn := range optFns {
		fn(&options)
	}

	signer := &httpSigner{
		Request:                r,
		PayloadHash:            payloadHash,
		ServiceName:            service,
		RegionSet:              regionSet,
		Credentials:            credentials,
		Time:                   signingTime.UTC(),
		IsPreSign:              isPresigned,
		DisableHeaderHoisting:  options.DisableHeaderHoisting,
		DisableURIPathEscaping: options.DisableURIPathEscaping,
	}

	// Re-run the signing pipeline to reproduce the string to sign.
	signedReq, err := signer.Build()
	if err != nil {
		return err
	}

	logHTTPSigningInfo(context.TODO(), options, signedReq)

	signatureRaw, err := hex.DecodeString(signature)
	if err != nil {
		return fmt.Errorf("decode hex signature: %w", err)
	}

	// ECDSA verification over SHA-256(string to sign).
	ok, err := signerCrypto.VerifySignature(&credentials.PrivateKey.PublicKey, makeHash(sha256.New(), []byte(signedReq.StringToSign)), signatureRaw)
	if err != nil {
		return err
	}

	if !ok {
		return fmt.Errorf("v4a: invalid signature")
	}

	return nil
}
|
||||||
|
|
||||||
|
// PresignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and presigns using SigV4a
// Returns the presigned URL along with the headers that were signed with the request.
//
// PresignHTTP will not set the expires time of the presigned request
// automatically. To specify the expire duration for a request add the
// "X-Amz-Expires" query parameter on the request with the value as the
// duration in seconds the presigned URL should be considered valid for. This
// parameter is not used by all AWS services, and is most notable used by
// Amazon S3 APIs.
func (s *Signer) PresignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) (signedURI string, signedHeaders http.Header, err error) {
	options := s.options
	for _, fn := range optFns {
		fn(&options)
	}

	signer := &httpSigner{
		Request:                r,
		PayloadHash:            payloadHash,
		ServiceName:            service,
		RegionSet:              regionSet,
		Credentials:            credentials,
		Time:                   signingTime.UTC(),
		IsPreSign:              true,
		DisableHeaderHoisting:  options.DisableHeaderHoisting,
		DisableURIPathEscaping: options.DisableURIPathEscaping,
	}

	signedRequest, err := signer.Build()
	if err != nil {
		return "", nil, err
	}

	logHTTPSigningInfo(ctx, options, signedRequest)

	signedHeaders = make(http.Header)

	// For the signed headers we canonicalize the header keys in the returned
	// map. This avoids situations where the standard library can double
	// headers like the Host header, even if it is present in lower-case form.
	for k, v := range signedRequest.SignedHeaders {
		key := textproto.CanonicalMIMEHeaderKey(k)
		signedHeaders[key] = append(signedHeaders[key], v...)
	}

	return signedRequest.Request.URL.String(), signedHeaders, nil
}
|
||||||
|
|
||||||
|
func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) {
|
||||||
|
amzDate := s.Time.Format(timeFormat)
|
||||||
|
|
||||||
|
if s.IsPreSign {
|
||||||
|
query.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ","))
|
||||||
|
query.Set(amzDateKey, amzDate)
|
||||||
|
query.Set(amzAlgorithmKey, signingAlgorithm)
|
||||||
|
if len(s.Credentials.SessionToken) > 0 {
|
||||||
|
query.Set(amzSecurityTokenKey, s.Credentials.SessionToken)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
headers.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ","))
|
||||||
|
headers.Set(amzDateKey, amzDate)
|
||||||
|
if len(s.Credentials.SessionToken) > 0 {
|
||||||
|
headers.Set(amzSecurityTokenKey, s.Credentials.SessionToken)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build computes the SigV4A canonical request, string to sign, and signature
// for s.Request, mutating the request in place: presigned requests get the
// signature appended to the query string, regular requests get an
// Authorization header. The intermediate artifacts are returned for logging
// and for signature verification.
func (s *httpSigner) Build() (signedRequest, error) {
	req := s.Request

	query := req.URL.Query()
	headers := req.Header

	s.setRequiredSigningFields(headers, query)

	// Sort Each Query Key's Values
	for key := range query {
		sort.Strings(query[key])
	}

	v4Internal.SanitizeHostForHeader(req)

	credentialScope := s.buildCredentialScope()
	credentialStr := s.Credentials.Context + "/" + credentialScope
	if s.IsPreSign {
		query.Set(amzCredentialKey, credentialStr)
	}

	// For presigned requests (unless disabled), hoist the allowed headers
	// into the query string so they travel in the URL rather than as signed
	// headers.
	unsignedHeaders := headers
	if s.IsPreSign && !s.DisableHeaderHoisting {
		urlValues := url.Values{}
		urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, unsignedHeaders)
		for k := range urlValues {
			query[k] = urlValues[k]
		}
	}

	host := req.URL.Host
	if len(req.Host) > 0 {
		host = req.Host
	}

	var (
		signedHeaders      http.Header
		signedHeadersStr   string
		canonicalHeaderStr string
	)

	// Presigned and regular requests ignore different header sets when
	// building the canonical headers.
	if s.IsPreSign {
		signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredPresignedHeaders, unsignedHeaders, s.Request.ContentLength)
	} else {
		signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
	}

	if s.IsPreSign {
		query.Set(amzSignedHeadersKey, signedHeadersStr)
	}

	// AWS canonicalization requires spaces encoded as %20, not '+'.
	rawQuery := strings.Replace(query.Encode(), "+", "%20", -1)

	canonicalURI := v4Internal.GetURIPath(req.URL)
	if !s.DisableURIPathEscaping {
		canonicalURI = httpbinding.EscapePath(canonicalURI, false)
	}

	canonicalString := s.buildCanonicalString(
		req.Method,
		canonicalURI,
		rawQuery,
		signedHeadersStr,
		canonicalHeaderStr,
	)

	strToSign := s.buildStringToSign(credentialScope, canonicalString)
	signingSignature, err := s.buildSignature(strToSign)
	if err != nil {
		return signedRequest{}, err
	}

	// Attach the signature: query parameter for presign, Authorization
	// header otherwise (replacing any existing value).
	if s.IsPreSign {
		rawQuery += "&X-Amz-Signature=" + signingSignature
	} else {
		headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature))
	}

	req.URL.RawQuery = rawQuery

	return signedRequest{
		Request:         req,
		SignedHeaders:   signedHeaders,
		CanonicalString: canonicalString,
		StringToSign:    strToSign,
		PreSigned:       s.IsPreSign,
	}, nil
}
|
||||||
|
|
||||||
|
func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string {
|
||||||
|
const credential = "Credential="
|
||||||
|
const signedHeaders = "SignedHeaders="
|
||||||
|
const signature = "Signature="
|
||||||
|
const commaSpace = ", "
|
||||||
|
|
||||||
|
var parts strings.Builder
|
||||||
|
parts.Grow(len(signingAlgorithm) + 1 +
|
||||||
|
len(credential) + len(credentialStr) + len(commaSpace) +
|
||||||
|
len(signedHeaders) + len(signedHeadersStr) + len(commaSpace) +
|
||||||
|
len(signature) + len(signingSignature),
|
||||||
|
)
|
||||||
|
parts.WriteString(signingAlgorithm)
|
||||||
|
parts.WriteRune(' ')
|
||||||
|
parts.WriteString(credential)
|
||||||
|
parts.WriteString(credentialStr)
|
||||||
|
parts.WriteString(commaSpace)
|
||||||
|
parts.WriteString(signedHeaders)
|
||||||
|
parts.WriteString(signedHeadersStr)
|
||||||
|
parts.WriteString(commaSpace)
|
||||||
|
parts.WriteString(signature)
|
||||||
|
parts.WriteString(signingSignature)
|
||||||
|
return parts.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *httpSigner) buildCredentialScope() string {
|
||||||
|
return strings.Join([]string{
|
||||||
|
s.Time.Format(shortTimeFormat),
|
||||||
|
s.ServiceName,
|
||||||
|
"aws4_request",
|
||||||
|
}, "/")
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
|
||||||
|
query := url.Values{}
|
||||||
|
unsignedHeaders := http.Header{}
|
||||||
|
for k, h := range header {
|
||||||
|
if r.IsValid(k) {
|
||||||
|
query[k] = h
|
||||||
|
} else {
|
||||||
|
unsignedHeaders[k] = h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return query, unsignedHeaders
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildCanonicalHeaders produces the three header-related artifacts of the
// canonical request:
//   - signed: the lower-cased header map that will be covered by the signature
//     (always including "host"),
//   - signedHeaders: the sorted, ";"-joined list of signed header names,
//   - canonicalHeadersStr: the "name:value\n" canonical header section, with
//     values space-normalized per the SigV4 spec.
//
// NOTE(review): the length parameter is currently unused — content-length
// signing is commented out below.
func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
	signed = make(http.Header)

	var headers []string
	const hostHeader = "host"
	// The host header is always signed, regardless of the rule.
	headers = append(headers, hostHeader)
	signed[hostHeader] = append(signed[hostHeader], host)

	//const contentLengthHeader = "content-length"
	//if length > 0 {
	//	headers = append(headers, contentLengthHeader)
	//	signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
	//}

	for k, v := range header {
		if !rule.IsValid(k) {
			continue // ignored header
		}

		lowerCaseKey := strings.ToLower(k)
		if _, ok := signed[lowerCaseKey]; ok {
			// include additional values
			signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
			continue
		}

		headers = append(headers, lowerCaseKey)
		signed[lowerCaseKey] = v
	}
	// The spec requires signed header names in lexicographic order.
	sort.Strings(headers)

	signedHeaders = strings.Join(headers, ";")

	var canonicalHeaders strings.Builder
	n := len(headers)
	const colon = ':'
	for i := 0; i < n; i++ {
		if headers[i] == hostHeader {
			// Host is written from the explicit host argument, not from the map.
			canonicalHeaders.WriteString(hostHeader)
			canonicalHeaders.WriteRune(colon)
			canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host))
		} else {
			canonicalHeaders.WriteString(headers[i])
			canonicalHeaders.WriteRune(colon)
			// Trim out leading, trailing, and dedup inner spaces from signed header values.
			values := signed[headers[i]]
			for j, v := range values {
				cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
				canonicalHeaders.WriteString(cleanedValue)
				// Multiple values for one header are joined with commas.
				if j < len(values)-1 {
					canonicalHeaders.WriteRune(',')
				}
			}
		}
		canonicalHeaders.WriteRune('\n')
	}
	canonicalHeadersStr = canonicalHeaders.String()

	return signed, signedHeaders, canonicalHeadersStr
}
|
||||||
|
|
||||||
|
func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string {
|
||||||
|
return strings.Join([]string{
|
||||||
|
method,
|
||||||
|
uri,
|
||||||
|
query,
|
||||||
|
canonicalHeaders,
|
||||||
|
signedHeaders,
|
||||||
|
s.PayloadHash,
|
||||||
|
}, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string {
|
||||||
|
return strings.Join([]string{
|
||||||
|
signingAlgorithm,
|
||||||
|
s.Time.Format(timeFormat),
|
||||||
|
credentialScope,
|
||||||
|
hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))),
|
||||||
|
}, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeHash(hash hash.Hash, b []byte) []byte {
|
||||||
|
hash.Reset()
|
||||||
|
hash.Write(b)
|
||||||
|
return hash.Sum(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *httpSigner) buildSignature(strToSign string) (string, error) {
|
||||||
|
sig, err := s.Credentials.PrivateKey.Sign(rand.Reader, makeHash(sha256.New(), []byte(strToSign)), crypto.SHA256)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return hex.EncodeToString(sig), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// logSignInfoMsg is the debug template dumping the canonical request string
// and the string-to-sign; the trailing %s carries the optional signed-URL
// section (logSignedURLMsg) for presigned requests.
const logSignInfoMsg = `Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`

// logSignedURLMsg is appended to logSignInfoMsg when the request was presigned.
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
|
||||||
|
|
||||||
|
func logHTTPSigningInfo(_ context.Context, options SignerOptions, r signedRequest) {
|
||||||
|
if !options.LogSigning {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
signedURLMsg := ""
|
||||||
|
if r.PreSigned {
|
||||||
|
signedURLMsg = fmt.Sprintf(logSignedURLMsg, r.Request.URL.String())
|
||||||
|
}
|
||||||
|
if options.Logger != nil {
|
||||||
|
options.Logger.Debug(fmt.Sprintf(logSignInfoMsg, r.CanonicalString, r.StringToSign, signedURLMsg))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// signedRequest carries the artifacts produced while signing a request,
// primarily consumed by logHTTPSigningInfo for debug output.
type signedRequest struct {
	Request         *http.Request // the request that was signed
	SignedHeaders   http.Header   // headers covered by the signature
	CanonicalString string        // canonical request string
	StringToSign    string        // final string-to-sign
	PreSigned       bool          // true when the signature lives in query parameters (presigned URL)
}
|
425
api/auth/signer/v4asdk2/v4a_test.go
Normal file
425
api/auth/signer/v4asdk2/v4a_test.go
Normal file
|
@ -0,0 +1,425 @@
|
||||||
|
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/v4a_test.go
|
||||||
|
// with changes:
|
||||||
|
// * use zap.Logger instead of smithy-go/logging
|
||||||
|
|
||||||
|
package v4a
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/crypto"
|
||||||
|
"github.com/aws/aws-sdk-go-v2/aws"
|
||||||
|
"go.uber.org/zap/zaptest"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
accessKey = "AKISORANDOMAASORANDOM"
|
||||||
|
secretKey = "q+jcrXGc+0zWN6uzclKVhvMmUsIfRPa4rlRandom"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDeriveECDSAKeyPairFromSecret(t *testing.T) {
|
||||||
|
privateKey, err := deriveKeyFromAccessKeyPair(accessKey, secretKey)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedX := func() *big.Int {
|
||||||
|
t.Helper()
|
||||||
|
b, ok := new(big.Int).SetString("15D242CEEBF8D8169FD6A8B5A746C41140414C3B07579038DA06AF89190FFFCB", 16)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("failed to parse big integer")
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}()
|
||||||
|
expectedY := func() *big.Int {
|
||||||
|
t.Helper()
|
||||||
|
b, ok := new(big.Int).SetString("515242CEDD82E94799482E4C0514B505AFCCF2C0C98D6A553BF539F424C5EC0", 16)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("failed to parse big integer")
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}()
|
||||||
|
|
||||||
|
if privateKey.X.Cmp(expectedX) != 0 {
|
||||||
|
t.Errorf("expected % X, got % X", expectedX, privateKey.X)
|
||||||
|
}
|
||||||
|
if privateKey.Y.Cmp(expectedY) != 0 {
|
||||||
|
t.Errorf("expected % X, got % X", expectedY, privateKey.Y)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestSignHTTP signs the fixed test request at the Unix epoch using
// session-token credentials and validates the Authorization header components
// (algorithm, credential scope, signed-header list, signature) and X-Amz-Date.
func TestSignHTTP(t *testing.T) {
	req := buildRequest("dynamodb", "us-east-1")

	signer, credProvider := buildSigner(t, true)

	key, err := credProvider.RetrievePrivateKey(context.Background())
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	err = signer.SignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectedDate := "19700101T000000Z"
	expectedAlg := "AWS4-ECDSA-P256-SHA256"
	expectedCredential := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
	// x-amz-security-token is present because the signer was built with a token.
	expectedSignedHeaders := "content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-region-set;x-amz-security-token;x-amz-target"
	expectedStrToSignHash := "4ba7d0482cf4d5450cefdc067a00de1a4a715e444856fa3e1d85c35fb34d9730"

	q := req.Header

	validateAuthorization(t, q.Get("Authorization"), expectedAlg, expectedCredential, expectedSignedHeaders, expectedStrToSignHash)

	if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
}
|
||||||
|
|
||||||
|
// TestSignHTTP_NoSessionToken is the same scenario as TestSignHTTP but with
// token-less credentials, so x-amz-security-token is absent from the signed
// headers and the string-to-sign hash differs.
func TestSignHTTP_NoSessionToken(t *testing.T) {
	req := buildRequest("dynamodb", "us-east-1")

	signer, credProvider := buildSigner(t, false)

	key, err := credProvider.RetrievePrivateKey(context.Background())
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	err = signer.SignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectedAlg := "AWS4-ECDSA-P256-SHA256"
	expectedCredential := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
	expectedSignedHeaders := "content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-region-set;x-amz-target"
	expectedStrToSignHash := "1aeefb422ae6aa0de7aec829da813e55cff35553cac212dffd5f9474c71e47ee"

	q := req.Header

	validateAuthorization(t, q.Get("Authorization"), expectedAlg, expectedCredential, expectedSignedHeaders, expectedStrToSignHash)
}
|
||||||
|
|
||||||
|
// TestPresignHTTP presigns the fixed test request at the Unix epoch and
// verifies that all signing artifacts moved into the URL query string
// (algorithm, credential, signed headers, date, region set, signature) and
// that signed headers such as X-Amz-Meta-Other-Header stayed out of the query.
func TestPresignHTTP(t *testing.T) {
	req := buildRequest("dynamodb", "us-east-1")

	signer, credProvider := buildSigner(t, false)

	key, err := credProvider.RetrievePrivateKey(context.Background())
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	query := req.URL.Query()
	query.Set("X-Amz-Expires", "18000")
	req.URL.RawQuery = query.Encode()

	signedURL, _, err := signer.PresignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectedDate := "19700101T000000Z"
	expectedAlg := "AWS4-ECDSA-P256-SHA256"
	expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
	expectedCredential := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
	expectedStrToSignHash := "d7ffbd2fab644384c056957e6ac38de4ae68246764b5f5df171b3824153b6397"
	expectedTarget := "prefix.Operation"

	signedReq, err := url.Parse(signedURL)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	q := signedReq.Query()

	// The signature is verified cryptographically against the expected hash.
	validateSignature(t, expectedStrToSignHash, q.Get("X-Amz-Signature"))

	if e, a := expectedAlg, q.Get("X-Amz-Algorithm"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedCredential, q.Get("X-Amz-Credential"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
		t.Errorf("expect %v to be empty", a)
	}
	if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := "us-east-1", q.Get("X-Amz-Region-Set"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
}
|
||||||
|
|
||||||
|
// TestPresignHTTP_BodyWithArrayRequest presigns a request whose query string
// contains a repeated parameter (Foo) to exercise canonical query encoding of
// multi-valued keys, then validates the same presigned query artifacts as
// TestPresignHTTP.
func TestPresignHTTP_BodyWithArrayRequest(t *testing.T) {
	req := buildRequest("dynamodb", "us-east-1")
	req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"

	signer, credProvider := buildSigner(t, true)

	key, err := credProvider.RetrievePrivateKey(context.Background())
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	query := req.URL.Query()
	query.Set("X-Amz-Expires", "300")
	req.URL.RawQuery = query.Encode()

	signedURI, _, err := signer.PresignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	signedReq, err := url.Parse(signedURI)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectedAlg := "AWS4-ECDSA-P256-SHA256"
	expectedDate := "19700101T000000Z"
	expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
	expectedStrToSignHash := "acff64fd3689be96259d4112c3742ff79f4da0d813bc58a285dc1c4449760bec"
	expectedCred := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
	expectedTarget := "prefix.Operation"

	q := signedReq.Query()

	validateSignature(t, expectedStrToSignHash, q.Get("X-Amz-Signature"))

	if e, a := expectedAlg, q.Get("X-Amz-Algorithm"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedCred, q.Get("X-Amz-Credential"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
		t.Errorf("expect %v to be empty, was not", a)
	}
	if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := "us-east-1", q.Get("X-Amz-Region-Set"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
}
|
||||||
|
// TestSign_buildCanonicalHeaders drives the full httpSigner.Build with
// headers containing leading/trailing/inner/tab whitespace and a multi-valued
// header, asserting the exact canonical request string — i.e. that header
// values are space-normalized, names lower-cased and sorted, and multiple
// values comma-joined.
func TestSign_buildCanonicalHeaders(t *testing.T) {
	serviceName := "mockAPI"
	region := "mock-region"
	endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"

	req, err := http.NewRequest("POST", endpoint, nil)
	if err != nil {
		t.Fatalf("failed to create request, %v", err)
	}

	// Each header exercises one whitespace-normalization case.
	req.Header.Set("FooInnerSpace", " inner space ")
	req.Header.Set("FooLeadingSpace", " leading-space")
	req.Header.Add("FooMultipleSpace", "no-space")
	req.Header.Add("FooMultipleSpace", "\ttab-space")
	req.Header.Add("FooMultipleSpace", "trailing-space ")
	req.Header.Set("FooNoSpace", "no-space")
	req.Header.Set("FooTabSpace", "\ttab-space\t")
	req.Header.Set("FooTrailingSpace", "trailing-space ")
	req.Header.Set("FooWrappedSpace", " wrapped-space ")

	credProvider := &SymmetricCredentialAdaptor{
		SymmetricProvider: staticCredentialsProvider{
			Value: aws.Credentials{
				AccessKeyID:     accessKey,
				SecretAccessKey: secretKey,
			},
		},
	}
	key, err := credProvider.RetrievePrivateKey(context.Background())
	if err != nil {
		t.Fatalf("expect no error, got %v", err)
	}

	ctx := &httpSigner{
		Request:     req,
		ServiceName: serviceName,
		RegionSet:   []string{region},
		Credentials: key,
		Time:        time.Date(2021, 10, 20, 12, 42, 0, 0, time.UTC),
	}

	build, err := ctx.Build()
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	expectCanonicalString := strings.Join([]string{
		`POST`,
		`/`,
		``,
		`fooinnerspace:inner space`,
		`fooleadingspace:leading-space`,
		`foomultiplespace:no-space,tab-space,trailing-space`,
		`foonospace:no-space`,
		`footabspace:tab-space`,
		`footrailingspace:trailing-space`,
		`foowrappedspace:wrapped-space`,
		`host:mockAPI.mock-region.amazonaws.com`,
		`x-amz-date:20211020T124200Z`,
		`x-amz-region-set:mock-region`,
		``,
		`fooinnerspace;fooleadingspace;foomultiplespace;foonospace;footabspace;footrailingspace;foowrappedspace;host;x-amz-date;x-amz-region-set`,
		``,
	}, "\n")
	if diff := cmpDiff(expectCanonicalString, build.CanonicalString); diff != "" {
		t.Errorf("expect match, got\n%s", diff)
	}
}
|
||||||
|
|
||||||
|
func validateAuthorization(t *testing.T, authorization, expectedAlg, expectedCredential, expectedSignedHeaders, expectedStrToSignHash string) {
|
||||||
|
t.Helper()
|
||||||
|
split := strings.SplitN(authorization, " ", 2)
|
||||||
|
|
||||||
|
if len(split) != 2 {
|
||||||
|
t.Fatal("unexpected authorization header format")
|
||||||
|
}
|
||||||
|
|
||||||
|
if e, a := split[0], expectedAlg; e != a {
|
||||||
|
t.Errorf("expected %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
keyValues := strings.Split(split[1], ", ")
|
||||||
|
seen := make(map[string]string)
|
||||||
|
|
||||||
|
for _, kv := range keyValues {
|
||||||
|
idx := strings.Index(kv, "=")
|
||||||
|
if idx == -1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
key, value := kv[:idx], kv[idx+1:]
|
||||||
|
seen[key] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
if a, ok := seen["Credential"]; ok {
|
||||||
|
if expectedCredential != a {
|
||||||
|
t.Errorf("expected credential %v, got %v", expectedCredential, a)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
t.Errorf("Credential not found in authorization string")
|
||||||
|
}
|
||||||
|
|
||||||
|
if a, ok := seen["SignedHeaders"]; ok {
|
||||||
|
if expectedSignedHeaders != a {
|
||||||
|
t.Errorf("expected signed headers %v, got %v", expectedSignedHeaders, a)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
t.Errorf("SignedHeaders not found in authorization string")
|
||||||
|
}
|
||||||
|
|
||||||
|
if a, ok := seen["Signature"]; ok {
|
||||||
|
validateSignature(t, expectedStrToSignHash, a)
|
||||||
|
} else {
|
||||||
|
t.Errorf("signature not found in authorization string")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateSignature(t *testing.T, expectedHash, signature string) {
|
||||||
|
t.Helper()
|
||||||
|
pair, err := deriveKeyFromAccessKeyPair(accessKey, secretKey)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
hash, _ := hex.DecodeString(expectedHash)
|
||||||
|
sig, _ := hex.DecodeString(signature)
|
||||||
|
|
||||||
|
ok, err := crypto.VerifySignature(&pair.PublicKey, hash, sig)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error, got %v", err)
|
||||||
|
}
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("failed to verify signing singature")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildRequest constructs the canonical fixture request shared by the signing
// tests: a POST with an opaque URL, fixed target/content headers, and a meta
// header added under two spellings that canonicalize to the same key.
func buildRequest(serviceName, region string) *http.Request {
	const metaValue = "some-value=!@#$%^&* (+)"

	endpoint := fmt.Sprintf("https://%s.%s.amazonaws.com", serviceName, region)
	req, _ := http.NewRequest("POST", endpoint, nil)
	req.URL.Opaque = "//example.org/bucket/key-._~,!@%23$%25^&*()"

	req.Header.Set("X-Amz-Target", "prefix.Operation")
	req.Header.Set("Content-Type", "application/x-amz-json-1.0")
	req.Header.Set("Content-Length", strconv.Itoa(1024))
	req.Header.Set("X-Amz-Meta-Other-Header", metaValue)
	req.Header.Add("X-Amz-Meta-Other-Header_With_Underscore", metaValue)
	req.Header.Add("X-amz-Meta-Other-Header_With_Underscore", metaValue)

	return req
}
|
||||||
|
|
||||||
|
func buildSigner(t *testing.T, withToken bool) (*Signer, CredentialsProvider) {
|
||||||
|
creds := aws.Credentials{
|
||||||
|
AccessKeyID: accessKey,
|
||||||
|
SecretAccessKey: secretKey,
|
||||||
|
}
|
||||||
|
|
||||||
|
if withToken {
|
||||||
|
creds.SessionToken = "TOKEN"
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewSigner(func(options *SignerOptions) {
|
||||||
|
options.Logger = zaptest.NewLogger(t)
|
||||||
|
}), &SymmetricCredentialAdaptor{
|
||||||
|
SymmetricProvider: staticCredentialsProvider{
|
||||||
|
Value: creds,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// staticCredentialsProvider is a test-only CredentialsProvider that always
// returns the fixed Value (see Retrieve).
type staticCredentialsProvider struct {
	Value aws.Credentials
}
|
||||||
|
|
||||||
|
func (s staticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) {
|
||||||
|
v := s.Value
|
||||||
|
if v.AccessKeyID == "" || v.SecretAccessKey == "" {
|
||||||
|
return aws.Credentials{
|
||||||
|
Source: "Source Name",
|
||||||
|
}, fmt.Errorf("static credentials are empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(v.Source) == 0 {
|
||||||
|
v.Source = "Source Name"
|
||||||
|
}
|
||||||
|
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// cmpDiff returns "" when e and a are deeply equal, otherwise a short
// "<expected> != <actual>" description.
func cmpDiff(e, a interface{}) string {
	if reflect.DeepEqual(e, a) {
		return ""
	}
	return fmt.Sprintf("%v != %v", e, a)
}
|
117
api/auth/signer/v4sdk2/signer/internal/v4/cache.go
Normal file
117
api/auth/signer/v4sdk2/signer/internal/v4/cache.go
Normal file
|
@ -0,0 +1,117 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/cache.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go-v2/aws"
|
||||||
|
)
|
||||||
|
|
||||||
|
// lookupKey builds the "<region>/<service>" cache key.
func lookupKey(service, region string) string {
	var b strings.Builder
	b.Grow(len(region) + 1 + len(service))
	b.WriteString(region)
	b.WriteByte('/')
	b.WriteString(service)
	return b.String()
}
|
||||||
|
|
||||||
|
// derivedKey is one cache entry: the signing key derived from an access key
// for a particular signing day.
type derivedKey struct {
	AccessKey  string    // access key ID the credential was derived for
	Date       time.Time // signing time; entries match per calendar day (see isSameDay)
	Credential []byte    // derived SigV4 signing key
}

// derivedKeyCache memoizes derived signing keys per "<region>/<service>"
// (see lookupKey), guarded by an RWMutex for concurrent signers.
type derivedKeyCache struct {
	values map[string]derivedKey
	mutex  sync.RWMutex
}
|
||||||
|
|
||||||
|
func newDerivedKeyCache() derivedKeyCache {
|
||||||
|
return derivedKeyCache{
|
||||||
|
values: make(map[string]derivedKey),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the signing key for (service, region) and the day of
// signingTime, deriving and caching it on first use. It takes a read-lock
// fast path and re-checks under the write lock (double-checked locking) so
// concurrent callers derive the key at most once.
func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte {
	key := lookupKey(service, region)
	s.mutex.RLock()
	if cred, ok := s.get(key, credentials, signingTime.Time); ok {
		s.mutex.RUnlock()
		return cred
	}
	s.mutex.RUnlock()

	s.mutex.Lock()
	// Re-check: another goroutine may have populated the entry between
	// releasing the read lock and acquiring the write lock.
	if cred, ok := s.get(key, credentials, signingTime.Time); ok {
		s.mutex.Unlock()
		return cred
	}
	cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime)
	entry := derivedKey{
		AccessKey:  credentials.AccessKeyID,
		Date:       signingTime.Time,
		Credential: cred,
	}
	s.values[key] = entry
	s.mutex.Unlock()

	return cred
}
|
||||||
|
|
||||||
|
func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) {
|
||||||
|
cacheEntry, ok := s.retrieveFromCache(key)
|
||||||
|
if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) {
|
||||||
|
return cacheEntry.Credential, true
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) {
|
||||||
|
if v, ok := s.values[key]; ok {
|
||||||
|
return v, true
|
||||||
|
}
|
||||||
|
return derivedKey{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// SigningKeyDeriver derives a signing key from a set of credentials,
// memoizing the result in an internal cache.
type SigningKeyDeriver struct {
	cache derivedKeyCache
}

// NewSigningKeyDeriver returns a new SigningKeyDeriver with an empty cache.
func NewSigningKeyDeriver() *SigningKeyDeriver {
	return &SigningKeyDeriver{
		cache: newDerivedKeyCache(),
	}
}

// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing.
// The result is cached per service/region and signing day.
func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte {
	return k.cache.Get(credential, service, region, signingTime)
}
|
||||||
|
|
||||||
|
func deriveKey(secret, service, region string, t SigningTime) []byte {
|
||||||
|
hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat()))
|
||||||
|
hmacRegion := HMACSHA256(hmacDate, []byte(region))
|
||||||
|
hmacService := HMACSHA256(hmacRegion, []byte(service))
|
||||||
|
return HMACSHA256(hmacService, []byte("aws4_request"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSameDay(x, y time.Time) bool {
|
||||||
|
xYear, xMonth, xDay := x.Date()
|
||||||
|
yYear, yMonth, yDay := y.Date()
|
||||||
|
|
||||||
|
if xYear != yYear {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if xMonth != yMonth {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return xDay == yDay
|
||||||
|
}
|
42
api/auth/signer/v4sdk2/signer/internal/v4/const.go
Normal file
42
api/auth/signer/v4sdk2/signer/internal/v4/const.go
Normal file
|
@ -0,0 +1,42 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/const.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
// Signature Version 4 (SigV4) Constants.
const (
	// EmptyStringSHA256 is the hex encoded sha256 value of an empty string.
	EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`

	// UnsignedPayload indicates that the request payload body is unsigned.
	UnsignedPayload = "UNSIGNED-PAYLOAD"

	// AmzAlgorithmKey indicates the signing algorithm.
	AmzAlgorithmKey = "X-Amz-Algorithm"

	// AmzSecurityTokenKey indicates the security token to be used with temporary credentials.
	AmzSecurityTokenKey = "X-Amz-Security-Token"

	// AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'.
	AmzDateKey = "X-Amz-Date"

	// AmzCredentialKey is the access key ID and credential scope.
	AmzCredentialKey = "X-Amz-Credential"

	// AmzSignedHeadersKey is the set of headers signed for the request.
	AmzSignedHeadersKey = "X-Amz-SignedHeaders"

	// AmzSignatureKey is the query parameter to store the SigV4 signature.
	AmzSignatureKey = "X-Amz-Signature"

	// TimeFormat is the time format to be used in the X-Amz-Date header or query parameter.
	TimeFormat = "20060102T150405Z"

	// ShortTimeFormat is the shortened time format used in the credential scope.
	ShortTimeFormat = "20060102"

	// ContentSHAKey is the SHA256 of the request body.
	ContentSHAKey = "X-Amz-Content-Sha256"

	// StreamingEventsPayload indicates that the request payload body is a signed event stream.
	StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS"
)
|
90
api/auth/signer/v4sdk2/signer/internal/v4/header_rules.go
Normal file
90
api/auth/signer/v4sdk2/signer/internal/v4/header_rules.go
Normal file
|
@ -0,0 +1,90 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/header_rules.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Rules houses a set of Rule needed for validation of a
// string value. A value is accepted when any single Rule accepts it
// (see Rules.IsValid).
type Rules []Rule

// Rule interface allows for more flexible rules and just simply
// checks whether or not a value adheres to that Rule.
type Rule interface {
	IsValid(value string) bool
}
|
||||||
|
|
||||||
|
// IsValid will iterate through all rules and see if any rules
|
||||||
|
// apply to the value and supports nested rules
|
||||||
|
func (r Rules) IsValid(value string) bool {
|
||||||
|
for _, rule := range r {
|
||||||
|
if rule.IsValid(value) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapRule generic Rule for maps; the map's key set is the accepted set.
type MapRule map[string]struct{}

// IsValid for the map Rule satisfies whether the value exists in the map.
func (m MapRule) IsValid(value string) bool {
	_, ok := m[value]
	return ok
}
|
||||||
|
|
||||||
|
// AllowList is a generic Rule for include listing.
type AllowList struct {
	Rule
}

// IsValid for AllowList checks if the value is within the AllowList.
func (w AllowList) IsValid(value string) bool {
	return w.Rule.IsValid(value)
}

// ExcludeList is a generic Rule for exclude listing.
type ExcludeList struct {
	Rule
}

// IsValid for ExcludeList checks that the value is NOT accepted by the
// embedded Rule (the original comment wrongly referred to AllowList).
func (b ExcludeList) IsValid(value string) bool {
	return !b.Rule.IsValid(value)
}
|
||||||
|
|
||||||
|
// Patterns is a list of strings to match against.
type Patterns []string

// IsValid for Patterns checks each pattern and returns if a match has
// been found. Matching is a case-insensitive prefix test (HasPrefixFold).
func (p Patterns) IsValid(value string) bool {
	for _, pattern := range p {
		if HasPrefixFold(value, pattern) {
			return true
		}
	}
	return false
}

// InclusiveRules rules allow for rules to depend on one another.
type InclusiveRules []Rule

// IsValid will return true only if all rules accept the value.
func (r InclusiveRules) IsValid(value string) bool {
	for _, rule := range r {
		if !rule.IsValid(value) {
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// HasPrefixFold tests whether the string s begins with prefix, interpreted as
// UTF-8 strings, under Unicode case-folding.
func HasPrefixFold(s, prefix string) bool {
	if len(s) < len(prefix) {
		return false
	}
	return strings.EqualFold(s[:len(prefix)], prefix)
}
|
88
api/auth/signer/v4sdk2/signer/internal/v4/headers.go
Normal file
88
api/auth/signer/v4sdk2/signer/internal/v4/headers.go
Normal file
|
@ -0,0 +1,88 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/header.go
|
||||||
|
// with changes:
|
||||||
|
// * drop User-Agent header from ignored
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
// IgnoredPresignedHeaders is a list of headers that are ignored during signing
// of presigned requests. Unlike IgnoredHeaders below, User-Agent is also
// excluded from the presigned signature.
var IgnoredPresignedHeaders = Rules{
	ExcludeList{
		MapRule{
			"Authorization":   struct{}{},
			"User-Agent":      struct{}{},
			"X-Amzn-Trace-Id": struct{}{},
			"Expect":          struct{}{},
		},
	},
}
|
||||||
|
|
||||||
|
// IgnoredHeaders is a list of headers that are ignored during signing.
// User-Agent is intentionally left commented out (i.e. NOT ignored, so it is
// signed) to be compatible with aws sdk java v1.
var IgnoredHeaders = Rules{
	ExcludeList{
		MapRule{
			"Authorization": struct{}{},
			//"User-Agent":      struct{}{},
			"X-Amzn-Trace-Id": struct{}{},
			"Expect":          struct{}{},
		},
	},
}
|
||||||
|
|
||||||
|
// RequiredSignedHeaders is an allow list for Build canonical headers: headers
// that, when present, must be included in the signed-header set. The two
// Patterns entries additionally admit any X-Amz-Object-Lock-* and
// X-Amz-Meta-* headers by case-insensitive prefix.
var RequiredSignedHeaders = Rules{
	AllowList{
		MapRule{
			"Cache-Control":                         struct{}{},
			"Content-Disposition":                   struct{}{},
			"Content-Encoding":                      struct{}{},
			"Content-Language":                      struct{}{},
			"Content-Md5":                           struct{}{},
			"Content-Type":                          struct{}{},
			"Expires":                               struct{}{},
			"If-Match":                              struct{}{},
			"If-Modified-Since":                     struct{}{},
			"If-None-Match":                         struct{}{},
			"If-Unmodified-Since":                   struct{}{},
			"Range":                                 struct{}{},
			"X-Amz-Acl":                             struct{}{},
			"X-Amz-Copy-Source":                     struct{}{},
			"X-Amz-Copy-Source-If-Match":            struct{}{},
			"X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
			"X-Amz-Copy-Source-If-None-Match":       struct{}{},
			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
			"X-Amz-Copy-Source-Range":               struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
			"X-Amz-Expected-Bucket-Owner":                                 struct{}{},
			"X-Amz-Grant-Full-control":                                    struct{}{},
			"X-Amz-Grant-Read":                                            struct{}{},
			"X-Amz-Grant-Read-Acp":                                        struct{}{},
			"X-Amz-Grant-Write":                                           struct{}{},
			"X-Amz-Grant-Write-Acp":                                       struct{}{},
			"X-Amz-Metadata-Directive":                                    struct{}{},
			"X-Amz-Mfa":                                                   struct{}{},
			"X-Amz-Request-Payer":                                         struct{}{},
			"X-Amz-Server-Side-Encryption":                                struct{}{},
			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":                 struct{}{},
			"X-Amz-Server-Side-Encryption-Context":                        struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Algorithm":             struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key":                   struct{}{},
			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":               struct{}{},
			"X-Amz-Storage-Class":                                         struct{}{},
			"X-Amz-Website-Redirect-Location":                             struct{}{},
			"X-Amz-Content-Sha256":                                        struct{}{},
			"X-Amz-Tagging":                                               struct{}{},
		},
	},
	Patterns{"X-Amz-Object-Lock-"},
	Patterns{"X-Amz-Meta-"},
}
|
||||||
|
|
||||||
|
// AllowedQueryHoisting is an allow list deciding which headers may be hoisted
// into a presigned request's query string: any header with the X-Amz- prefix
// that is NOT one of the RequiredSignedHeaders.
var AllowedQueryHoisting = InclusiveRules{
	ExcludeList{RequiredSignedHeaders},
	Patterns{"X-Amz-"},
}
|
65
api/auth/signer/v4sdk2/signer/internal/v4/headers_test.go
Normal file
65
api/auth/signer/v4sdk2/signer/internal/v4/headers_test.go
Normal file
|
@ -0,0 +1,65 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/header_test.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
// TestAllowedQueryHoisting verifies which headers are eligible for hoisting
// into a presigned URL's query string: X-Amz-* headers are hoisted unless
// they belong to RequiredSignedHeaders (object-lock, metadata).
func TestAllowedQueryHoisting(t *testing.T) {
	cases := map[string]struct {
		Header      string
		ExpectHoist bool
	}{
		"object-lock": {
			Header:      "X-Amz-Object-Lock-Mode",
			ExpectHoist: false,
		},
		"s3 metadata": {
			Header:      "X-Amz-Meta-SomeName",
			ExpectHoist: false,
		},
		"another header": {
			Header:      "X-Amz-SomeOtherHeader",
			ExpectHoist: true,
		},
		"non X-AMZ header": {
			Header:      "X-SomeOtherHeader",
			ExpectHoist: false,
		},
	}

	for name, c := range cases {
		t.Run(name, func(t *testing.T) {
			if e, a := c.ExpectHoist, AllowedQueryHoisting.IsValid(c.Header); e != a {
				t.Errorf("expect hoist %v, was %v", e, a)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestIgnoredHeaders verifies the IgnoredHeaders exclude list. Note the
// comparison below is deliberately inverted (e == a triggers the error):
// IgnoredHeaders.IsValid reports whether a header is ALLOWED into the
// signature, so an ignored header must yield IsValid == false.
func TestIgnoredHeaders(t *testing.T) {
	cases := map[string]struct {
		Header        string
		ExpectIgnored bool
	}{
		"expect": {
			Header:        "Expect",
			ExpectIgnored: true,
		},
		"authorization": {
			Header:        "Authorization",
			ExpectIgnored: true,
		},
		"X-AMZ header": {
			Header:        "X-Amz-Content-Sha256",
			ExpectIgnored: false,
		},
	}

	for name, c := range cases {
		t.Run(name, func(t *testing.T) {
			if e, a := c.ExpectIgnored, IgnoredHeaders.IsValid(c.Header); e == a {
				t.Errorf("expect ignored %v, was %v", e, a)
			}
		})
	}
}
|
15
api/auth/signer/v4sdk2/signer/internal/v4/hmac.go
Normal file
15
api/auth/signer/v4sdk2/signer/internal/v4/hmac.go
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/hmac.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/sha256"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HMACSHA256 computes a HMAC-SHA256 of data given the provided key.
|
||||||
|
func HMACSHA256(key []byte, data []byte) []byte {
|
||||||
|
hash := hmac.New(sha256.New, key)
|
||||||
|
hash.Write(data)
|
||||||
|
return hash.Sum(nil)
|
||||||
|
}
|
77
api/auth/signer/v4sdk2/signer/internal/v4/host.go
Normal file
77
api/auth/signer/v4sdk2/signer/internal/v4/host.go
Normal file
|
@ -0,0 +1,77 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/host.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SanitizeHostForHeader removes default port from host and updates request.Host
|
||||||
|
func SanitizeHostForHeader(r *http.Request) {
|
||||||
|
host := getHost(r)
|
||||||
|
port := portOnly(host)
|
||||||
|
if port != "" && isDefaultPort(r.URL.Scheme, port) {
|
||||||
|
r.Host = stripPort(host)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns host from request
|
||||||
|
func getHost(r *http.Request) string {
|
||||||
|
if r.Host != "" {
|
||||||
|
return r.Host
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.URL.Host
|
||||||
|
}
|
||||||
|
|
||||||
|
// stripPort returns hostport without any port number.
//
// For an IPv6 literal with a port, the literal is returned without the
// square brackets; zone identifiers are preserved.
//
// Copied from the Go 1.8 standard library (net/url).
func stripPort(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	if colon < 0 {
		// No port present at all.
		return hostport
	}
	if end := strings.IndexByte(hostport, ']'); end >= 0 {
		// Bracketed IPv6 literal: drop the brackets and everything after.
		return strings.TrimPrefix(hostport[:end], "[")
	}
	return hostport[:colon]
}
|
||||||
|
|
||||||
|
// portOnly returns the port part of hostport, without the leading colon.
// If hostport does not contain a port, an empty string is returned.
//
// Copied from the Go 1.8 standard library (net/url).
func portOnly(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	if colon < 0 {
		// No colon at all: definitely no port.
		return ""
	}
	if i := strings.Index(hostport, "]:"); i >= 0 {
		// Bracketed IPv6 literal followed by a port.
		return hostport[i+2:]
	}
	if strings.Contains(hostport, "]") {
		// Bracketed IPv6 literal without a port.
		return ""
	}
	return hostport[colon+1:]
}
|
||||||
|
|
||||||
|
// isDefaultPort reports whether port is the standard port for scheme
// (80 for HTTP, 443 for HTTPS). An empty port counts as default.
func isDefaultPort(scheme, port string) bool {
	if port == "" {
		return true
	}

	switch strings.ToLower(scheme) {
	case "http":
		return port == "80"
	case "https":
		return port == "443"
	}

	return false
}
|
15
api/auth/signer/v4sdk2/signer/internal/v4/scope.go
Normal file
15
api/auth/signer/v4sdk2/signer/internal/v4/scope.go
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/scope.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import "strings"
|
||||||
|
|
||||||
|
// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope
|
||||||
|
func BuildCredentialScope(signingTime SigningTime, region, service string) string {
|
||||||
|
return strings.Join([]string{
|
||||||
|
signingTime.ShortTimeFormat(),
|
||||||
|
region,
|
||||||
|
service,
|
||||||
|
"aws4_request",
|
||||||
|
}, "/")
|
||||||
|
}
|
38
api/auth/signer/v4sdk2/signer/internal/v4/time.go
Normal file
38
api/auth/signer/v4sdk2/signer/internal/v4/time.go
Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/time.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing.
type SigningTime struct {
	time.Time
	// Lazily-populated caches for the long and short formatted
	// representations; filled on first use by format().
	timeFormat      string
	shortTimeFormat string
}
|
||||||
|
|
||||||
|
// NewSigningTime creates a new SigningTime given a time.Time
|
||||||
|
func NewSigningTime(t time.Time) SigningTime {
|
||||||
|
return SigningTime{
|
||||||
|
Time: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeFormat provides a time formatted in the X-Amz-Date format
// (the package-level TimeFormat layout). The result is computed once and
// cached on the receiver.
func (m *SigningTime) TimeFormat() string {
	return m.format(&m.timeFormat, TimeFormat)
}
|
||||||
|
|
||||||
|
// ShortTimeFormat provides a time formatted of 20060102 (date-only scope
// component). The result is computed once and cached on the receiver.
func (m *SigningTime) ShortTimeFormat() string {
	return m.format(&m.shortTimeFormat, ShortTimeFormat)
}
|
||||||
|
|
||||||
|
func (m *SigningTime) format(target *string, format string) string {
|
||||||
|
if len(*target) > 0 {
|
||||||
|
return *target
|
||||||
|
}
|
||||||
|
v := m.Time.Format(format)
|
||||||
|
*target = v
|
||||||
|
return v
|
||||||
|
}
|
82
api/auth/signer/v4sdk2/signer/internal/v4/util.go
Normal file
82
api/auth/signer/v4sdk2/signer/internal/v4/util.go
Normal file
|
@ -0,0 +1,82 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/util.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const doubleSpace = "  "

// StripExcessSpaces will rewrite the passed in slice's string values to not
// contain multiple side-by-side spaces: leading and trailing spaces are
// removed and internal runs of spaces collapse to one. Only the ASCII space
// character is treated; tabs and other whitespace are left untouched.
func StripExcessSpaces(str string) string {
	// Trim leading/trailing spaces (spaces only, by design).
	str = strings.Trim(str, " ")

	// Fast path: nothing to collapse, return the input unchanged.
	if !strings.Contains(str, doubleSpace) {
		return str
	}

	var b strings.Builder
	b.Grow(len(str))
	lastWasSpace := false
	for i := 0; i < len(str); i++ {
		c := str[i]
		if c == ' ' {
			if lastWasSpace {
				// Drop every space after the first in a run.
				continue
			}
			lastWasSpace = true
		} else {
			lastWasSpace = false
		}
		b.WriteByte(c)
	}
	return b.String()
}
|
||||||
|
|
||||||
|
// GetURIPath returns the escaped URI component from the provided URL.
|
||||||
|
func GetURIPath(u *url.URL) string {
|
||||||
|
var uriPath string
|
||||||
|
|
||||||
|
if len(u.Opaque) > 0 {
|
||||||
|
const schemeSep, pathSep, queryStart = "//", "/", "?"
|
||||||
|
|
||||||
|
opaque := u.Opaque
|
||||||
|
// Cut off the query string if present.
|
||||||
|
if idx := strings.Index(opaque, queryStart); idx >= 0 {
|
||||||
|
opaque = opaque[:idx]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cutout the scheme separator if present.
|
||||||
|
if strings.Index(opaque, schemeSep) == 0 {
|
||||||
|
opaque = opaque[len(schemeSep):]
|
||||||
|
}
|
||||||
|
|
||||||
|
// capture URI path starting with first path separator.
|
||||||
|
if idx := strings.Index(opaque, pathSep); idx >= 0 {
|
||||||
|
uriPath = opaque[idx:]
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
uriPath = u.EscapedPath()
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(uriPath) == 0 {
|
||||||
|
uriPath = "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
return uriPath
|
||||||
|
}
|
160
api/auth/signer/v4sdk2/signer/internal/v4/util_test.go
Normal file
160
api/auth/signer/v4sdk2/signer/internal/v4/util_test.go
Normal file
|
@ -0,0 +1,160 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/util_test.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// lazyURLParse defers parsing of v until the returned function is invoked,
// letting table-driven tests declare URLs without handling errors up front.
func lazyURLParse(v string) func() (*url.URL, error) {
	return func() (*url.URL, error) { return url.Parse(v) }
}
|
||||||
|
|
||||||
|
// TestGetURIPath exercises GetURIPath across URLs with and without schemes,
// ports, separators, and opaque components; the escaped path (or "/" when
// none) must be returned.
func TestGetURIPath(t *testing.T) {
	cases := map[string]struct {
		getURL func() (*url.URL, error)
		expect string
	}{
		// Cases
		"with scheme": {
			getURL: lazyURLParse("https://localhost:9000"),
			expect: "/",
		},
		"no port, with scheme": {
			getURL: lazyURLParse("https://localhost"),
			expect: "/",
		},
		"without scheme": {
			getURL: lazyURLParse("localhost:9000"),
			expect: "/",
		},
		"without scheme, with path": {
			getURL: lazyURLParse("localhost:9000/abc123"),
			expect: "/abc123",
		},
		"without scheme, with separator": {
			getURL: lazyURLParse("//localhost:9000"),
			expect: "/",
		},
		"no port, without scheme, with separator": {
			getURL: lazyURLParse("//localhost"),
			expect: "/",
		},
		"without scheme, with separator, with path": {
			getURL: lazyURLParse("//localhost:9000/abc123"),
			expect: "/abc123",
		},
		"no port, without scheme, with separator, with path": {
			getURL: lazyURLParse("//localhost/abc123"),
			expect: "/abc123",
		},
		"opaque with query string": {
			getURL: lazyURLParse("localhost:9000/abc123?efg=456"),
			expect: "/abc123",
		},
		"failing test": {
			getURL: func() (*url.URL, error) {
				endpoint := "https://service.region.amazonaws.com"
				req, _ := http.NewRequest("POST", endpoint, nil)
				u := req.URL

				// Opaque overrides the parsed path; GetURIPath must read it.
				u.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"

				query := u.Query()
				query.Set("some-query-key", "value")
				u.RawQuery = query.Encode()

				return u, nil
			},
			expect: "/bucket/key-._~,!@#$%^&*()",
		},
	}

	for name, c := range cases {
		t.Run(name, func(t *testing.T) {
			u, err := c.getURL()
			if err != nil {
				t.Fatalf("failed to get URL, %v", err)
			}

			actual := GetURIPath(u)
			if e, a := c.expect, actual; e != a {
				t.Errorf("expect %v path, got %v", e, a)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestStripExcessHeaders checks StripExcessSpaces pairwise against expected
// outputs: leading/trailing spaces trimmed, internal runs collapsed to one.
func TestStripExcessHeaders(t *testing.T) {
	vals := []string{
		"",
		"123",
		"1 2 3",
		"1 2 3 ",
		" 1 2 3",
		"1 2 3",
		"1 23",
		"1 2 3",
		"1 2 ",
		" 1 2 ",
		"12 3",
		"12 3 1",
		"12 3 1",
		"12 3 1abc123",
	}

	expected := []string{
		"",
		"123",
		"1 2 3",
		"1 2 3",
		"1 2 3",
		"1 2 3",
		"1 23",
		"1 2 3",
		"1 2",
		"1 2",
		"12 3",
		"12 3 1",
		"12 3 1",
		"12 3 1abc123",
	}

	// vals[i] must map to expected[i].
	for i := 0; i < len(vals); i++ {
		r := StripExcessSpaces(vals[i])
		if e, a := expected[i], r; e != a {
			t.Errorf("%d, expect %v, got %v", i, e, a)
		}
	}
}
|
||||||
|
|
||||||
|
// stripExcessSpaceCases is benchmark input for BenchmarkStripExcessSpaces:
// a realistic Authorization header value plus synthetic space patterns.
var stripExcessSpaceCases = []string{
	`AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`,
	`123 321 123 321`,
	` 123 321 123 321 `,
	` 123 321 123 321 `,
	"123",
	"1 2 3",
	" 1 2 3",
	"1 2 3",
	"1 23",
	"1 2 3",
	"1 2 ",
	" 1 2 ",
	"12 3",
	"12 3 1",
	"12 3 1",
	"12 3 1abc123",
}
|
||||||
|
|
||||||
|
// BenchmarkStripExcessSpaces measures StripExcessSpaces over the mixed
// inputs above (header-like strings plus synthetic space runs).
func BenchmarkStripExcessSpaces(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, v := range stripExcessSpaceCases {
			StripExcessSpaces(v)
		}
	}
}
|
89
api/auth/signer/v4sdk2/signer/v4/stream.go
Normal file
89
api/auth/signer/v4sdk2/signer/v4/stream.go
Normal file
|
@ -0,0 +1,89 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/internal/v4"
|
||||||
|
"github.com/aws/aws-sdk-go-v2/aws"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EventStreamSigner is an AWS EventStream protocol signer.
type EventStreamSigner interface {
	// GetSignature signs a single event's headers and payload at signingTime.
	GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error)
}

// StreamSignerOptions is the configuration options for StreamSigner.
// It currently carries no settings but is kept for API compatibility.
type StreamSignerOptions struct{}

// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads.
// Signatures are chained: each event is signed over the previous event's
// signature, so a StreamSigner is stateful and not safe for concurrent use.
type StreamSigner struct {
	options StreamSignerOptions

	credentials aws.Credentials
	service     string
	region      string

	// prevSignature seeds the next GetSignature call; it starts as the
	// request's seed signature and is replaced after every signed event.
	prevSignature []byte

	signingKeyDeriver *v4Internal.SigningKeyDeriver
}
|
||||||
|
|
||||||
|
// NewStreamSigner returns a new AWS EventStream protocol signer.
|
||||||
|
func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner {
|
||||||
|
o := StreamSignerOptions{}
|
||||||
|
|
||||||
|
for _, fn := range optFns {
|
||||||
|
fn(&o)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &StreamSigner{
|
||||||
|
options: o,
|
||||||
|
credentials: credentials,
|
||||||
|
service: service,
|
||||||
|
region: region,
|
||||||
|
signingKeyDeriver: v4Internal.NewSigningKeyDeriver(),
|
||||||
|
prevSignature: seedSignature,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSignature signs the provided header and payload bytes.
//
// The signature covers the previous event's signature and becomes the new
// prevSignature for the next call (event-stream chaining). The ctx parameter
// is accepted for interface compatibility and is not used here.
func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) {
	options := s.options

	for _, fn := range optFns {
		fn(&options)
	}

	prevSignature := s.prevSignature

	st := v4Internal.NewSigningTime(signingTime)

	// Derive the date/region/service-scoped signing key from the credentials.
	sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)

	scope := v4Internal.BuildCredentialScope(st, s.region, s.service)

	stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st)

	signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign))
	// Chain: the next event will be signed over this event's signature.
	s.prevSignature = signature

	return signature, nil
}
|
||||||
|
|
||||||
|
// buildEventStreamStringToSign assembles the newline-separated
// "AWS4-HMAC-SHA256-PAYLOAD" string to sign for a single event: algorithm,
// timestamp, scope, previous signature, then hashes of headers and payload.
// NOTE(review): both makeHash calls share one sha256 instance — this relies
// on makeHash resetting the hash before use; confirm against its definition.
func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
	hash := sha256.New()
	return strings.Join([]string{
		"AWS4-HMAC-SHA256-PAYLOAD",
		signingTime.TimeFormat(),
		credentialScope,
		hex.EncodeToString(previousSignature),
		hex.EncodeToString(makeHash(hash, headers)),
		hex.EncodeToString(makeHash(hash, payload)),
	}, "\n")
}
|
582
api/auth/signer/v4sdk2/signer/v4/v4.go
Normal file
582
api/auth/signer/v4sdk2/signer/v4/v4.go
Normal file
|
@ -0,0 +1,582 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/v4.go
|
||||||
|
// with changes:
|
||||||
|
// * using different headers for sign/presign
|
||||||
|
// * don't duplicate content-length as signed header
|
||||||
|
// * use copy of smithy-go encoding/httpbinding package
|
||||||
|
// * use zap.Logger instead of smithy-go/logging
|
||||||
|
|
||||||
|
// Package v4 implements the AWS signature version 4 algorithm (commonly known
|
||||||
|
// as SigV4).
|
||||||
|
//
|
||||||
|
// For more information about SigV4, see [Signing AWS API requests] in the IAM
|
||||||
|
// user guide.
|
||||||
|
//
|
||||||
|
// While this implementation CAN work in an external context, it is developed
|
||||||
|
// primarily for SDK use and you may encounter fringe behaviors around header
|
||||||
|
// canonicalization.
|
||||||
|
//
|
||||||
|
// # Pre-escaping a request URI
|
||||||
|
//
|
||||||
|
// AWS v4 signature validation requires that the canonical string's URI path
|
||||||
|
// component must be the escaped form of the HTTP request's path.
|
||||||
|
//
|
||||||
|
// The Go HTTP client will perform escaping automatically on the HTTP request.
|
||||||
|
// This may cause signature validation errors because the request differs from
|
||||||
|
// the URI path or query from which the signature was generated.
|
||||||
|
//
|
||||||
|
// Because of this, we recommend that you explicitly escape the request when
|
||||||
|
// using this signer outside of the SDK to prevent possible signature mismatch.
|
||||||
|
// This can be done by setting URL.Opaque on the request. The signer will
|
||||||
|
// prefer that value, falling back to the return of URL.EscapedPath if unset.
|
||||||
|
//
|
||||||
|
// When setting URL.Opaque you must do so in the form of:
|
||||||
|
//
|
||||||
|
// "//<hostname>/<path>"
|
||||||
|
//
|
||||||
|
// // e.g.
|
||||||
|
// "//example.com/some/path"
|
||||||
|
//
|
||||||
|
// The leading "//" and hostname are required or the escaping will not work
|
||||||
|
// correctly.
|
||||||
|
//
|
||||||
|
// The TestStandaloneSign unit test provides a complete example of using the
|
||||||
|
// signer outside of the SDK and pre-escaping the URI path.
|
||||||
|
//
|
||||||
|
// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"net/http"
|
||||||
|
"net/textproto"
|
||||||
|
"net/url"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/smithy/encoding/httpbinding"
|
||||||
|
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/internal/v4"
|
||||||
|
"github.com/aws/aws-sdk-go-v2/aws"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// signingAlgorithm is the SigV4 algorithm identifier used in both the
	// Authorization header and the string to sign.
	signingAlgorithm = "AWS4-HMAC-SHA256"
	// authorizationHeader is the HTTP header receiving the signature for
	// non-presigned requests.
	authorizationHeader = "Authorization"

	// Version of signing v4
	Version = "SigV4"
)
|
||||||
|
|
||||||
|
// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests
type HTTPSigner interface {
	// SignHTTP signs r in place using the given credentials, hex-encoded
	// payload hash, service, region, and signing time.
	SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error
}

// keyDerivator derives the SigV4 signing key for a credential, service and
// region on the signing date (abstracted for caching/testing).
type keyDerivator interface {
	DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte
}
|
||||||
|
|
||||||
|
// SignerOptions is the SigV4 Signer options.
type SignerOptions struct {
	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
	// request header to the request's query string. This is most commonly used
	// with pre-signed requests preventing headers from being added to the
	// request's query string.
	DisableHeaderHoisting bool

	// Disables the automatic escaping of the URI path of the request for the
	// signature's canonical string's path. For services that do not need additional
	// escaping then use this to disable the signer escaping the path.
	//
	// S3 is an example of a service that does not need additional escaping.
	//
	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
	DisableURIPathEscaping bool

	// The logger to send log messages to.
	Logger *zap.Logger

	// Enable logging of signed requests.
	// This will enable logging of the canonical request, the string to sign, and for presigning the subsequent
	// presigned URL.
	LogSigning bool

	// Disables setting the session token on the request as part of signing
	// through X-Amz-Security-Token. This is needed for variations of v4 that
	// present the token elsewhere.
	DisableSessionToken bool
}
|
||||||
|
|
||||||
|
// Signer applies AWS v4 signing to given request. Use this to sign requests
// that need to be signed with AWS V4 Signatures.
type Signer struct {
	options SignerOptions
	// keyDerivator produces the scoped signing key; see NewSigner.
	keyDerivator keyDerivator
}
|
||||||
|
|
||||||
|
// NewSigner returns a new SigV4 Signer
|
||||||
|
func NewSigner(optFns ...func(signer *SignerOptions)) *Signer {
|
||||||
|
options := SignerOptions{}
|
||||||
|
|
||||||
|
for _, fn := range optFns {
|
||||||
|
fn(&options)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()}
|
||||||
|
}
|
||||||
|
|
||||||
|
// httpSigner carries all per-request state needed to produce one SigV4
// signature (or presigned URL) via Build.
type httpSigner struct {
	Request      *http.Request
	ServiceName  string
	Region       string
	Time         v4Internal.SigningTime
	Credentials  aws.Credentials
	KeyDerivator keyDerivator
	// IsPreSign selects query-string signing (presigned URL) instead of the
	// Authorization header.
	IsPreSign bool

	// PayloadHash is the hex-encoded SHA-256 of the request payload (or a
	// sentinel value such as UNSIGNED-PAYLOAD).
	PayloadHash string

	DisableHeaderHoisting  bool
	DisableURIPathEscaping bool
	DisableSessionToken    bool
}
|
||||||
|
|
||||||
|
// Build computes the SigV4 signature for s.Request and returns the signed
// request along with the intermediate canonical string and string to sign
// (useful for debug logging). For presigned requests the credential,
// signed-header list and signature go into the query string; otherwise the
// Authorization header is set. The request is mutated in place (Host
// sanitized, RawQuery rewritten).
func (s *httpSigner) Build() (signedRequest, error) {
	req := s.Request

	query := req.URL.Query()
	headers := req.Header

	s.setRequiredSigningFields(headers, query)

	// Sort Each Query Key's Values so the canonical query is deterministic.
	for key := range query {
		sort.Strings(query[key])
	}

	// Drop default ports from the Host so it matches what the service sees.
	v4Internal.SanitizeHostForHeader(req)

	credentialScope := s.buildCredentialScope()
	credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope
	if s.IsPreSign {
		query.Set(v4Internal.AmzCredentialKey, credentialStr)
	}

	// For presigned requests, hoist eligible X-Amz-* headers into the query
	// string; the remainder stays to be signed as headers.
	unsignedHeaders := headers
	if s.IsPreSign && !s.DisableHeaderHoisting {
		var urlValues url.Values
		urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers)
		for k := range urlValues {
			query[k] = urlValues[k]
		}
	}

	host := req.URL.Host
	if len(req.Host) > 0 {
		host = req.Host
	}

	var (
		signedHeaders      http.Header
		signedHeadersStr   string
		canonicalHeaderStr string
	)

	// Presigned and header-signed requests ignore different header sets.
	if s.IsPreSign {
		signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredPresignedHeaders, unsignedHeaders, s.Request.ContentLength)
	} else {
		signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
	}

	if s.IsPreSign {
		query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr)
	}

	// SigV4 requires spaces encoded as %20, not '+'.
	var rawQuery strings.Builder
	rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1))

	canonicalURI := v4Internal.GetURIPath(req.URL)
	if !s.DisableURIPathEscaping {
		canonicalURI = httpbinding.EscapePath(canonicalURI, false)
	}

	canonicalString := s.buildCanonicalString(
		req.Method,
		canonicalURI,
		rawQuery.String(),
		signedHeadersStr,
		canonicalHeaderStr,
	)

	strToSign := s.buildStringToSign(credentialScope, canonicalString)
	signingSignature, err := s.buildSignature(strToSign)
	if err != nil {
		return signedRequest{}, err
	}

	// Attach the signature: query parameter for presigned URLs, otherwise
	// replace any existing Authorization header value.
	if s.IsPreSign {
		rawQuery.WriteString("&X-Amz-Signature=")
		rawQuery.WriteString(signingSignature)
	} else {
		headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature))
	}

	req.URL.RawQuery = rawQuery.String()

	return signedRequest{
		Request:         req,
		SignedHeaders:   signedHeaders,
		CanonicalString: canonicalString,
		StringToSign:    strToSign,
		PreSigned:       s.IsPreSign,
	}, nil
}
|
||||||
|
|
||||||
|
func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string {
|
||||||
|
const credential = "Credential="
|
||||||
|
const signedHeaders = "SignedHeaders="
|
||||||
|
const signature = "Signature="
|
||||||
|
const commaSpace = ", "
|
||||||
|
|
||||||
|
var parts strings.Builder
|
||||||
|
parts.Grow(len(signingAlgorithm) + 1 +
|
||||||
|
len(credential) + len(credentialStr) + 2 +
|
||||||
|
len(signedHeaders) + len(signedHeadersStr) + 2 +
|
||||||
|
len(signature) + len(signingSignature),
|
||||||
|
)
|
||||||
|
parts.WriteString(signingAlgorithm)
|
||||||
|
parts.WriteRune(' ')
|
||||||
|
parts.WriteString(credential)
|
||||||
|
parts.WriteString(credentialStr)
|
||||||
|
parts.WriteString(commaSpace)
|
||||||
|
parts.WriteString(signedHeaders)
|
||||||
|
parts.WriteString(signedHeadersStr)
|
||||||
|
parts.WriteString(commaSpace)
|
||||||
|
parts.WriteString(signature)
|
||||||
|
parts.WriteString(signingSignature)
|
||||||
|
return parts.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the
|
||||||
|
// request is made to, and time the request is signed at. The signTime allows
|
||||||
|
// you to specify that a request is signed for the future, and cannot be
|
||||||
|
// used until then.
|
||||||
|
//
|
||||||
|
// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
|
||||||
|
// must be provided. Even if the request has no payload (aka body). If the
|
||||||
|
// request has no payload you should use the hex encoded SHA-256 of an empty
|
||||||
|
// string as the payloadHash value.
|
||||||
|
//
|
||||||
|
// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||||
|
//
|
||||||
|
// Some services such as Amazon S3 accept alternative values for the payload
|
||||||
|
// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
|
||||||
|
// included in the request signature.
|
||||||
|
//
|
||||||
|
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
|
||||||
|
//
|
||||||
|
// Sign differs from Presign in that it will sign the request using HTTP
|
||||||
|
// header values. This type of signing is intended for http.Request values that
|
||||||
|
// will not be shared, or are shared in a way the header values on the request
|
||||||
|
// will not be lost.
|
||||||
|
//
|
||||||
|
// The passed in request will be modified in place.
|
||||||
|
func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error {
|
||||||
|
options := s.options
|
||||||
|
|
||||||
|
for _, fn := range optFns {
|
||||||
|
fn(&options)
|
||||||
|
}
|
||||||
|
|
||||||
|
signer := &httpSigner{
|
||||||
|
Request: r,
|
||||||
|
PayloadHash: payloadHash,
|
||||||
|
ServiceName: service,
|
||||||
|
Region: region,
|
||||||
|
Credentials: credentials,
|
||||||
|
Time: v4Internal.NewSigningTime(signingTime.UTC()),
|
||||||
|
DisableHeaderHoisting: options.DisableHeaderHoisting,
|
||||||
|
DisableURIPathEscaping: options.DisableURIPathEscaping,
|
||||||
|
DisableSessionToken: options.DisableSessionToken,
|
||||||
|
KeyDerivator: s.keyDerivator,
|
||||||
|
}
|
||||||
|
|
||||||
|
signedRequest, err := signer.Build()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
logSigningInfo(ctx, options, &signedRequest, false)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PresignHTTP signs AWS v4 requests with the payload hash, service name, region
|
||||||
|
// the request is made to, and time the request is signed at. The signTime
|
||||||
|
// allows you to specify that a request is signed for the future, and cannot
|
||||||
|
// be used until then.
|
||||||
|
//
|
||||||
|
// Returns the signed URL and the map of HTTP headers that were included in the
|
||||||
|
// signature or an error if signing the request failed. For presigned requests
|
||||||
|
// these headers and their values must be included on the HTTP request when it
|
||||||
|
// is made. This is helpful to know what header values need to be shared with
|
||||||
|
// the party the presigned request will be distributed to.
|
||||||
|
//
|
||||||
|
// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
|
||||||
|
// must be provided. Even if the request has no payload (aka body). If the
|
||||||
|
// request has no payload you should use the hex encoded SHA-256 of an empty
|
||||||
|
// string as the payloadHash value.
|
||||||
|
//
|
||||||
|
// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||||
|
//
|
||||||
|
// Some services such as Amazon S3 accept alternative values for the payload
|
||||||
|
// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
|
||||||
|
// included in the request signature.
|
||||||
|
//
|
||||||
|
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
|
||||||
|
//
|
||||||
|
// PresignHTTP differs from SignHTTP in that it will sign the request using
|
||||||
|
// query string instead of header values. This allows you to share the
|
||||||
|
// Presigned Request's URL with third parties, or distribute it throughout your
|
||||||
|
// system with minimal dependencies.
|
||||||
|
//
|
||||||
|
// PresignHTTP will not set the expires time of the presigned request
|
||||||
|
// automatically. To specify the expire duration for a request add the
|
||||||
|
// "X-Amz-Expires" query parameter on the request with the value as the
|
||||||
|
// duration in seconds the presigned URL should be considered valid for. This
|
||||||
|
// parameter is not used by all AWS services, and is most notable used by
|
||||||
|
// Amazon S3 APIs.
|
||||||
|
//
|
||||||
|
// expires := 20 * time.Minute
|
||||||
|
// query := req.URL.Query()
|
||||||
|
// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10))
|
||||||
|
// req.URL.RawQuery = query.Encode()
|
||||||
|
//
|
||||||
|
// This method does not modify the provided request.
|
||||||
|
func (s *Signer) PresignHTTP(
|
||||||
|
ctx context.Context, credentials aws.Credentials, r *http.Request,
|
||||||
|
payloadHash string, service string, region string, signingTime time.Time,
|
||||||
|
optFns ...func(*SignerOptions),
|
||||||
|
) (signedURI string, signedHeaders http.Header, err error) {
|
||||||
|
options := s.options
|
||||||
|
|
||||||
|
for _, fn := range optFns {
|
||||||
|
fn(&options)
|
||||||
|
}
|
||||||
|
|
||||||
|
signer := &httpSigner{
|
||||||
|
Request: r.Clone(r.Context()),
|
||||||
|
PayloadHash: payloadHash,
|
||||||
|
ServiceName: service,
|
||||||
|
Region: region,
|
||||||
|
Credentials: credentials,
|
||||||
|
Time: v4Internal.NewSigningTime(signingTime.UTC()),
|
||||||
|
IsPreSign: true,
|
||||||
|
DisableHeaderHoisting: options.DisableHeaderHoisting,
|
||||||
|
DisableURIPathEscaping: options.DisableURIPathEscaping,
|
||||||
|
DisableSessionToken: options.DisableSessionToken,
|
||||||
|
KeyDerivator: s.keyDerivator,
|
||||||
|
}
|
||||||
|
|
||||||
|
signedRequest, err := signer.Build()
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
logSigningInfo(ctx, options, &signedRequest, true)
|
||||||
|
|
||||||
|
signedHeaders = make(http.Header)
|
||||||
|
|
||||||
|
// For the signed headers we canonicalize the header keys in the returned map.
|
||||||
|
// This avoids situations where can standard library double headers like host header. For example the standard
|
||||||
|
// library will set the Host header, even if it is present in lower-case form.
|
||||||
|
for k, v := range signedRequest.SignedHeaders {
|
||||||
|
key := textproto.CanonicalMIMEHeaderKey(k)
|
||||||
|
signedHeaders[key] = append(signedHeaders[key], v...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return signedRequest.Request.URL.String(), signedHeaders, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *httpSigner) buildCredentialScope() string {
|
||||||
|
return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
|
||||||
|
query := url.Values{}
|
||||||
|
unsignedHeaders := http.Header{}
|
||||||
|
|
||||||
|
// A list of headers to be converted to lower case to mitigate a limitation from S3
|
||||||
|
lowerCaseHeaders := map[string]string{
|
||||||
|
"X-Amz-Expected-Bucket-Owner": "x-amz-expected-bucket-owner", // see #2508
|
||||||
|
"X-Amz-Request-Payer": "x-amz-request-payer", // see #2764
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, h := range header {
|
||||||
|
if newKey, ok := lowerCaseHeaders[k]; ok {
|
||||||
|
k = newKey
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.IsValid(k) {
|
||||||
|
query[k] = h
|
||||||
|
} else {
|
||||||
|
unsignedHeaders[k] = h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return query, unsignedHeaders
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
|
||||||
|
signed = make(http.Header)
|
||||||
|
|
||||||
|
var headers []string
|
||||||
|
const hostHeader = "host"
|
||||||
|
headers = append(headers, hostHeader)
|
||||||
|
signed[hostHeader] = append(signed[hostHeader], host)
|
||||||
|
|
||||||
|
//const contentLengthHeader = "content-length"
|
||||||
|
//if length > 0 {
|
||||||
|
// headers = append(headers, contentLengthHeader)
|
||||||
|
// signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
|
||||||
|
//}
|
||||||
|
|
||||||
|
for k, v := range header {
|
||||||
|
if !rule.IsValid(k) {
|
||||||
|
continue // ignored header
|
||||||
|
}
|
||||||
|
//if strings.EqualFold(k, contentLengthHeader) {
|
||||||
|
// // prevent signing already handled content-length header.
|
||||||
|
// continue
|
||||||
|
//}
|
||||||
|
|
||||||
|
lowerCaseKey := strings.ToLower(k)
|
||||||
|
if _, ok := signed[lowerCaseKey]; ok {
|
||||||
|
// include additional values
|
||||||
|
signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
headers = append(headers, lowerCaseKey)
|
||||||
|
signed[lowerCaseKey] = v
|
||||||
|
}
|
||||||
|
sort.Strings(headers)
|
||||||
|
|
||||||
|
signedHeaders = strings.Join(headers, ";")
|
||||||
|
|
||||||
|
var canonicalHeaders strings.Builder
|
||||||
|
n := len(headers)
|
||||||
|
const colon = ':'
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
if headers[i] == hostHeader {
|
||||||
|
canonicalHeaders.WriteString(hostHeader)
|
||||||
|
canonicalHeaders.WriteRune(colon)
|
||||||
|
canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host))
|
||||||
|
} else {
|
||||||
|
canonicalHeaders.WriteString(headers[i])
|
||||||
|
canonicalHeaders.WriteRune(colon)
|
||||||
|
// Trim out leading, trailing, and dedup inner spaces from signed header values.
|
||||||
|
values := signed[headers[i]]
|
||||||
|
for j, v := range values {
|
||||||
|
cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
|
||||||
|
canonicalHeaders.WriteString(cleanedValue)
|
||||||
|
if j < len(values)-1 {
|
||||||
|
canonicalHeaders.WriteRune(',')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
canonicalHeaders.WriteRune('\n')
|
||||||
|
}
|
||||||
|
canonicalHeadersStr = canonicalHeaders.String()
|
||||||
|
|
||||||
|
return signed, signedHeaders, canonicalHeadersStr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string {
|
||||||
|
return strings.Join([]string{
|
||||||
|
method,
|
||||||
|
uri,
|
||||||
|
query,
|
||||||
|
canonicalHeaders,
|
||||||
|
signedHeaders,
|
||||||
|
s.PayloadHash,
|
||||||
|
}, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string {
|
||||||
|
return strings.Join([]string{
|
||||||
|
signingAlgorithm,
|
||||||
|
s.Time.TimeFormat(),
|
||||||
|
credentialScope,
|
||||||
|
hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))),
|
||||||
|
}, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeHash(hash hash.Hash, b []byte) []byte {
|
||||||
|
hash.Reset()
|
||||||
|
hash.Write(b)
|
||||||
|
return hash.Sum(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *httpSigner) buildSignature(strToSign string) (string, error) {
|
||||||
|
key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time)
|
||||||
|
return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) {
|
||||||
|
amzDate := s.Time.TimeFormat()
|
||||||
|
|
||||||
|
if s.IsPreSign {
|
||||||
|
query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm)
|
||||||
|
sessionToken := s.Credentials.SessionToken
|
||||||
|
if !s.DisableSessionToken && len(sessionToken) > 0 {
|
||||||
|
query.Set("X-Amz-Security-Token", sessionToken)
|
||||||
|
}
|
||||||
|
|
||||||
|
query.Set(v4Internal.AmzDateKey, amzDate)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate)
|
||||||
|
|
||||||
|
if !s.DisableSessionToken && len(s.Credentials.SessionToken) > 0 {
|
||||||
|
headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func logSigningInfo(_ context.Context, options SignerOptions, request *signedRequest, isPresign bool) {
|
||||||
|
if !options.LogSigning {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
signedURLMsg := ""
|
||||||
|
if isPresign {
|
||||||
|
signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
if options.Logger != nil {
|
||||||
|
options.Logger.Debug(fmt.Sprintf(logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type signedRequest struct {
|
||||||
|
Request *http.Request
|
||||||
|
SignedHeaders http.Header
|
||||||
|
CanonicalString string
|
||||||
|
StringToSign string
|
||||||
|
PreSigned bool
|
||||||
|
}
|
||||||
|
|
||||||
|
const logSignInfoMsg = `Request Signature:
|
||||||
|
---[ CANONICAL STRING ]-----------------------------
|
||||||
|
%s
|
||||||
|
---[ STRING TO SIGN ]--------------------------------
|
||||||
|
%s%s
|
||||||
|
-----------------------------------------------------`
|
||||||
|
const logSignedURLMsg = `
|
||||||
|
---[ SIGNED URL ]------------------------------------
|
||||||
|
%s`
|
370
api/auth/signer/v4sdk2/signer/v4/v4_test.go
Normal file
370
api/auth/signer/v4sdk2/signer/v4/v4_test.go
Normal file
|
@ -0,0 +1,370 @@
|
||||||
|
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/v4_test.go
|
||||||
|
// with changes:
|
||||||
|
// * don't duplicate content-length as signed header
|
||||||
|
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/internal/v4"
|
||||||
|
"github.com/aws/aws-sdk-go-v2/aws"
|
||||||
|
)
|
||||||
|
|
||||||
|
var testCredentials = aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET", SessionToken: "SESSION"}
|
||||||
|
|
||||||
|
func buildRequest(serviceName, region, body string) (*http.Request, string) {
|
||||||
|
reader := strings.NewReader(body)
|
||||||
|
return buildRequestWithBodyReader(serviceName, region, reader)
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildRequestWithBodyReader(serviceName, region string, body io.Reader) (*http.Request, string) {
|
||||||
|
var bodyLen int
|
||||||
|
|
||||||
|
type lenner interface {
|
||||||
|
Len() int
|
||||||
|
}
|
||||||
|
if lr, ok := body.(lenner); ok {
|
||||||
|
bodyLen = lr.Len()
|
||||||
|
}
|
||||||
|
|
||||||
|
endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
|
||||||
|
req, _ := http.NewRequest("POST", endpoint, body)
|
||||||
|
req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"
|
||||||
|
req.Header.Set("X-Amz-Target", "prefix.Operation")
|
||||||
|
req.Header.Set("Content-Type", "application/x-amz-json-1.0")
|
||||||
|
|
||||||
|
if bodyLen > 0 {
|
||||||
|
req.ContentLength = int64(bodyLen)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
|
||||||
|
req.Header.Add("X-Amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
|
||||||
|
req.Header.Add("X-amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
|
||||||
|
|
||||||
|
h := sha256.New()
|
||||||
|
_, _ = io.Copy(h, body)
|
||||||
|
payloadHash := hex.EncodeToString(h.Sum(nil))
|
||||||
|
|
||||||
|
return req, payloadHash
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPresignRequest(t *testing.T) {
|
||||||
|
req, body := buildRequest("dynamodb", "us-east-1", "{}")
|
||||||
|
req.Header.Set("Content-Length", "2")
|
||||||
|
|
||||||
|
query := req.URL.Query()
|
||||||
|
query.Set("X-Amz-Expires", "300")
|
||||||
|
req.URL.RawQuery = query.Encode()
|
||||||
|
|
||||||
|
signer := NewSigner()
|
||||||
|
signed, headers, err := signer.PresignHTTP(context.Background(), testCredentials, req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedDate := "19700101T000000Z"
|
||||||
|
expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
|
||||||
|
expectedSig := "122f0b9e091e4ba84286097e2b3404a1f1f4c4aad479adda95b7dff0ccbe5581"
|
||||||
|
expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
|
||||||
|
expectedTarget := "prefix.Operation"
|
||||||
|
|
||||||
|
q, err := url.ParseQuery(signed[strings.Index(signed, "?"):])
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if e, a := expectedSig, q.Get("X-Amz-Signature"); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := expectedCred, q.Get("X-Amz-Credential"); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
|
||||||
|
t.Errorf("expect %v to be empty", a)
|
||||||
|
}
|
||||||
|
if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, h := range strings.Split(expectedHeaders, ";") {
|
||||||
|
v := headers.Get(h)
|
||||||
|
if len(v) == 0 {
|
||||||
|
t.Errorf("expect %v, to be present in header map", h)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPresignBodyWithArrayRequest(t *testing.T) {
|
||||||
|
req, body := buildRequest("dynamodb", "us-east-1", "{}")
|
||||||
|
req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
|
||||||
|
req.Header.Set("Content-Length", "2")
|
||||||
|
|
||||||
|
query := req.URL.Query()
|
||||||
|
query.Set("X-Amz-Expires", "300")
|
||||||
|
req.URL.RawQuery = query.Encode()
|
||||||
|
|
||||||
|
signer := NewSigner()
|
||||||
|
signed, headers, err := signer.PresignHTTP(context.Background(), testCredentials, req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
q, err := url.ParseQuery(signed[strings.Index(signed, "?"):])
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedDate := "19700101T000000Z"
|
||||||
|
expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
|
||||||
|
expectedSig := "e3ac55addee8711b76c6d608d762cff285fe8b627a057f8b5ec9268cf82c08b1"
|
||||||
|
expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
|
||||||
|
expectedTarget := "prefix.Operation"
|
||||||
|
|
||||||
|
if e, a := expectedSig, q.Get("X-Amz-Signature"); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := expectedCred, q.Get("X-Amz-Credential"); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
|
||||||
|
t.Errorf("expect %v to be empty, was not", a)
|
||||||
|
}
|
||||||
|
if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, h := range strings.Split(expectedHeaders, ";") {
|
||||||
|
v := headers.Get(h)
|
||||||
|
if len(v) == 0 {
|
||||||
|
t.Errorf("expect %v, to be present in header map", h)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSignRequest(t *testing.T) {
|
||||||
|
req, body := buildRequest("dynamodb", "us-east-1", "{}")
|
||||||
|
req.Header.Set("Content-Length", "2")
|
||||||
|
signer := NewSigner()
|
||||||
|
err := signer.SignHTTP(context.Background(), testCredentials, req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedDate := "19700101T000000Z"
|
||||||
|
expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-security-token;x-amz-target, Signature=a518299330494908a70222cec6899f6f32f297f8595f6df1776d998936652ad9"
|
||||||
|
|
||||||
|
q := req.Header
|
||||||
|
if e, a := expectedSig, q.Get("Authorization"); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuildCanonicalRequest(t *testing.T) {
|
||||||
|
req, _ := buildRequest("dynamodb", "us-east-1", "{}")
|
||||||
|
req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
|
||||||
|
|
||||||
|
ctx := &httpSigner{
|
||||||
|
ServiceName: "dynamodb",
|
||||||
|
Region: "us-east-1",
|
||||||
|
Request: req,
|
||||||
|
Time: v4Internal.NewSigningTime(time.Now()),
|
||||||
|
KeyDerivator: v4Internal.NewSigningKeyDeriver(),
|
||||||
|
}
|
||||||
|
|
||||||
|
build, err := ctx.Build()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := "https://example.org/bucket/key-._~,!@#$%^&*()?Foo=a&Foo=m&Foo=o&Foo=z"
|
||||||
|
if e, a := expected, build.Request.URL.String(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSigner_SignHTTP_NoReplaceRequestBody(t *testing.T) {
|
||||||
|
req, bodyHash := buildRequest("dynamodb", "us-east-1", "{}")
|
||||||
|
req.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
|
||||||
|
|
||||||
|
s := NewSigner()
|
||||||
|
|
||||||
|
origBody := req.Body
|
||||||
|
|
||||||
|
err := s.SignHTTP(context.Background(), testCredentials, req, bodyHash, "dynamodb", "us-east-1", time.Now())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.Body != origBody {
|
||||||
|
t.Errorf("expect request body to not be chagned")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRequestHost(t *testing.T) {
|
||||||
|
req, _ := buildRequest("dynamodb", "us-east-1", "{}")
|
||||||
|
req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
|
||||||
|
req.Host = "myhost"
|
||||||
|
|
||||||
|
query := req.URL.Query()
|
||||||
|
query.Set("X-Amz-Expires", "5")
|
||||||
|
req.URL.RawQuery = query.Encode()
|
||||||
|
|
||||||
|
ctx := &httpSigner{
|
||||||
|
ServiceName: "dynamodb",
|
||||||
|
Region: "us-east-1",
|
||||||
|
Request: req,
|
||||||
|
Time: v4Internal.NewSigningTime(time.Now()),
|
||||||
|
KeyDerivator: v4Internal.NewSigningKeyDeriver(),
|
||||||
|
}
|
||||||
|
|
||||||
|
build, err := ctx.Build()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(build.CanonicalString, "host:"+req.Host) {
|
||||||
|
t.Errorf("canonical host header invalid")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSign_buildCanonicalHeadersContentLengthPresent(t *testing.T) {
|
||||||
|
body := `{"description": "this is a test"}`
|
||||||
|
req, _ := buildRequest("dynamodb", "us-east-1", body)
|
||||||
|
req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
|
||||||
|
req.Host = "myhost"
|
||||||
|
|
||||||
|
contentLength := fmt.Sprintf("%d", len([]byte(body)))
|
||||||
|
req.Header.Add("Content-Length", contentLength)
|
||||||
|
|
||||||
|
query := req.URL.Query()
|
||||||
|
query.Set("X-Amz-Expires", "5")
|
||||||
|
req.URL.RawQuery = query.Encode()
|
||||||
|
|
||||||
|
ctx := &httpSigner{
|
||||||
|
ServiceName: "dynamodb",
|
||||||
|
Region: "us-east-1",
|
||||||
|
Request: req,
|
||||||
|
Time: v4Internal.NewSigningTime(time.Now()),
|
||||||
|
KeyDerivator: v4Internal.NewSigningKeyDeriver(),
|
||||||
|
}
|
||||||
|
|
||||||
|
build, err := ctx.Build()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(build.CanonicalString, "content-length:"+contentLength+"\n") {
|
||||||
|
t.Errorf("canonical header content-length invalid")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSign_buildCanonicalHeaders(t *testing.T) {
|
||||||
|
serviceName := "mockAPI"
|
||||||
|
region := "mock-region"
|
||||||
|
endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", endpoint, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request, %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("FooInnerSpace", " inner space ")
|
||||||
|
req.Header.Set("FooLeadingSpace", " leading-space")
|
||||||
|
req.Header.Add("FooMultipleSpace", "no-space")
|
||||||
|
req.Header.Add("FooMultipleSpace", "\ttab-space")
|
||||||
|
req.Header.Add("FooMultipleSpace", "trailing-space ")
|
||||||
|
req.Header.Set("FooNoSpace", "no-space")
|
||||||
|
req.Header.Set("FooTabSpace", "\ttab-space\t")
|
||||||
|
req.Header.Set("FooTrailingSpace", "trailing-space ")
|
||||||
|
req.Header.Set("FooWrappedSpace", " wrapped-space ")
|
||||||
|
|
||||||
|
ctx := &httpSigner{
|
||||||
|
ServiceName: serviceName,
|
||||||
|
Region: region,
|
||||||
|
Request: req,
|
||||||
|
Time: v4Internal.NewSigningTime(time.Date(2021, 10, 20, 12, 42, 0, 0, time.UTC)),
|
||||||
|
KeyDerivator: v4Internal.NewSigningKeyDeriver(),
|
||||||
|
}
|
||||||
|
|
||||||
|
build, err := ctx.Build()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expectCanonicalString := strings.Join([]string{
|
||||||
|
`POST`,
|
||||||
|
`/`,
|
||||||
|
``,
|
||||||
|
`fooinnerspace:inner space`,
|
||||||
|
`fooleadingspace:leading-space`,
|
||||||
|
`foomultiplespace:no-space,tab-space,trailing-space`,
|
||||||
|
`foonospace:no-space`,
|
||||||
|
`footabspace:tab-space`,
|
||||||
|
`footrailingspace:trailing-space`,
|
||||||
|
`foowrappedspace:wrapped-space`,
|
||||||
|
`host:mockAPI.mock-region.amazonaws.com`,
|
||||||
|
`x-amz-date:20211020T124200Z`,
|
||||||
|
``,
|
||||||
|
`fooinnerspace;fooleadingspace;foomultiplespace;foonospace;footabspace;footrailingspace;foowrappedspace;host;x-amz-date`,
|
||||||
|
``,
|
||||||
|
}, "\n")
|
||||||
|
if diff := cmpDiff(expectCanonicalString, build.CanonicalString); diff != "" {
|
||||||
|
t.Errorf("expect match, got\n%s", diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkPresignRequest(b *testing.B) {
|
||||||
|
signer := NewSigner()
|
||||||
|
req, bodyHash := buildRequest("dynamodb", "us-east-1", "{}")
|
||||||
|
|
||||||
|
query := req.URL.Query()
|
||||||
|
query.Set("X-Amz-Expires", "5")
|
||||||
|
req.URL.RawQuery = query.Encode()
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
signer.PresignHTTP(context.Background(), testCredentials, req, bodyHash, "dynamodb", "us-east-1", time.Now())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkSignRequest(b *testing.B) {
|
||||||
|
signer := NewSigner()
|
||||||
|
req, bodyHash := buildRequest("dynamodb", "us-east-1", "{}")
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
signer.SignHTTP(context.Background(), testCredentials, req, bodyHash, "dynamodb", "us-east-1", time.Now())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func cmpDiff(e, a interface{}) string {
|
||||||
|
if !reflect.DeepEqual(e, a) {
|
||||||
|
return fmt.Sprintf("%v != %v", e, a)
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
18
api/cache/accessbox.go
vendored
18
api/cache/accessbox.go
vendored
|
@ -30,6 +30,7 @@ type (
|
||||||
Box *accessbox.Box
|
Box *accessbox.Box
|
||||||
Attributes []object.Attribute
|
Attributes []object.Attribute
|
||||||
PutTime time.Time
|
PutTime time.Time
|
||||||
|
Address *oid.Address
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -57,8 +58,8 @@ func NewAccessBoxCache(config *Config) *AccessBoxCache {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns a cached accessbox.
|
// Get returns a cached accessbox.
|
||||||
func (o *AccessBoxCache) Get(address oid.Address) *AccessBoxCacheValue {
|
func (o *AccessBoxCache) Get(accessKeyID string) *AccessBoxCacheValue {
|
||||||
entry, err := o.cache.Get(address)
|
entry, err := o.cache.Get(accessKeyID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -74,16 +75,11 @@ func (o *AccessBoxCache) Get(address oid.Address) *AccessBoxCacheValue {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put stores an accessbox to cache.
|
// Put stores an accessbox to cache.
|
||||||
func (o *AccessBoxCache) Put(address oid.Address, box *accessbox.Box, attrs []object.Attribute) error {
|
func (o *AccessBoxCache) Put(accessKeyID string, val *AccessBoxCacheValue) error {
|
||||||
val := &AccessBoxCacheValue{
|
return o.cache.Set(accessKeyID, val)
|
||||||
Box: box,
|
|
||||||
Attributes: attrs,
|
|
||||||
PutTime: time.Now(),
|
|
||||||
}
|
|
||||||
return o.cache.Set(address, val)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete removes an accessbox from cache.
|
// Delete removes an accessbox from cache.
|
||||||
func (o *AccessBoxCache) Delete(address oid.Address) {
|
func (o *AccessBoxCache) Delete(accessKeyID string) {
|
||||||
o.cache.Remove(address)
|
o.cache.Remove(accessKeyID)
|
||||||
}
|
}
|
||||||
|
|
55
api/cache/buckets.go
vendored
55
api/cache/buckets.go
vendored
|
@ -6,14 +6,16 @@ import (
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
"github.com/bluele/gcache"
|
"github.com/bluele/gcache"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BucketCache contains cache with objects and the lifetime of cache entries.
|
// BucketCache contains cache with objects and the lifetime of cache entries.
|
||||||
type BucketCache struct {
|
type BucketCache struct {
|
||||||
cache gcache.Cache
|
cache gcache.Cache
|
||||||
logger *zap.Logger
|
cidCache gcache.Cache
|
||||||
|
logger *zap.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -33,14 +35,45 @@ func DefaultBucketConfig(logger *zap.Logger) *Config {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBucketCache creates an object of BucketCache.
|
// NewBucketCache creates an object of BucketCache.
|
||||||
func NewBucketCache(config *Config) *BucketCache {
|
func NewBucketCache(config *Config, cidCache bool) *BucketCache {
|
||||||
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
cache := &BucketCache{
|
||||||
return &BucketCache{cache: gc, logger: config.Logger}
|
cache: gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build(),
|
||||||
|
logger: config.Logger,
|
||||||
|
}
|
||||||
|
|
||||||
|
if cidCache {
|
||||||
|
cache.cidCache = gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
||||||
|
}
|
||||||
|
return cache
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns a cached object.
|
// Get returns a cached object.
|
||||||
func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
|
func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
|
||||||
entry, err := o.cache.Get(formKey(ns, bktName))
|
return o.get(formKey(ns, bktName))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *BucketCache) GetByCID(cnrID cid.ID) *data.BucketInfo {
|
||||||
|
if o.cidCache == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
entry, err := o.cidCache.Get(cnrID)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
key, ok := entry.(string)
|
||||||
|
if !ok {
|
||||||
|
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
|
zap.String("expected", fmt.Sprintf("%T", key)))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return o.get(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *BucketCache) get(key string) *data.BucketInfo {
|
||||||
|
entry, err := o.cache.Get(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -57,11 +90,21 @@ func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
|
||||||
|
|
||||||
// Put puts an object to cache.
|
// Put puts an object to cache.
|
||||||
func (o *BucketCache) Put(bkt *data.BucketInfo) error {
|
func (o *BucketCache) Put(bkt *data.BucketInfo) error {
|
||||||
|
if o.cidCache != nil {
|
||||||
|
if err := o.cidCache.Set(bkt.CID, formKey(bkt.Zone, bkt.Name)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
|
return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete deletes an object from cache.
|
// Delete deletes an object from cache.
|
||||||
func (o *BucketCache) Delete(bkt *data.BucketInfo) bool {
|
func (o *BucketCache) Delete(bkt *data.BucketInfo) bool {
|
||||||
|
if o.cidCache != nil {
|
||||||
|
o.cidCache.Remove(bkt.CID)
|
||||||
|
}
|
||||||
|
|
||||||
return o.cache.Remove(formKey(bkt.Zone, bkt.Name))
|
return o.cache.Remove(formKey(bkt.Zone, bkt.Name))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
44
api/cache/cache_test.go
vendored
44
api/cache/cache_test.go
vendored
|
@ -1,13 +1,14 @@
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
|
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||||
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
|
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
|
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||||
|
@ -22,23 +23,26 @@ func TestAccessBoxCacheType(t *testing.T) {
|
||||||
|
|
||||||
addr := oidtest.Address()
|
addr := oidtest.Address()
|
||||||
box := &accessbox.Box{}
|
box := &accessbox.Box{}
|
||||||
var attrs []object.Attribute
|
val := &AccessBoxCacheValue{
|
||||||
|
Box: box,
|
||||||
|
}
|
||||||
|
|
||||||
err := cache.Put(addr, box, attrs)
|
accessKeyID := getAccessKeyID(addr)
|
||||||
|
|
||||||
|
err := cache.Put(accessKeyID, val)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
val := cache.Get(addr)
|
resVal := cache.Get(accessKeyID)
|
||||||
require.Equal(t, box, val.Box)
|
require.Equal(t, box, resVal.Box)
|
||||||
require.Equal(t, attrs, val.Attributes)
|
|
||||||
require.Equal(t, 0, observedLog.Len())
|
require.Equal(t, 0, observedLog.Len())
|
||||||
|
|
||||||
err = cache.cache.Set(addr, "tmp")
|
err = cache.cache.Set(accessKeyID, "tmp")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assertInvalidCacheEntry(t, cache.Get(addr), observedLog)
|
assertInvalidCacheEntry(t, cache.Get(accessKeyID), observedLog)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBucketsCacheType(t *testing.T) {
|
func TestBucketsCacheType(t *testing.T) {
|
||||||
logger, observedLog := getObservedLogger()
|
logger, observedLog := getObservedLogger()
|
||||||
cache := NewBucketCache(DefaultBucketConfig(logger))
|
cache := NewBucketCache(DefaultBucketConfig(logger), false)
|
||||||
|
|
||||||
bktInfo := &data.BucketInfo{Name: "bucket"}
|
bktInfo := &data.BucketInfo{Name: "bucket"}
|
||||||
|
|
||||||
|
@ -182,24 +186,6 @@ func TestSettingsCacheType(t *testing.T) {
|
||||||
assertInvalidCacheEntry(t, cache.GetSettings(key), observedLog)
|
assertInvalidCacheEntry(t, cache.GetSettings(key), observedLog)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNotificationConfigurationCacheType(t *testing.T) {
|
|
||||||
logger, observedLog := getObservedLogger()
|
|
||||||
cache := NewSystemCache(DefaultSystemConfig(logger))
|
|
||||||
|
|
||||||
key := "key"
|
|
||||||
notificationConfig := &data.NotificationConfiguration{}
|
|
||||||
|
|
||||||
err := cache.PutNotificationConfiguration(key, notificationConfig)
|
|
||||||
require.NoError(t, err)
|
|
||||||
val := cache.GetNotificationConfiguration(key)
|
|
||||||
require.Equal(t, notificationConfig, val)
|
|
||||||
require.Equal(t, 0, observedLog.Len())
|
|
||||||
|
|
||||||
err = cache.cache.Set(key, "tmp")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assertInvalidCacheEntry(t, cache.GetNotificationConfiguration(key), observedLog)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFrostFSIDSubjectCacheType(t *testing.T) {
|
func TestFrostFSIDSubjectCacheType(t *testing.T) {
|
||||||
logger, observedLog := getObservedLogger()
|
logger, observedLog := getObservedLogger()
|
||||||
cache := NewFrostfsIDCache(DefaultFrostfsIDConfig(logger))
|
cache := NewFrostfsIDCache(DefaultFrostfsIDConfig(logger))
|
||||||
|
@ -248,3 +234,7 @@ func getObservedLogger() (*zap.Logger, *observer.ObservedLogs) {
|
||||||
loggerCore, observedLog := observer.New(zap.WarnLevel)
|
loggerCore, observedLog := observer.New(zap.WarnLevel)
|
||||||
return zap.New(loggerCore), observedLog
|
return zap.New(loggerCore), observedLog
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getAccessKeyID(addr oid.Address) string {
|
||||||
|
return strings.ReplaceAll(addr.EncodeToString(), "/", "0")
|
||||||
|
}
|
||||||
|
|
86
api/cache/network.go
vendored
Normal file
86
api/cache/network.go
vendored
Normal file
|
@ -0,0 +1,86 @@
|
||||||
|
package cache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||||
|
"github.com/bluele/gcache"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
// NetworkCache provides cache for network-related values.
|
||||||
|
NetworkCache struct {
|
||||||
|
cache gcache.Cache
|
||||||
|
logger *zap.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
// NetworkCacheConfig stores expiration params for cache.
|
||||||
|
NetworkCacheConfig struct {
|
||||||
|
Lifetime time.Duration
|
||||||
|
Logger *zap.Logger
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
DefaultNetworkCacheLifetime = 1 * time.Minute
|
||||||
|
networkCacheSize = 2
|
||||||
|
networkInfoKey = "network_info"
|
||||||
|
netmapKey = "netmap"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultNetworkConfig returns new default cache expiration values.
|
||||||
|
func DefaultNetworkConfig(logger *zap.Logger) *NetworkCacheConfig {
|
||||||
|
return &NetworkCacheConfig{
|
||||||
|
Lifetime: DefaultNetworkCacheLifetime,
|
||||||
|
Logger: logger,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNetworkCache creates an object of NetworkCache.
|
||||||
|
func NewNetworkCache(config *NetworkCacheConfig) *NetworkCache {
|
||||||
|
gc := gcache.New(networkCacheSize).LRU().Expiration(config.Lifetime).Build()
|
||||||
|
return &NetworkCache{cache: gc, logger: config.Logger}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *NetworkCache) GetNetworkInfo() *netmap.NetworkInfo {
|
||||||
|
entry, err := c.cache.Get(networkInfoKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
result, ok := entry.(netmap.NetworkInfo)
|
||||||
|
if !ok {
|
||||||
|
c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return &result
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *NetworkCache) PutNetworkInfo(info netmap.NetworkInfo) error {
|
||||||
|
return c.cache.Set(networkInfoKey, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *NetworkCache) GetNetmap() *netmap.NetMap {
|
||||||
|
entry, err := c.cache.Get(netmapKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
result, ok := entry.(netmap.NetMap)
|
||||||
|
if !ok {
|
||||||
|
c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return &result
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *NetworkCache) PutNetmap(nm netmap.NetMap) error {
|
||||||
|
return c.cache.Set(netmapKey, nm)
|
||||||
|
}
|
16
api/cache/system.go
vendored
16
api/cache/system.go
vendored
|
@ -88,13 +88,13 @@ func (o *SystemCache) GetCORS(key string) *data.CORSConfiguration {
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *SystemCache) GetSettings(key string) *data.BucketSettings {
|
func (o *SystemCache) GetLifecycleConfiguration(key string) *data.LifecycleConfiguration {
|
||||||
entry, err := o.cache.Get(key)
|
entry, err := o.cache.Get(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
result, ok := entry.(*data.BucketSettings)
|
result, ok := entry.(*data.LifecycleConfiguration)
|
||||||
if !ok {
|
if !ok {
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
|
@ -104,13 +104,13 @@ func (o *SystemCache) GetSettings(key string) *data.BucketSettings {
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *SystemCache) GetNotificationConfiguration(key string) *data.NotificationConfiguration {
|
func (o *SystemCache) GetSettings(key string) *data.BucketSettings {
|
||||||
entry, err := o.cache.Get(key)
|
entry, err := o.cache.Get(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
result, ok := entry.(*data.NotificationConfiguration)
|
result, ok := entry.(*data.BucketSettings)
|
||||||
if !ok {
|
if !ok {
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
|
@ -149,12 +149,12 @@ func (o *SystemCache) PutCORS(key string, obj *data.CORSConfiguration) error {
|
||||||
return o.cache.Set(key, obj)
|
return o.cache.Set(key, obj)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *SystemCache) PutSettings(key string, settings *data.BucketSettings) error {
|
func (o *SystemCache) PutLifecycleConfiguration(key string, obj *data.LifecycleConfiguration) error {
|
||||||
return o.cache.Set(key, settings)
|
return o.cache.Set(key, obj)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *SystemCache) PutNotificationConfiguration(key string, obj *data.NotificationConfiguration) error {
|
func (o *SystemCache) PutSettings(key string, settings *data.BucketSettings) error {
|
||||||
return o.cache.Set(key, obj)
|
return o.cache.Set(key, settings)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutTagging puts tags of a bucket or an object.
|
// PutTagging puts tags of a bucket or an object.
|
||||||
|
|
|
@ -6,15 +6,16 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
bktSettingsObject = ".s3-settings"
|
bktSettingsObject = ".s3-settings"
|
||||||
bktCORSConfigurationObject = ".s3-cors"
|
bktCORSConfigurationObject = ".s3-cors"
|
||||||
bktNotificationConfigurationObject = ".s3-notifications"
|
bktLifecycleConfigurationObject = ".s3-lifecycle"
|
||||||
|
|
||||||
VersioningUnversioned = "Unversioned"
|
VersioningUnversioned = "Unversioned"
|
||||||
VersioningEnabled = "Enabled"
|
VersioningEnabled = "Enabled"
|
||||||
|
@ -32,7 +33,7 @@ type (
|
||||||
LocationConstraint string
|
LocationConstraint string
|
||||||
ObjectLockEnabled bool
|
ObjectLockEnabled bool
|
||||||
HomomorphicHashDisabled bool
|
HomomorphicHashDisabled bool
|
||||||
APEEnabled bool
|
PlacementPolicy netmap.PlacementPolicy
|
||||||
}
|
}
|
||||||
|
|
||||||
// ObjectInfo holds S3 object data.
|
// ObjectInfo holds S3 object data.
|
||||||
|
@ -52,14 +53,6 @@ type (
|
||||||
Headers map[string]string
|
Headers map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
// NotificationInfo store info to send s3 notification.
|
|
||||||
NotificationInfo struct {
|
|
||||||
Name string
|
|
||||||
Version string
|
|
||||||
Size uint64
|
|
||||||
HashSum string
|
|
||||||
}
|
|
||||||
|
|
||||||
// BucketSettings stores settings such as versioning.
|
// BucketSettings stores settings such as versioning.
|
||||||
BucketSettings struct {
|
BucketSettings struct {
|
||||||
Versioning string
|
Versioning string
|
||||||
|
@ -91,26 +84,27 @@ type (
|
||||||
VersionID string
|
VersionID string
|
||||||
NoErrorOnDeleteMarker bool
|
NoErrorOnDeleteMarker bool
|
||||||
}
|
}
|
||||||
)
|
|
||||||
|
|
||||||
// NotificationInfoFromObject creates new NotificationInfo from ObjectInfo.
|
// CreatedObjectInfo stores created object info.
|
||||||
func NotificationInfoFromObject(objInfo *ObjectInfo, md5Enabled bool) *NotificationInfo {
|
CreatedObjectInfo struct {
|
||||||
return &NotificationInfo{
|
ID oid.ID
|
||||||
Name: objInfo.Name,
|
Size uint64
|
||||||
Version: objInfo.VersionID(),
|
HashSum []byte
|
||||||
Size: objInfo.Size,
|
MD5Sum []byte
|
||||||
HashSum: Quote(objInfo.ETag(md5Enabled)),
|
CreationEpoch uint64
|
||||||
}
|
}
|
||||||
}
|
)
|
||||||
|
|
||||||
// SettingsObjectName is a system name for a bucket settings file.
|
// SettingsObjectName is a system name for a bucket settings file.
|
||||||
func (b *BucketInfo) SettingsObjectName() string { return bktSettingsObject }
|
func (b *BucketInfo) SettingsObjectName() string { return bktSettingsObject }
|
||||||
|
|
||||||
// CORSObjectName returns a system name for a bucket CORS configuration file.
|
// CORSObjectName returns a system name for a bucket CORS configuration file.
|
||||||
func (b *BucketInfo) CORSObjectName() string { return bktCORSConfigurationObject }
|
func (b *BucketInfo) CORSObjectName() string {
|
||||||
|
return b.CID.EncodeToString() + bktCORSConfigurationObject
|
||||||
|
}
|
||||||
|
|
||||||
func (b *BucketInfo) NotificationConfigurationObjectName() string {
|
func (b *BucketInfo) LifecycleConfigurationObjectName() string {
|
||||||
return bktNotificationConfigurationObject
|
return b.CID.EncodeToString() + bktLifecycleConfigurationObject
|
||||||
}
|
}
|
||||||
|
|
||||||
// VersionID returns object version from ObjectInfo.
|
// VersionID returns object version from ObjectInfo.
|
||||||
|
|
56
api/data/lifecycle.go
Normal file
56
api/data/lifecycle.go
Normal file
|
@ -0,0 +1,56 @@
|
||||||
|
package data
|
||||||
|
|
||||||
|
import "encoding/xml"
|
||||||
|
|
||||||
|
const (
|
||||||
|
LifecycleStatusEnabled = "Enabled"
|
||||||
|
LifecycleStatusDisabled = "Disabled"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
LifecycleConfiguration struct {
|
||||||
|
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LifecycleConfiguration" json:"-"`
|
||||||
|
Rules []LifecycleRule `xml:"Rule"`
|
||||||
|
}
|
||||||
|
|
||||||
|
LifecycleRule struct {
|
||||||
|
Status string `xml:"Status,omitempty"`
|
||||||
|
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"`
|
||||||
|
Expiration *LifecycleExpiration `xml:"Expiration,omitempty"`
|
||||||
|
Filter *LifecycleRuleFilter `xml:"Filter,omitempty"`
|
||||||
|
ID string `xml:"ID,omitempty"`
|
||||||
|
NonCurrentVersionExpiration *NonCurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
|
||||||
|
Prefix string `xml:"Prefix,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
AbortIncompleteMultipartUpload struct {
|
||||||
|
DaysAfterInitiation *int `xml:"DaysAfterInitiation,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
LifecycleExpiration struct {
|
||||||
|
Date string `xml:"Date,omitempty"`
|
||||||
|
Days *int `xml:"Days,omitempty"`
|
||||||
|
Epoch *uint64 `xml:"Epoch,omitempty"`
|
||||||
|
ExpiredObjectDeleteMarker *bool `xml:"ExpiredObjectDeleteMarker,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
LifecycleRuleFilter struct {
|
||||||
|
And *LifecycleRuleAndOperator `xml:"And,omitempty"`
|
||||||
|
ObjectSizeGreaterThan *uint64 `xml:"ObjectSizeGreaterThan,omitempty"`
|
||||||
|
ObjectSizeLessThan *uint64 `xml:"ObjectSizeLessThan,omitempty"`
|
||||||
|
Prefix string `xml:"Prefix,omitempty"`
|
||||||
|
Tag *Tag `xml:"Tag,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
LifecycleRuleAndOperator struct {
|
||||||
|
ObjectSizeGreaterThan *uint64 `xml:"ObjectSizeGreaterThan,omitempty"`
|
||||||
|
ObjectSizeLessThan *uint64 `xml:"ObjectSizeLessThan,omitempty"`
|
||||||
|
Prefix string `xml:"Prefix,omitempty"`
|
||||||
|
Tags []Tag `xml:"Tag"`
|
||||||
|
}
|
||||||
|
|
||||||
|
NonCurrentVersionExpiration struct {
|
||||||
|
NewerNonCurrentVersions *int `xml:"NewerNoncurrentVersions,omitempty"`
|
||||||
|
NonCurrentDays *int `xml:"NoncurrentDays,omitempty"`
|
||||||
|
}
|
||||||
|
)
|
|
@ -1,42 +0,0 @@
|
||||||
package data
|
|
||||||
|
|
||||||
import "encoding/xml"
|
|
||||||
|
|
||||||
type (
|
|
||||||
NotificationConfiguration struct {
|
|
||||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NotificationConfiguration" json:"-"`
|
|
||||||
QueueConfigurations []QueueConfiguration `xml:"QueueConfiguration" json:"QueueConfigurations"`
|
|
||||||
// Not supported topics
|
|
||||||
TopicConfigurations []TopicConfiguration `xml:"TopicConfiguration" json:"TopicConfigurations"`
|
|
||||||
LambdaFunctionConfigurations []LambdaFunctionConfiguration `xml:"CloudFunctionConfiguration" json:"CloudFunctionConfigurations"`
|
|
||||||
}
|
|
||||||
|
|
||||||
QueueConfiguration struct {
|
|
||||||
ID string `xml:"Id" json:"Id"`
|
|
||||||
QueueArn string `xml:"Queue" json:"Queue"`
|
|
||||||
Events []string `xml:"Event" json:"Events"`
|
|
||||||
Filter Filter `xml:"Filter" json:"Filter"`
|
|
||||||
}
|
|
||||||
|
|
||||||
Filter struct {
|
|
||||||
Key Key `xml:"S3Key" json:"S3Key"`
|
|
||||||
}
|
|
||||||
|
|
||||||
Key struct {
|
|
||||||
FilterRules []FilterRule `xml:"FilterRule" json:"FilterRules"`
|
|
||||||
}
|
|
||||||
|
|
||||||
FilterRule struct {
|
|
||||||
Name string `xml:"Name" json:"Name"`
|
|
||||||
Value string `xml:"Value" json:"Value"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TopicConfiguration and LambdaFunctionConfiguration -- we don't support these configurations,
|
|
||||||
// but we need them to detect in notification configurations in requests.
|
|
||||||
TopicConfiguration struct{}
|
|
||||||
LambdaFunctionConfiguration struct{}
|
|
||||||
)
|
|
||||||
|
|
||||||
func (n NotificationConfiguration) IsEmpty() bool {
|
|
||||||
return len(n.QueueConfigurations) == 0 && len(n.TopicConfigurations) == 0 && len(n.LambdaFunctionConfigurations) == 0
|
|
||||||
}
|
|
|
@ -72,6 +72,7 @@ type BaseNodeVersion struct {
|
||||||
Created *time.Time
|
Created *time.Time
|
||||||
Owner *user.ID
|
Owner *user.ID
|
||||||
IsDeleteMarker bool
|
IsDeleteMarker bool
|
||||||
|
CreationEpoch uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
func (v *BaseNodeVersion) GetETag(md5Enabled bool) string {
|
func (v *BaseNodeVersion) GetETag(md5Enabled bool) string {
|
||||||
|
@ -110,6 +111,7 @@ type MultipartInfo struct {
|
||||||
Meta map[string]string
|
Meta map[string]string
|
||||||
CopiesNumbers []uint32
|
CopiesNumbers []uint32
|
||||||
Finished bool
|
Finished bool
|
||||||
|
CreationEpoch uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// PartInfo is upload information about part.
|
// PartInfo is upload information about part.
|
||||||
|
@ -124,6 +126,14 @@ type PartInfo struct {
|
||||||
Created time.Time `json:"created"`
|
Created time.Time `json:"created"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type PartInfoExtended struct {
|
||||||
|
PartInfo
|
||||||
|
|
||||||
|
// Timestamp is used to find the latest version of part info in case of tree split
|
||||||
|
// when there are multiple nodes for the same part.
|
||||||
|
Timestamp uint64
|
||||||
|
}
|
||||||
|
|
||||||
// ToHeaderString form short part representation to use in S3-Completed-Parts header.
|
// ToHeaderString form short part representation to use in S3-Completed-Parts header.
|
||||||
func (p *PartInfo) ToHeaderString() string {
|
func (p *PartInfo) ToHeaderString() string {
|
||||||
// ETag value contains SHA256 checksum which is used while getting object parts attributes.
|
// ETag value contains SHA256 checksum which is used while getting object parts attributes.
|
||||||
|
|
|
@ -1,10 +1,13 @@
|
||||||
package errors
|
package errors
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
frosterrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
|
||||||
|
frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
@ -187,6 +190,9 @@ const (
|
||||||
ErrInvalidRequestLargeCopy
|
ErrInvalidRequestLargeCopy
|
||||||
ErrInvalidStorageClass
|
ErrInvalidStorageClass
|
||||||
VersionIDMarkerWithoutKeyMarker
|
VersionIDMarkerWithoutKeyMarker
|
||||||
|
ErrInvalidRangeLength
|
||||||
|
ErrRangeOutOfBounds
|
||||||
|
ErrMissingContentRange
|
||||||
|
|
||||||
ErrMalformedJSON
|
ErrMalformedJSON
|
||||||
ErrInsecureClientRequest
|
ErrInsecureClientRequest
|
||||||
|
@ -1739,12 +1745,30 @@ var errorCodes = errorCodeMap{
|
||||||
Description: "Part number must be an integer between 1 and 10000, inclusive",
|
Description: "Part number must be an integer between 1 and 10000, inclusive",
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
|
ErrInvalidRangeLength: {
|
||||||
|
ErrCode: ErrInvalidRangeLength,
|
||||||
|
Code: "InvalidRange",
|
||||||
|
Description: "Provided range length must be equal to content length",
|
||||||
|
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
|
||||||
|
},
|
||||||
|
ErrRangeOutOfBounds: {
|
||||||
|
ErrCode: ErrRangeOutOfBounds,
|
||||||
|
Code: "InvalidRange",
|
||||||
|
Description: "Provided range is outside of object bounds",
|
||||||
|
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
|
||||||
|
},
|
||||||
|
ErrMissingContentRange: {
|
||||||
|
ErrCode: ErrMissingContentRange,
|
||||||
|
Code: "MissingContentRange",
|
||||||
|
Description: "Content-Range header is mandatory for this type of request",
|
||||||
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
|
},
|
||||||
// Add your error structure here.
|
// Add your error structure here.
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsS3Error checks if the provided error is a specific s3 error.
|
// IsS3Error checks if the provided error is a specific s3 error.
|
||||||
func IsS3Error(err error, code ErrorCode) bool {
|
func IsS3Error(err error, code ErrorCode) bool {
|
||||||
err = frosterrors.UnwrapErr(err)
|
err = frosterr.UnwrapErr(err)
|
||||||
e, ok := err.(Error)
|
e, ok := err.(Error)
|
||||||
return ok && e.ErrCode == code
|
return ok && e.ErrCode == code
|
||||||
}
|
}
|
||||||
|
@ -1781,6 +1805,30 @@ func GetAPIErrorWithError(code ErrorCode, err error) Error {
|
||||||
return errorCodes.toAPIErrWithErr(code, err)
|
return errorCodes.toAPIErrWithErr(code, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TransformToS3Error converts FrostFS error to the corresponding S3 error type.
|
||||||
|
func TransformToS3Error(err error) error {
|
||||||
|
err = frosterr.UnwrapErr(err) // this wouldn't work with errors.Join
|
||||||
|
var s3err Error
|
||||||
|
if errors.As(err, &s3err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, frostfs.ErrAccessDenied) ||
|
||||||
|
errors.Is(err, tree.ErrNodeAccessDenied) {
|
||||||
|
return GetAPIError(ErrAccessDenied)
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, frostfs.ErrGatewayTimeout) {
|
||||||
|
return GetAPIError(ErrGatewayTimeout)
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, frostfs.ErrGlobalDomainIsAlreadyTaken) {
|
||||||
|
return GetAPIError(ErrBucketAlreadyExists)
|
||||||
|
}
|
||||||
|
|
||||||
|
return GetAPIError(ErrInternalError)
|
||||||
|
}
|
||||||
|
|
||||||
// ObjectError -- error that is linked to a specific object.
|
// ObjectError -- error that is linked to a specific object.
|
||||||
type ObjectError struct {
|
type ObjectError struct {
|
||||||
Err error
|
Err error
|
||||||
|
|
|
@ -2,7 +2,12 @@ package errors
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func BenchmarkErrCode(b *testing.B) {
|
func BenchmarkErrCode(b *testing.B) {
|
||||||
|
@ -24,3 +29,56 @@ func BenchmarkErrorsIs(b *testing.B) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestTransformS3Errors(t *testing.T) {
|
||||||
|
for _, tc := range []struct {
|
||||||
|
name string
|
||||||
|
err error
|
||||||
|
expected ErrorCode
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "simple std error to internal error",
|
||||||
|
err: errors.New("some error"),
|
||||||
|
expected: ErrInternalError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "layer access denied error to s3 access denied error",
|
||||||
|
err: frostfs.ErrAccessDenied,
|
||||||
|
expected: ErrAccessDenied,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "wrapped layer access denied error to s3 access denied error",
|
||||||
|
err: fmt.Errorf("wrap: %w", frostfs.ErrAccessDenied),
|
||||||
|
expected: ErrAccessDenied,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "layer node access denied error to s3 access denied error",
|
||||||
|
err: tree.ErrNodeAccessDenied,
|
||||||
|
expected: ErrAccessDenied,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "layer gateway timeout error to s3 gateway timeout error",
|
||||||
|
err: frostfs.ErrGatewayTimeout,
|
||||||
|
expected: ErrGatewayTimeout,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "s3 error to s3 error",
|
||||||
|
err: GetAPIError(ErrInvalidPart),
|
||||||
|
expected: ErrInvalidPart,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "wrapped s3 error to s3 error",
|
||||||
|
err: fmt.Errorf("wrap: %w", GetAPIError(ErrInvalidPart)),
|
||||||
|
expected: ErrInvalidPart,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
err := TransformToS3Error(tc.err)
|
||||||
|
s3err, ok := err.(Error)
|
||||||
|
require.True(t, ok, "error must be s3 error")
|
||||||
|
require.Equalf(t, tc.expected, s3err.ErrCode,
|
||||||
|
"expected: '%s', got: '%s'",
|
||||||
|
GetAPIError(tc.expected).Code, GetAPIError(s3err.ErrCode).Code)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
1507
api/handler/acl.go
1507
api/handler/acl.go
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
@ -11,7 +11,6 @@ import (
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||||
|
@ -20,17 +19,11 @@ import (
|
||||||
|
|
||||||
type (
|
type (
|
||||||
handler struct {
|
handler struct {
|
||||||
log *zap.Logger
|
log *zap.Logger
|
||||||
obj layer.Client
|
obj *layer.Layer
|
||||||
notificator Notificator
|
cfg Config
|
||||||
cfg Config
|
ape APE
|
||||||
ape APE
|
frostfsid FrostFSID
|
||||||
frostfsid FrostFSID
|
|
||||||
}
|
|
||||||
|
|
||||||
Notificator interface {
|
|
||||||
SendNotifications(topics map[string]string, p *SendNotificationParams) error
|
|
||||||
SendTestNotification(topic, bucketName, requestID, HostID string, now time.Time) error
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Config contains data which handler needs to keep.
|
// Config contains data which handler needs to keep.
|
||||||
|
@ -39,14 +32,16 @@ type (
|
||||||
PlacementPolicy(namespace, constraint string) (netmap.PlacementPolicy, bool)
|
PlacementPolicy(namespace, constraint string) (netmap.PlacementPolicy, bool)
|
||||||
CopiesNumbers(namespace, constraint string) ([]uint32, bool)
|
CopiesNumbers(namespace, constraint string) ([]uint32, bool)
|
||||||
DefaultCopiesNumbers(namespace string) []uint32
|
DefaultCopiesNumbers(namespace string) []uint32
|
||||||
NewXMLDecoder(io.Reader) *xml.Decoder
|
NewXMLDecoder(reader io.Reader, agent string) *xml.Decoder
|
||||||
DefaultMaxAge() int
|
DefaultMaxAge() int
|
||||||
NotificatorEnabled() bool
|
|
||||||
ResolveZoneList() []string
|
ResolveZoneList() []string
|
||||||
IsResolveListAllow() bool
|
IsResolveListAllow() bool
|
||||||
BypassContentEncodingInChunks() bool
|
BypassContentEncodingInChunks(agent string) bool
|
||||||
MD5Enabled() bool
|
MD5Enabled() bool
|
||||||
ACLEnabled() bool
|
RetryMaxAttempts() int
|
||||||
|
RetryMaxBackoff() time.Duration
|
||||||
|
RetryStrategy() RetryStrategy
|
||||||
|
TLSTerminationHeader() string
|
||||||
}
|
}
|
||||||
|
|
||||||
FrostFSID interface {
|
FrostFSID interface {
|
||||||
|
@ -63,10 +58,17 @@ type (
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type RetryStrategy string
|
||||||
|
|
||||||
|
const (
|
||||||
|
RetryStrategyExponential = "exponential"
|
||||||
|
RetryStrategyConstant = "constant"
|
||||||
|
)
|
||||||
|
|
||||||
var _ api.Handler = (*handler)(nil)
|
var _ api.Handler = (*handler)(nil)
|
||||||
|
|
||||||
// New creates new api.Handler using given logger and client.
|
// New creates new api.Handler using given logger and client.
|
||||||
func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg Config, storage APE, ffsid FrostFSID) (api.Handler, error) {
|
func New(log *zap.Logger, obj *layer.Layer, cfg Config, storage APE, ffsid FrostFSID) (api.Handler, error) {
|
||||||
switch {
|
switch {
|
||||||
case obj == nil:
|
case obj == nil:
|
||||||
return nil, errors.New("empty FrostFS Object Layer")
|
return nil, errors.New("empty FrostFS Object Layer")
|
||||||
|
@ -78,19 +80,12 @@ func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg Config,
|
||||||
return nil, errors.New("empty frostfsid")
|
return nil, errors.New("empty frostfsid")
|
||||||
}
|
}
|
||||||
|
|
||||||
if !cfg.NotificatorEnabled() {
|
|
||||||
log.Warn(logs.NotificatorIsDisabledS3WontProduceNotificationEvents)
|
|
||||||
} else if notificator == nil {
|
|
||||||
return nil, errors.New("empty notificator")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &handler{
|
return &handler{
|
||||||
log: log,
|
log: log,
|
||||||
obj: obj,
|
obj: obj,
|
||||||
cfg: cfg,
|
cfg: cfg,
|
||||||
ape: storage,
|
ape: storage,
|
||||||
notificator: notificator,
|
frostfsid: ffsid,
|
||||||
frostfsid: ffsid,
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -70,17 +70,18 @@ var validAttributes = map[string]struct{}{
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
params, err := parseGetObjectAttributeArgs(r)
|
params, err := parseGetObjectAttributeArgs(r, h.reqLogger(ctx))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid request", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid request", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -90,44 +91,44 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
|
||||||
VersionID: params.VersionID,
|
VersionID: params.VersionID,
|
||||||
}
|
}
|
||||||
|
|
||||||
extendedInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), p)
|
extendedInfo, err := h.obj.GetExtendedObjectInfo(ctx, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not fetch object info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not fetch object info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
info := extendedInfo.ObjectInfo
|
info := extendedInfo.ObjectInfo
|
||||||
|
|
||||||
encryptionParams, err := formEncryptionParams(r)
|
encryptionParams, err := h.formEncryptionParams(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
|
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
|
||||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = checkPreconditions(info, params.Conditional, h.cfg.MD5Enabled()); err != nil {
|
if err = checkPreconditions(info, params.Conditional, h.cfg.MD5Enabled()); err != nil {
|
||||||
h.logAndSendError(w, "precondition failed", reqInfo, err)
|
h.logAndSendError(ctx, w, "precondition failed", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
response, err := encodeToObjectAttributesResponse(info, params, h.cfg.MD5Enabled())
|
response, err := encodeToObjectAttributesResponse(info, params, h.cfg.MD5Enabled())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't encode object info to response", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't encode object info to response", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
writeAttributesHeaders(w.Header(), extendedInfo, bktSettings.Unversioned())
|
writeAttributesHeaders(w.Header(), extendedInfo, bktSettings.Unversioned())
|
||||||
if err = middleware.EncodeToResponse(w, response); err != nil {
|
if err = middleware.EncodeToResponse(w, response); err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -144,7 +145,7 @@ func writeAttributesHeaders(h http.Header, info *data.ExtendedObjectInfo, isBuck
|
||||||
// x-amz-request-charged
|
// x-amz-request-charged
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseGetObjectAttributeArgs(r *http.Request) (*GetObjectAttributesArgs, error) {
|
func parseGetObjectAttributeArgs(r *http.Request, log *zap.Logger) (*GetObjectAttributesArgs, error) {
|
||||||
res := &GetObjectAttributesArgs{
|
res := &GetObjectAttributesArgs{
|
||||||
VersionID: r.URL.Query().Get(api.QueryVersionID),
|
VersionID: r.URL.Query().Get(api.QueryVersionID),
|
||||||
}
|
}
|
||||||
|
@ -177,8 +178,8 @@ func parseGetObjectAttributeArgs(r *http.Request) (*GetObjectAttributesArgs, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
res.Conditional, err = parseConditionalHeaders(r.Header)
|
res.Conditional = parseConditionalHeaders(r.Header, log)
|
||||||
return res, err
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttributesArgs, md5Enabled bool) (*GetObjectAttributesResponse, error) {
|
func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttributesArgs, md5Enabled bool) (*GetObjectAttributesResponse, error) {
|
||||||
|
|
|
@ -13,7 +13,6 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -42,11 +41,10 @@ func path2BucketObject(path string) (string, string, error) {
|
||||||
|
|
||||||
func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
var (
|
var (
|
||||||
err error
|
err error
|
||||||
versionID string
|
versionID string
|
||||||
metadata map[string]string
|
metadata map[string]string
|
||||||
tagSet map[string]string
|
tagSet map[string]string
|
||||||
sessionTokenEACL *session.Container
|
|
||||||
|
|
||||||
ctx = r.Context()
|
ctx = r.Context()
|
||||||
reqInfo = middleware.GetReqInfo(ctx)
|
reqInfo = middleware.GetReqInfo(ctx)
|
||||||
|
@ -67,7 +65,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
srcBucket, srcObject, err := path2BucketObject(src)
|
srcBucket, srcObject, err := path2BucketObject(src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid source copy", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid source copy", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -77,83 +75,74 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if srcObjPrm.BktInfo, err = h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner); err != nil {
|
if srcObjPrm.BktInfo, err = h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner); err != nil {
|
||||||
h.logAndSendError(w, "couldn't get source bucket", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't get source bucket", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
dstBktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
dstBktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't get target bucket", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't get target bucket", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
settings, err := h.obj.GetBucketSettings(ctx, dstBktInfo)
|
settings, err := h.obj.GetBucketSettings(ctx, dstBktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
apeEnabled := dstBktInfo.APEEnabled || settings.CannedACL != ""
|
if cannedACLStatus == aclStatusYes {
|
||||||
if apeEnabled && cannedACLStatus == aclStatusYes {
|
h.logAndSendError(ctx, w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
|
||||||
h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
needUpdateEACLTable := !(apeEnabled || cannedACLStatus == aclStatusNo)
|
|
||||||
if needUpdateEACLTable {
|
|
||||||
if sessionTokenEACL, err = getSessionTokenSetEACL(ctx); err != nil {
|
|
||||||
h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
|
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not find object", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not find object", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
srcObjInfo := extendedSrcObjInfo.ObjectInfo
|
srcObjInfo := extendedSrcObjInfo.ObjectInfo
|
||||||
|
|
||||||
srcEncryptionParams, err := formCopySourceEncryptionParams(r)
|
srcEncryptionParams, err := h.formCopySourceEncryptionParams(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
dstEncryptionParams, err := formEncryptionParams(r)
|
dstEncryptionParams, err := h.formEncryptionParams(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = srcEncryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
|
if err = srcEncryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
|
||||||
if errors.IsS3Error(err, errors.ErrInvalidEncryptionParameters) || errors.IsS3Error(err, errors.ErrSSEEncryptedObject) ||
|
if errors.IsS3Error(err, errors.ErrInvalidEncryptionParameters) || errors.IsS3Error(err, errors.ErrSSEEncryptedObject) ||
|
||||||
errors.IsS3Error(err, errors.ErrInvalidSSECustomerParameters) {
|
errors.IsS3Error(err, errors.ErrInvalidSSECustomerParameters) {
|
||||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, err, zap.Error(err))
|
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, err, zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var dstSize uint64
|
var dstSize uint64
|
||||||
srcSize, err := layer.GetObjectSize(srcObjInfo)
|
srcSize, err := layer.GetObjectSize(srcObjInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "failed to get source object size", reqInfo, err)
|
h.logAndSendError(ctx, w, "failed to get source object size", reqInfo, err)
|
||||||
return
|
return
|
||||||
} else if srcSize > layer.UploadMaxSize { // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
|
} else if srcSize > layer.UploadMaxSize { // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
|
||||||
h.logAndSendError(w, "too bid object to copy with single copy operation, use multipart upload copy instead", reqInfo, errors.GetAPIError(errors.ErrInvalidRequestLargeCopy))
|
h.logAndSendError(ctx, w, "too bid object to copy with single copy operation, use multipart upload copy instead", reqInfo, errors.GetAPIError(errors.ErrInvalidRequestLargeCopy))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
dstSize = srcSize
|
dstSize = srcSize
|
||||||
|
|
||||||
args, err := parseCopyObjectArgs(r.Header)
|
args, err := parseCopyObjectArgs(r.Header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not parse request params", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not parse request params", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if isCopyingToItselfForbidden(reqInfo, srcBucket, srcObject, settings, args) {
|
if isCopyingToItselfForbidden(reqInfo, srcBucket, srcObject, settings, args) {
|
||||||
h.logAndSendError(w, "copying to itself without changing anything", reqInfo, errors.GetAPIError(errors.ErrInvalidCopyDest))
|
h.logAndSendError(ctx, w, "copying to itself without changing anything", reqInfo, errors.GetAPIError(errors.ErrInvalidCopyDest))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -164,7 +153,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
if args.TaggingDirective == replaceDirective {
|
if args.TaggingDirective == replaceDirective {
|
||||||
tagSet, err = parseTaggingHeader(r.Header)
|
tagSet, err = parseTaggingHeader(r.Header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not parse tagging header", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not parse tagging header", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -179,13 +168,13 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
_, tagSet, err = h.obj.GetObjectTagging(ctx, tagPrm)
|
_, tagSet, err = h.obj.GetObjectTagging(ctx, tagPrm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get object tagging", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get object tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = checkPreconditions(srcObjInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil {
|
if err = checkPreconditions(srcObjInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil {
|
||||||
h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
|
h.logAndSendError(ctx, w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -213,20 +202,20 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, dstBktInfo.LocationConstraint)
|
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, dstBktInfo.LocationConstraint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid copies number", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
params.Lock, err = formObjectLock(ctx, dstBktInfo, settings.LockConfiguration, r.Header)
|
params.Lock, err = formObjectLock(ctx, dstBktInfo, settings.LockConfiguration, r.Header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not form object lock", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not form object lock", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
additional := []zap.Field{zap.String("src_bucket_name", srcBucket), zap.String("src_object_name", srcObject)}
|
additional := []zap.Field{zap.String("src_bucket_name", srcBucket), zap.String("src_object_name", srcObject)}
|
||||||
extendedDstObjInfo, err := h.obj.CopyObject(ctx, params)
|
extendedDstObjInfo, err := h.obj.CopyObject(ctx, params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't copy object", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "couldn't copy object", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
dstObjInfo := extendedDstObjInfo.ObjectInfo
|
dstObjInfo := extendedDstObjInfo.ObjectInfo
|
||||||
|
@ -235,29 +224,10 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339),
|
LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339),
|
||||||
ETag: data.Quote(dstObjInfo.ETag(h.cfg.MD5Enabled())),
|
ETag: data.Quote(dstObjInfo.ETag(h.cfg.MD5Enabled())),
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if needUpdateEACLTable {
|
|
||||||
newEaclTable, err := h.getNewEAclTable(r, dstBktInfo, dstObjInfo)
|
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(w, "could not get new eacl table", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
p := &layer.PutBucketACLParams{
|
|
||||||
BktInfo: dstBktInfo,
|
|
||||||
EACL: newEaclTable,
|
|
||||||
SessionToken: sessionTokenEACL,
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = h.obj.PutBucketACL(ctx, p); err != nil {
|
|
||||||
h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if tagSet != nil {
|
if tagSet != nil {
|
||||||
tagPrm := &data.PutObjectTaggingParams{
|
tagPrm := &data.PutObjectTaggingParams{
|
||||||
ObjectVersion: &data.ObjectVersion{
|
ObjectVersion: &data.ObjectVersion{
|
||||||
|
@ -268,24 +238,14 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
TagSet: tagSet,
|
TagSet: tagSet,
|
||||||
NodeVersion: extendedDstObjInfo.NodeVersion,
|
NodeVersion: extendedDstObjInfo.NodeVersion,
|
||||||
}
|
}
|
||||||
if _, err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
|
if err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
|
||||||
h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not upload object tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
h.reqLogger(ctx).Info(logs.ObjectIsCopied, zap.Stringer("object_id", dstObjInfo.ID))
|
h.reqLogger(ctx).Info(logs.ObjectIsCopied, zap.Stringer("object_id", dstObjInfo.ID))
|
||||||
|
|
||||||
s := &SendNotificationParams{
|
|
||||||
Event: EventObjectCreatedCopy,
|
|
||||||
NotificationInfo: data.NotificationInfoFromObject(dstObjInfo, h.cfg.MD5Enabled()),
|
|
||||||
BktInfo: dstBktInfo,
|
|
||||||
ReqInfo: reqInfo,
|
|
||||||
}
|
|
||||||
if err = h.sendNotifications(ctx, s); err != nil {
|
|
||||||
h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
if dstEncryptionParams.Enabled() {
|
if dstEncryptionParams.Enabled() {
|
||||||
addSSECHeaders(w.Header(), r.Header)
|
addSSECHeaders(w.Header(), r.Header)
|
||||||
}
|
}
|
||||||
|
|
|
@ -20,32 +20,34 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
|
cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get cors", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get cors", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, cors); err != nil {
|
if err = middleware.EncodeToResponse(w, cors); err != nil {
|
||||||
h.logAndSendError(w, "could not encode cors to response", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not encode cors to response", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -53,36 +55,38 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Reader: r.Body,
|
Reader: r.Body,
|
||||||
NewDecoder: h.cfg.NewXMLDecoder,
|
NewDecoder: h.cfg.NewXMLDecoder,
|
||||||
|
UserAgent: r.UserAgent(),
|
||||||
}
|
}
|
||||||
|
|
||||||
p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
|
p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid copies number", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutBucketCORS(r.Context(), p); err != nil {
|
if err = h.obj.PutBucketCORS(ctx, p); err != nil {
|
||||||
h.logAndSendError(w, "could not put cors configuration", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not put cors configuration", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
|
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
|
||||||
h.logAndSendError(w, "write response", reqInfo, err)
|
h.logAndSendError(ctx, w, "write response", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.DeleteBucketCORS(r.Context(), bktInfo); err != nil {
|
if err = h.obj.DeleteBucketCORS(ctx, bktInfo); err != nil {
|
||||||
h.logAndSendError(w, "could not delete cors", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not delete cors", reqInfo, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
w.WriteHeader(http.StatusNoContent)
|
||||||
|
@ -102,7 +106,7 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
|
||||||
if reqInfo.BucketName == "" {
|
if reqInfo.BucketName == "" {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
bktInfo, err := h.obj.GetBucketInfo(ctx, reqInfo.BucketName)
|
bktInfo, err := h.getBucketInfo(ctx, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.reqLogger(ctx).Warn(logs.GetBucketInfo, zap.Error(err))
|
h.reqLogger(ctx).Warn(logs.GetBucketInfo, zap.Error(err))
|
||||||
return
|
return
|
||||||
|
@ -149,21 +153,22 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
bktInfo, err := h.getBucketInfo(ctx, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
origin := r.Header.Get(api.Origin)
|
origin := r.Header.Get(api.Origin)
|
||||||
if origin == "" {
|
if origin == "" {
|
||||||
h.logAndSendError(w, "origin request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
|
h.logAndSendError(ctx, w, "origin request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
|
||||||
}
|
}
|
||||||
|
|
||||||
method := r.Header.Get(api.AccessControlRequestMethod)
|
method := r.Header.Get(api.AccessControlRequestMethod)
|
||||||
if method == "" {
|
if method == "" {
|
||||||
h.logAndSendError(w, "Access-Control-Request-Method request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
|
h.logAndSendError(ctx, w, "Access-Control-Request-Method request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -173,9 +178,9 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
||||||
headers = strings.Split(requestHeaders, ", ")
|
headers = strings.Split(requestHeaders, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
|
cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get cors", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get cors", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -187,8 +192,8 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
||||||
if !checkSubslice(rule.AllowedHeaders, headers) {
|
if !checkSubslice(rule.AllowedHeaders, headers) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
w.Header().Set(api.AccessControlAllowOrigin, o)
|
w.Header().Set(api.AccessControlAllowOrigin, origin)
|
||||||
w.Header().Set(api.AccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
|
w.Header().Set(api.AccessControlAllowMethods, method)
|
||||||
if headers != nil {
|
if headers != nil {
|
||||||
w.Header().Set(api.AccessControlAllowHeaders, requestHeaders)
|
w.Header().Set(api.AccessControlAllowHeaders, requestHeaders)
|
||||||
}
|
}
|
||||||
|
@ -204,7 +209,7 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
||||||
w.Header().Set(api.AccessControlAllowCredentials, "true")
|
w.Header().Set(api.AccessControlAllowCredentials, "true")
|
||||||
}
|
}
|
||||||
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
|
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
|
||||||
h.logAndSendError(w, "write response", reqInfo, err)
|
h.logAndSendError(ctx, w, "write response", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
|
@ -213,7 +218,7 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
h.logAndSendError(w, "Forbidden", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
|
h.logAndSendError(ctx, w, "Forbidden", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkSubslice(slice []string, subSlice []string) bool {
|
func checkSubslice(slice []string, subSlice []string) bool {
|
||||||
|
|
|
@ -7,6 +7,7 @@ import (
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestCORSOriginWildcard(t *testing.T) {
|
func TestCORSOriginWildcard(t *testing.T) {
|
||||||
|
@ -39,3 +40,181 @@ func TestCORSOriginWildcard(t *testing.T) {
|
||||||
hc.Handler().GetBucketCorsHandler(w, r)
|
hc.Handler().GetBucketCorsHandler(w, r)
|
||||||
assertStatus(t, w, http.StatusOK)
|
assertStatus(t, w, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPreflight(t *testing.T) {
|
||||||
|
body := `
|
||||||
|
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
||||||
|
<CORSRule>
|
||||||
|
<AllowedMethod>GET</AllowedMethod>
|
||||||
|
<AllowedOrigin>http://www.example.com</AllowedOrigin>
|
||||||
|
<AllowedHeader>Authorization</AllowedHeader>
|
||||||
|
<ExposeHeader>x-amz-*</ExposeHeader>
|
||||||
|
<ExposeHeader>X-Amz-*</ExposeHeader>
|
||||||
|
<MaxAgeSeconds>600</MaxAgeSeconds>
|
||||||
|
</CORSRule>
|
||||||
|
</CORSConfiguration>
|
||||||
|
`
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
bktName := "bucket-preflight-test"
|
||||||
|
box, _ := createAccessBox(t)
|
||||||
|
w, r := prepareTestRequest(hc, bktName, "", nil)
|
||||||
|
ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
|
||||||
|
r = r.WithContext(ctx)
|
||||||
|
hc.Handler().CreateBucketHandler(w, r)
|
||||||
|
assertStatus(t, w, http.StatusOK)
|
||||||
|
|
||||||
|
w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
|
||||||
|
ctx = middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
|
||||||
|
r = r.WithContext(ctx)
|
||||||
|
hc.Handler().PutBucketCorsHandler(w, r)
|
||||||
|
assertStatus(t, w, http.StatusOK)
|
||||||
|
|
||||||
|
for _, tc := range []struct {
|
||||||
|
name string
|
||||||
|
origin string
|
||||||
|
method string
|
||||||
|
headers string
|
||||||
|
expectedStatus int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Valid",
|
||||||
|
origin: "http://www.example.com",
|
||||||
|
method: "GET",
|
||||||
|
headers: "Authorization",
|
||||||
|
expectedStatus: http.StatusOK,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Empty origin",
|
||||||
|
method: "GET",
|
||||||
|
headers: "Authorization",
|
||||||
|
expectedStatus: http.StatusBadRequest,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Empty request method",
|
||||||
|
origin: "http://www.example.com",
|
||||||
|
headers: "Authorization",
|
||||||
|
expectedStatus: http.StatusBadRequest,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Not allowed method",
|
||||||
|
origin: "http://www.example.com",
|
||||||
|
method: "PUT",
|
||||||
|
headers: "Authorization",
|
||||||
|
expectedStatus: http.StatusForbidden,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Not allowed headers",
|
||||||
|
origin: "http://www.example.com",
|
||||||
|
method: "GET",
|
||||||
|
headers: "Authorization, Last-Modified",
|
||||||
|
expectedStatus: http.StatusForbidden,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
w, r = prepareTestPayloadRequest(hc, bktName, "", nil)
|
||||||
|
r.Header.Set(api.Origin, tc.origin)
|
||||||
|
r.Header.Set(api.AccessControlRequestMethod, tc.method)
|
||||||
|
r.Header.Set(api.AccessControlRequestHeaders, tc.headers)
|
||||||
|
hc.Handler().Preflight(w, r)
|
||||||
|
assertStatus(t, w, tc.expectedStatus)
|
||||||
|
|
||||||
|
if tc.expectedStatus == http.StatusOK {
|
||||||
|
require.Equal(t, tc.origin, w.Header().Get(api.AccessControlAllowOrigin))
|
||||||
|
require.Equal(t, tc.method, w.Header().Get(api.AccessControlAllowMethods))
|
||||||
|
require.Equal(t, tc.headers, w.Header().Get(api.AccessControlAllowHeaders))
|
||||||
|
require.Equal(t, "x-amz-*, X-Amz-*", w.Header().Get(api.AccessControlExposeHeaders))
|
||||||
|
require.Equal(t, "true", w.Header().Get(api.AccessControlAllowCredentials))
|
||||||
|
require.Equal(t, "600", w.Header().Get(api.AccessControlMaxAge))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPreflightWildcardOrigin(t *testing.T) {
|
||||||
|
body := `
|
||||||
|
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
||||||
|
<CORSRule>
|
||||||
|
<AllowedMethod>GET</AllowedMethod>
|
||||||
|
<AllowedMethod>PUT</AllowedMethod>
|
||||||
|
<AllowedOrigin>*</AllowedOrigin>
|
||||||
|
<AllowedHeader>*</AllowedHeader>
|
||||||
|
</CORSRule>
|
||||||
|
</CORSConfiguration>
|
||||||
|
`
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
bktName := "bucket-preflight-wildcard-test"
|
||||||
|
box, _ := createAccessBox(t)
|
||||||
|
w, r := prepareTestRequest(hc, bktName, "", nil)
|
||||||
|
ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
|
||||||
|
r = r.WithContext(ctx)
|
||||||
|
hc.Handler().CreateBucketHandler(w, r)
|
||||||
|
assertStatus(t, w, http.StatusOK)
|
||||||
|
|
||||||
|
w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
|
||||||
|
ctx = middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
|
||||||
|
r = r.WithContext(ctx)
|
||||||
|
hc.Handler().PutBucketCorsHandler(w, r)
|
||||||
|
assertStatus(t, w, http.StatusOK)
|
||||||
|
|
||||||
|
for _, tc := range []struct {
|
||||||
|
name string
|
||||||
|
origin string
|
||||||
|
method string
|
||||||
|
headers string
|
||||||
|
expectedStatus int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Valid get",
|
||||||
|
origin: "http://www.example.com",
|
||||||
|
method: "GET",
|
||||||
|
headers: "Authorization, Last-Modified",
|
||||||
|
expectedStatus: http.StatusOK,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Valid put",
|
||||||
|
origin: "http://example.com",
|
||||||
|
method: "PUT",
|
||||||
|
headers: "Authorization, Content-Type",
|
||||||
|
expectedStatus: http.StatusOK,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Empty origin",
|
||||||
|
method: "GET",
|
||||||
|
headers: "Authorization, Last-Modified",
|
||||||
|
expectedStatus: http.StatusBadRequest,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Empty request method",
|
||||||
|
origin: "http://www.example.com",
|
||||||
|
headers: "Authorization, Last-Modified",
|
||||||
|
expectedStatus: http.StatusBadRequest,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Not allowed method",
|
||||||
|
origin: "http://www.example.com",
|
||||||
|
method: "DELETE",
|
||||||
|
headers: "Authorization, Last-Modified",
|
||||||
|
expectedStatus: http.StatusForbidden,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
w, r = prepareTestPayloadRequest(hc, bktName, "", nil)
|
||||||
|
r.Header.Set(api.Origin, tc.origin)
|
||||||
|
r.Header.Set(api.AccessControlRequestMethod, tc.method)
|
||||||
|
r.Header.Set(api.AccessControlRequestHeaders, tc.headers)
|
||||||
|
hc.Handler().Preflight(w, r)
|
||||||
|
assertStatus(t, w, tc.expectedStatus)
|
||||||
|
|
||||||
|
if tc.expectedStatus == http.StatusOK {
|
||||||
|
require.Equal(t, tc.origin, w.Header().Get(api.AccessControlAllowOrigin))
|
||||||
|
require.Equal(t, tc.method, w.Header().Get(api.AccessControlAllowMethods))
|
||||||
|
require.Equal(t, tc.headers, w.Header().Get(api.AccessControlAllowHeaders))
|
||||||
|
require.Empty(t, w.Header().Get(api.AccessControlExposeHeaders))
|
||||||
|
require.Empty(t, w.Header().Get(api.AccessControlAllowCredentials))
|
||||||
|
require.Equal(t, "0", w.Header().Get(api.AccessControlMaxAge))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -8,16 +8,12 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// limitation of AWS https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
|
// limitation of AWS https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
|
||||||
|
@ -75,67 +71,39 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
networkInfo, err := h.obj.GetNetworkInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.DeleteObjectParams{
|
p := &layer.DeleteObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Objects: versionedObject,
|
Objects: versionedObject,
|
||||||
Settings: bktSettings,
|
Settings: bktSettings,
|
||||||
|
NetworkInfo: networkInfo,
|
||||||
}
|
}
|
||||||
deletedObjects := h.obj.DeleteObjects(ctx, p)
|
deletedObjects := h.obj.DeleteObjects(ctx, p)
|
||||||
deletedObject := deletedObjects[0]
|
deletedObject := deletedObjects[0]
|
||||||
if deletedObject.Error != nil {
|
if deletedObject.Error != nil {
|
||||||
if isErrObjectLocked(deletedObject.Error) {
|
if isErrObjectLocked(deletedObject.Error) {
|
||||||
h.logAndSendError(w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
|
h.logAndSendError(ctx, w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
|
||||||
} else {
|
} else {
|
||||||
h.logAndSendError(w, "could not delete object", reqInfo, deletedObject.Error)
|
h.logAndSendError(ctx, w, "could not delete object", reqInfo, deletedObject.Error)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var m *SendNotificationParams
|
|
||||||
|
|
||||||
if bktSettings.VersioningEnabled() && len(versionID) == 0 {
|
|
||||||
m = &SendNotificationParams{
|
|
||||||
Event: EventObjectRemovedDeleteMarkerCreated,
|
|
||||||
NotificationInfo: &data.NotificationInfo{
|
|
||||||
Name: reqInfo.ObjectName,
|
|
||||||
HashSum: deletedObject.DeleteMarkerEtag,
|
|
||||||
},
|
|
||||||
BktInfo: bktInfo,
|
|
||||||
ReqInfo: reqInfo,
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
var objID oid.ID
|
|
||||||
if len(versionID) != 0 {
|
|
||||||
if err = objID.DecodeString(versionID); err != nil {
|
|
||||||
h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
m = &SendNotificationParams{
|
|
||||||
Event: EventObjectRemovedDelete,
|
|
||||||
NotificationInfo: &data.NotificationInfo{
|
|
||||||
Name: reqInfo.ObjectName,
|
|
||||||
Version: objID.EncodeToString(),
|
|
||||||
},
|
|
||||||
BktInfo: bktInfo,
|
|
||||||
ReqInfo: reqInfo,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = h.sendNotifications(ctx, m); err != nil {
|
|
||||||
h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
if deletedObject.VersionID != "" {
|
if deletedObject.VersionID != "" {
|
||||||
w.Header().Set(api.AmzVersionID, deletedObject.VersionID)
|
w.Header().Set(api.AmzVersionID, deletedObject.VersionID)
|
||||||
}
|
}
|
||||||
|
@ -166,38 +134,41 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
||||||
// Content-Md5 is required and should be set
|
// Content-Md5 is required and should be set
|
||||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
||||||
if _, ok := r.Header[api.ContentMD5]; !ok {
|
if _, ok := r.Header[api.ContentMD5]; !ok {
|
||||||
h.logAndSendError(w, "missing Content-MD5", reqInfo, errors.GetAPIError(errors.ErrMissingContentMD5))
|
h.logAndSendError(ctx, w, "missing Content-MD5", reqInfo, errors.GetAPIError(errors.ErrMissingContentMD5))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Content-Length is required and should be non-zero
|
// Content-Length is required and should be non-zero
|
||||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
||||||
if r.ContentLength <= 0 {
|
if r.ContentLength <= 0 {
|
||||||
h.logAndSendError(w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength))
|
h.logAndSendError(ctx, w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unmarshal list of keys to be deleted.
|
// Unmarshal list of keys to be deleted.
|
||||||
requested := &DeleteObjectsRequest{}
|
requested := &DeleteObjectsRequest{}
|
||||||
if err := h.cfg.NewXMLDecoder(r.Body).Decode(requested); err != nil {
|
if err := h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(requested); err != nil {
|
||||||
h.logAndSendError(w, "couldn't decode body", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()))
|
h.logAndSendError(ctx, w, "couldn't decode body", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(requested.Objects) == 0 || len(requested.Objects) > maxObjectsToDelete {
|
if len(requested.Objects) == 0 || len(requested.Objects) > maxObjectsToDelete {
|
||||||
h.logAndSendError(w, "number of objects to delete must be greater than 0 and less or equal to 1000", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
|
h.logAndSendError(ctx, w, "number of objects to delete must be greater than 0 and less or equal to 1000", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
removed := make(map[string]*layer.VersionedObject)
|
unique := make(map[string]struct{})
|
||||||
toRemove := make([]*layer.VersionedObject, 0, len(requested.Objects))
|
toRemove := make([]*layer.VersionedObject, 0, len(requested.Objects))
|
||||||
for _, obj := range requested.Objects {
|
for _, obj := range requested.Objects {
|
||||||
versionedObj := &layer.VersionedObject{
|
versionedObj := &layer.VersionedObject{
|
||||||
Name: obj.ObjectName,
|
Name: obj.ObjectName,
|
||||||
VersionID: obj.VersionID,
|
VersionID: obj.VersionID,
|
||||||
}
|
}
|
||||||
toRemove = append(toRemove, versionedObj)
|
key := versionedObj.String()
|
||||||
removed[versionedObj.String()] = versionedObj
|
if _, ok := unique[key]; !ok {
|
||||||
|
toRemove = append(toRemove, versionedObj)
|
||||||
|
unique[key] = struct{}{}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
response := &DeleteObjectsResponse{
|
response := &DeleteObjectsResponse{
|
||||||
|
@ -207,21 +178,28 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
networkInfo, err := h.obj.GetNetworkInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.DeleteObjectParams{
|
p := &layer.DeleteObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Objects: toRemove,
|
Objects: toRemove,
|
||||||
Settings: bktSettings,
|
Settings: bktSettings,
|
||||||
IsMultiple: true,
|
NetworkInfo: networkInfo,
|
||||||
|
IsMultiple: true,
|
||||||
}
|
}
|
||||||
deletedObjects := h.obj.DeleteObjects(ctx, p)
|
deletedObjects := h.obj.DeleteObjects(ctx, p)
|
||||||
|
|
||||||
|
@ -253,31 +231,46 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, response); err != nil {
|
if err = middleware.EncodeToResponse(w, response); err != nil {
|
||||||
h.logAndSendError(w, "could not write response", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not write response", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = checkOwner(bktInfo, reqInfo.User); err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "request owner id does not match bucket owner id", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var sessionToken *session.Container
|
var sessionToken *session.Container
|
||||||
|
|
||||||
boxData, err := middleware.GetBoxData(r.Context())
|
boxData, err := middleware.GetBoxData(ctx)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
sessionToken = boxData.Gate.SessionTokenForDelete()
|
sessionToken = boxData.Gate.SessionTokenForDelete()
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.DeleteBucket(r.Context(), &layer.DeleteBucketParams{
|
skipObjCheck := false
|
||||||
|
if value, ok := r.Header[api.AmzForceBucketDelete]; ok {
|
||||||
|
s := value[0]
|
||||||
|
if s == "true" {
|
||||||
|
skipObjCheck = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = h.obj.DeleteBucket(ctx, &layer.DeleteBucketParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
SessionToken: sessionToken,
|
SessionToken: sessionToken,
|
||||||
|
SkipCheck: skipObjCheck,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
h.logAndSendError(w, "couldn't delete bucket", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't delete bucket", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -288,7 +281,7 @@ func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
getBucketCannedChainID(chain.Ingress, bktInfo.CID),
|
getBucketCannedChainID(chain.Ingress, bktInfo.CID),
|
||||||
}
|
}
|
||||||
if err = h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil {
|
if err = h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil {
|
||||||
h.logAndSendError(w, "failed to delete policy from storage", reqInfo, err)
|
h.logAndSendError(ctx, w, "failed to delete policy from storage", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -3,6 +3,7 @@ package handler
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
@ -10,13 +11,10 @@ import (
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
|
||||||
"github.com/aws/aws-sdk-go/service/s3"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -37,6 +35,7 @@ func TestDeleteBucketOnAlreadyRemovedError(t *testing.T) {
|
||||||
|
|
||||||
deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})
|
deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})
|
||||||
|
|
||||||
|
hc.owner = bktInfo.Owner
|
||||||
deleteBucket(t, hc, bktName, http.StatusNoContent)
|
deleteBucket(t, hc, bktName, http.StatusNoContent)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -53,11 +52,12 @@ func TestDeleteBucket(t *testing.T) {
|
||||||
tc := prepareHandlerContext(t)
|
tc := prepareHandlerContext(t)
|
||||||
|
|
||||||
bktName, objName := "bucket-for-removal", "object-to-delete"
|
bktName, objName := "bucket-for-removal", "object-to-delete"
|
||||||
_, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)
|
bktInfo, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)
|
||||||
|
|
||||||
deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
|
deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
|
||||||
require.True(t, isDeleteMarker)
|
require.True(t, isDeleteMarker)
|
||||||
|
|
||||||
|
tc.owner = bktInfo.Owner
|
||||||
deleteBucket(t, tc, bktName, http.StatusConflict)
|
deleteBucket(t, tc, bktName, http.StatusConflict)
|
||||||
deleteObject(t, tc, bktName, objName, objInfo.VersionID())
|
deleteObject(t, tc, bktName, objName, objInfo.VersionID())
|
||||||
deleteBucket(t, tc, bktName, http.StatusConflict)
|
deleteBucket(t, tc, bktName, http.StatusConflict)
|
||||||
|
@ -82,9 +82,42 @@ func TestDeleteBucketOnNotFoundError(t *testing.T) {
|
||||||
|
|
||||||
deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})
|
deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})
|
||||||
|
|
||||||
|
hc.owner = bktInfo.Owner
|
||||||
deleteBucket(t, hc, bktName, http.StatusNoContent)
|
deleteBucket(t, hc, bktName, http.StatusNoContent)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestForceDeleteBucket(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
bktName, objName := "bucket-for-removal", "object-to-delete"
|
||||||
|
bktInfo := createTestBucket(hc, bktName)
|
||||||
|
|
||||||
|
putObject(hc, bktName, objName)
|
||||||
|
|
||||||
|
nodeVersion, err := hc.tree.GetUnversioned(hc.context, bktInfo, objName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
var addr oid.Address
|
||||||
|
addr.SetContainer(bktInfo.CID)
|
||||||
|
addr.SetObject(nodeVersion.OID)
|
||||||
|
|
||||||
|
hc.owner = bktInfo.Owner
|
||||||
|
deleteBucketForce(t, hc, bktName, http.StatusConflict, "false")
|
||||||
|
deleteBucketForce(t, hc, bktName, http.StatusNoContent, "true")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDeleteMultipleObjectCheckUniqueness(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
bktName, objName := "bucket", "object"
|
||||||
|
createTestBucket(hc, bktName)
|
||||||
|
|
||||||
|
putObject(hc, bktName, objName)
|
||||||
|
|
||||||
|
resp := deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}, {objName, emptyVersion}})
|
||||||
|
require.Empty(t, resp.Errors)
|
||||||
|
require.Len(t, resp.DeletedObjects, 1)
|
||||||
|
}
|
||||||
|
|
||||||
func TestDeleteObjectsError(t *testing.T) {
|
func TestDeleteObjectsError(t *testing.T) {
|
||||||
hc := prepareHandlerContext(t)
|
hc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
@ -100,20 +133,21 @@ func TestDeleteObjectsError(t *testing.T) {
|
||||||
addr.SetContainer(bktInfo.CID)
|
addr.SetContainer(bktInfo.CID)
|
||||||
addr.SetObject(nodeVersion.OID)
|
addr.SetObject(nodeVersion.OID)
|
||||||
|
|
||||||
expectedError := apiErrors.GetAPIError(apiErrors.ErrAccessDenied)
|
expectedError := apierr.GetAPIError(apierr.ErrAccessDenied)
|
||||||
hc.tp.SetObjectError(addr, expectedError)
|
hc.tp.SetObjectError(addr, expectedError)
|
||||||
|
|
||||||
w := deleteObjectsBase(hc, bktName, [][2]string{{objName, nodeVersion.OID.EncodeToString()}})
|
w := deleteObjectsBase(hc, bktName, [][2]string{{objName, nodeVersion.OID.EncodeToString()}})
|
||||||
|
var buf bytes.Buffer
|
||||||
res := &s3.DeleteObjectsOutput{}
|
res := &DeleteObjectsResponse{}
|
||||||
err = xmlutil.UnmarshalXML(res, xml.NewDecoder(w.Result().Body), "")
|
err = xml.NewDecoder(io.TeeReader(w.Result().Body, &buf)).Decode(res)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.ElementsMatch(t, []*s3.Error{{
|
require.Contains(t, buf.String(), "VersionId")
|
||||||
Code: aws.String(expectedError.Code),
|
require.ElementsMatch(t, []DeleteError{{
|
||||||
Key: aws.String(objName),
|
Code: expectedError.Code,
|
||||||
Message: aws.String(expectedError.Error()),
|
Key: objName,
|
||||||
VersionId: aws.String(nodeVersion.OID.EncodeToString()),
|
Message: expectedError.Error(),
|
||||||
|
VersionID: nodeVersion.OID.EncodeToString(),
|
||||||
}}, res.Errors)
|
}}, res.Errors)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -426,6 +460,17 @@ func TestDeleteObjectCheckMarkerReturn(t *testing.T) {
|
||||||
require.Equal(t, deleteMarkerVersion, deleteMarkerVersion2)
|
require.Equal(t, deleteMarkerVersion, deleteMarkerVersion2)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDeleteBucketByNotOwner(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
bktName := "bucket-name"
|
||||||
|
bktInfo := createTestBucket(hc, bktName)
|
||||||
|
deleteBucket(t, hc, bktName, http.StatusForbidden)
|
||||||
|
|
||||||
|
hc.owner = bktInfo.Owner
|
||||||
|
deleteBucket(t, hc, bktName, http.StatusNoContent)
|
||||||
|
}
|
||||||
|
|
||||||
func createBucketAndObject(tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
|
func createBucketAndObject(tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
|
||||||
bktInfo := createTestBucket(tc, bktName)
|
bktInfo := createTestBucket(tc, bktName)
|
||||||
|
|
||||||
|
@ -458,6 +503,16 @@ func putBucketVersioning(t *testing.T, tc *handlerContext, bktName string, enabl
|
||||||
assertStatus(t, w, http.StatusOK)
|
assertStatus(t, w, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getBucketVersioning(hc *handlerContext, bktName string) *VersioningConfiguration {
|
||||||
|
w, r := prepareTestRequest(hc, bktName, "", nil)
|
||||||
|
hc.Handler().GetBucketVersioningHandler(w, r)
|
||||||
|
assertStatus(hc.t, w, http.StatusOK)
|
||||||
|
|
||||||
|
res := &VersioningConfiguration{}
|
||||||
|
parseTestResponse(hc.t, w, res)
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
func deleteObject(t *testing.T, tc *handlerContext, bktName, objName, version string) (string, bool) {
|
func deleteObject(t *testing.T, tc *handlerContext, bktName, objName, version string) (string, bool) {
|
||||||
query := make(url.Values)
|
query := make(url.Values)
|
||||||
query.Add(api.QueryVersionID, version)
|
query.Add(api.QueryVersionID, version)
|
||||||
|
@ -494,6 +549,13 @@ func deleteObjectsBase(hc *handlerContext, bktName string, objVersions [][2]stri
|
||||||
return w
|
return w
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func deleteBucketForce(t *testing.T, tc *handlerContext, bktName string, code int, value string) {
|
||||||
|
w, r := prepareTestRequest(tc, bktName, "", nil)
|
||||||
|
r.Header.Set(api.AmzForceBucketDelete, value)
|
||||||
|
tc.Handler().DeleteBucketHandler(w, r)
|
||||||
|
assertStatus(t, w, code)
|
||||||
|
}
|
||||||
|
|
||||||
func deleteBucket(t *testing.T, tc *handlerContext, bktName string, code int) {
|
func deleteBucket(t *testing.T, tc *handlerContext, bktName string, code int) {
|
||||||
w, r := prepareTestRequest(tc, bktName, "", nil)
|
w, r := prepareTestRequest(tc, bktName, "", nil)
|
||||||
tc.Handler().DeleteBucketHandler(w, r)
|
tc.Handler().DeleteBucketHandler(w, r)
|
||||||
|
@ -505,9 +567,9 @@ func checkNotFound(t *testing.T, hc *handlerContext, bktName, objName, version s
|
||||||
assertStatus(t, w, http.StatusNotFound)
|
assertStatus(t, w, http.StatusNotFound)
|
||||||
}
|
}
|
||||||
|
|
||||||
func headObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apiErrors.ErrorCode) {
|
func headObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apierr.ErrorCode) {
|
||||||
w := headObjectBase(hc, bktName, objName, version)
|
w := headObjectBase(hc, bktName, objName, version)
|
||||||
assertS3Error(hc.t, w, apiErrors.GetAPIError(code))
|
assertS3Error(hc.t, w, apierr.GetAPIError(code))
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
|
func checkFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
|
||||||
|
@ -515,6 +577,18 @@ func checkFound(t *testing.T, hc *handlerContext, bktName, objName, version stri
|
||||||
assertStatus(t, w, http.StatusOK)
|
assertStatus(t, w, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func headObjectWithHeaders(hc *handlerContext, bktName, objName, version string, headers map[string]string) *httptest.ResponseRecorder {
|
||||||
|
query := make(url.Values)
|
||||||
|
query.Add(api.QueryVersionID, version)
|
||||||
|
|
||||||
|
w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
|
||||||
|
for k, v := range headers {
|
||||||
|
r.Header.Set(k, v)
|
||||||
|
}
|
||||||
|
hc.Handler().HeadObjectHandler(w, r)
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
func headObjectBase(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
|
func headObjectBase(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
|
||||||
query := make(url.Values)
|
query := make(url.Values)
|
||||||
query.Add(api.QueryVersionID, version)
|
query.Add(api.QueryVersionID, version)
|
||||||
|
|
|
@ -16,6 +16,7 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -37,7 +38,7 @@ func TestSimpleGetEncrypted(t *testing.T) {
|
||||||
|
|
||||||
objInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: objName})
|
objInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: objName})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
obj, err := tc.MockedPool().ReadObject(tc.Context(), layer.PrmObjectRead{Container: bktInfo.CID, Object: objInfo.ID})
|
obj, err := tc.MockedPool().GetObject(tc.Context(), frostfs.PrmObjectGet{Container: bktInfo.CID, Object: objInfo.ID})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
encryptedContent, err := io.ReadAll(obj.Payload)
|
encryptedContent, err := io.ReadAll(obj.Payload)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -45,6 +46,29 @@ func TestSimpleGetEncrypted(t *testing.T) {
|
||||||
|
|
||||||
response, _ := getEncryptedObject(tc, bktName, objName)
|
response, _ := getEncryptedObject(tc, bktName, objName)
|
||||||
require.Equal(t, content, string(response))
|
require.Equal(t, content, string(response))
|
||||||
|
|
||||||
|
result := listVersions(t, tc, bktName)
|
||||||
|
require.Len(t, result.Version, 1)
|
||||||
|
require.Equal(t, uint64(len(content)), result.Version[0].Size)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMD5HeaderBadOrEmpty(t *testing.T) {
|
||||||
|
tc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
bktName, objName := "bucket-for-sse-c", "object-to-encrypt"
|
||||||
|
createTestBucket(tc, bktName)
|
||||||
|
content := "content"
|
||||||
|
|
||||||
|
headers := map[string]string{
|
||||||
|
api.ContentMD5: "",
|
||||||
|
}
|
||||||
|
putEncryptedObjectWithHeadersErr(t, tc, bktName, objName, content, headers, errors.ErrInvalidDigest)
|
||||||
|
|
||||||
|
headers = map[string]string{
|
||||||
|
api.ContentMD5: "YWJjMTIzIT8kKiYoKSctPUB+",
|
||||||
|
}
|
||||||
|
|
||||||
|
putEncryptedObjectWithHeadersErr(t, tc, bktName, objName, content, headers, errors.ErrBadDigest)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetEncryptedRange(t *testing.T) {
|
func TestGetEncryptedRange(t *testing.T) {
|
||||||
|
@ -288,6 +312,21 @@ func completeMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID
|
||||||
return w
|
return w
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func abortMultipartUpload(hc *handlerContext, bktName, objName, uploadID string) {
|
||||||
|
w := abortMultipartUploadBase(hc, bktName, objName, uploadID)
|
||||||
|
assertStatus(hc.t, w, http.StatusNoContent)
|
||||||
|
}
|
||||||
|
|
||||||
|
func abortMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID string) *httptest.ResponseRecorder {
|
||||||
|
query := make(url.Values)
|
||||||
|
query.Set(uploadIDQuery, uploadID)
|
||||||
|
|
||||||
|
w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
|
||||||
|
hc.Handler().AbortMultipartUploadHandler(w, r)
|
||||||
|
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
func uploadPartEncrypted(hc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) {
|
func uploadPartEncrypted(hc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) {
|
||||||
return uploadPartBase(hc, bktName, objName, true, uploadID, num, size)
|
return uploadPartBase(hc, bktName, objName, true, uploadID, num, size)
|
||||||
}
|
}
|
||||||
|
@ -334,6 +373,10 @@ func TestMultipartEncrypted(t *testing.T) {
|
||||||
|
|
||||||
part2Range := getEncryptedObjectRange(t, hc, bktName, objName, len(part1), len(part1)+len(part2)-1)
|
part2Range := getEncryptedObjectRange(t, hc, bktName, objName, len(part1), len(part1)+len(part2)-1)
|
||||||
require.Equal(t, part2[0:], part2Range)
|
require.Equal(t, part2[0:], part2Range)
|
||||||
|
|
||||||
|
result := listVersions(t, hc, bktName)
|
||||||
|
require.Len(t, result.Version, 1)
|
||||||
|
require.EqualValues(t, uint64(partSize+5), result.Version[0].Size)
|
||||||
}
|
}
|
||||||
|
|
||||||
func putEncryptedObject(t *testing.T, tc *handlerContext, bktName, objName, content string) {
|
func putEncryptedObject(t *testing.T, tc *handlerContext, bktName, objName, content string) {
|
||||||
|
@ -344,6 +387,15 @@ func putEncryptedObject(t *testing.T, tc *handlerContext, bktName, objName, cont
|
||||||
assertStatus(t, w, http.StatusOK)
|
assertStatus(t, w, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func putEncryptedObjectWithHeadersErr(t *testing.T, tc *handlerContext, bktName, objName, content string, headers map[string]string, code errors.ErrorCode) {
|
||||||
|
body := bytes.NewReader([]byte(content))
|
||||||
|
w, r := prepareTestPayloadRequest(tc, bktName, objName, body)
|
||||||
|
setHeaders(r, headers)
|
||||||
|
|
||||||
|
tc.Handler().PutObjectHandler(w, r)
|
||||||
|
assertS3Error(t, w, errors.GetAPIError(code))
|
||||||
|
}
|
||||||
|
|
||||||
func getEncryptedObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header) {
|
func getEncryptedObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header) {
|
||||||
w, r := prepareTestRequest(hc, bktName, objName, nil)
|
w, r := prepareTestRequest(hc, bktName, objName, nil)
|
||||||
setEncryptHeaders(r)
|
setEncryptHeaders(r)
|
||||||
|
@ -355,6 +407,15 @@ func getObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header
|
||||||
return getObjectBase(hc, w, r)
|
return getObjectBase(hc, w, r)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getObjectWithHeaders(hc *handlerContext, bktName, objName string, headers map[string]string) *httptest.ResponseRecorder {
|
||||||
|
w, r := prepareTestRequest(hc, bktName, objName, nil)
|
||||||
|
for k, v := range headers {
|
||||||
|
r.Header.Set(k, v)
|
||||||
|
}
|
||||||
|
hc.Handler().GetObjectHandler(w, r)
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
func getObjectBase(hc *handlerContext, w *httptest.ResponseRecorder, r *http.Request) ([]byte, http.Header) {
|
func getObjectBase(hc *handlerContext, w *httptest.ResponseRecorder, r *http.Request) ([]byte, http.Header) {
|
||||||
hc.Handler().GetObjectHandler(w, r)
|
hc.Handler().GetObjectHandler(w, r)
|
||||||
assertStatus(hc.t, w, http.StatusOK)
|
assertStatus(hc.t, w, http.StatusOK)
|
||||||
|
|
|
@ -13,6 +13,7 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -78,6 +79,27 @@ func addSSECHeaders(responseHeader http.Header, requestHeader http.Header) {
|
||||||
responseHeader.Set(api.AmzServerSideEncryptionCustomerKeyMD5, requestHeader.Get(api.AmzServerSideEncryptionCustomerKeyMD5))
|
responseHeader.Set(api.AmzServerSideEncryptionCustomerKeyMD5, requestHeader.Get(api.AmzServerSideEncryptionCustomerKeyMD5))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func writeNotModifiedHeaders(h http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int, isBucketUnversioned, md5Enabled bool) {
|
||||||
|
h.Set(api.ETag, data.Quote(extendedInfo.ObjectInfo.ETag(md5Enabled)))
|
||||||
|
h.Set(api.LastModified, extendedInfo.ObjectInfo.Created.UTC().Format(http.TimeFormat))
|
||||||
|
h.Set(api.AmzTaggingCount, strconv.Itoa(tagSetLength))
|
||||||
|
|
||||||
|
if !isBucketUnversioned {
|
||||||
|
h.Set(api.AmzVersionID, extendedInfo.Version())
|
||||||
|
}
|
||||||
|
|
||||||
|
if cacheControl := extendedInfo.ObjectInfo.Headers[api.CacheControl]; cacheControl != "" {
|
||||||
|
h.Set(api.CacheControl, cacheControl)
|
||||||
|
}
|
||||||
|
|
||||||
|
for key, val := range extendedInfo.ObjectInfo.Headers {
|
||||||
|
if layer.IsSystemHeader(key) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
h[api.MetadataPrefix+key] = []string{val}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int,
|
func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int,
|
||||||
isBucketUnversioned, md5Enabled bool) {
|
isBucketUnversioned, md5Enabled bool) {
|
||||||
info := extendedInfo.ObjectInfo
|
info := extendedInfo.ObjectInfo
|
||||||
|
@ -129,18 +151,15 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
var (
|
var (
|
||||||
params *layer.RangeParams
|
params *layer.RangeParams
|
||||||
|
|
||||||
reqInfo = middleware.GetReqInfo(r.Context())
|
ctx = r.Context()
|
||||||
|
reqInfo = middleware.GetReqInfo(ctx)
|
||||||
)
|
)
|
||||||
|
|
||||||
conditional, err := parseConditionalHeaders(r.Header)
|
conditional := parseConditionalHeaders(r.Header, h.reqLogger(ctx))
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(w, "could not parse request params", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -150,37 +169,16 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
}
|
}
|
||||||
|
|
||||||
extendedInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), p)
|
extendedInfo, err := h.obj.GetExtendedObjectInfo(ctx, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not find object", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not find object", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
info := extendedInfo.ObjectInfo
|
info := extendedInfo.ObjectInfo
|
||||||
|
|
||||||
if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
|
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||||
h.logAndSendError(w, "precondition failed", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
encryptionParams, err := formEncryptionParams(r)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
|
|
||||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
fullSize, err := layer.GetObjectSize(info)
|
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(w, "invalid size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if params, err = fetchRangeHeader(r.Header, fullSize); err != nil {
|
|
||||||
h.logAndSendError(w, "could not parse range header", reqInfo, err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -190,24 +188,48 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
VersionID: info.VersionID(),
|
VersionID: info.VersionID(),
|
||||||
}
|
}
|
||||||
|
|
||||||
tagSet, lockInfo, err := h.obj.GetObjectTaggingAndLock(r.Context(), t, extendedInfo.NodeVersion)
|
tagSet, lockInfo, err := h.obj.GetObjectTaggingAndLock(ctx, t, extendedInfo.NodeVersion)
|
||||||
if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) {
|
if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) {
|
||||||
h.logAndSendError(w, "could not get object meta data", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get object meta data", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if layer.IsAuthenticatedRequest(r.Context()) {
|
if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
|
||||||
|
if errors.IsS3Error(err, errors.ErrNotModified) {
|
||||||
|
writeNotModifiedHeaders(w.Header(), extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.MD5Enabled())
|
||||||
|
}
|
||||||
|
h.logAndSendError(ctx, w, "precondition failed", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
encryptionParams, err := h.formEncryptionParams(r)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
fullSize, err := layer.GetObjectSize(info)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "invalid size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if params, err = fetchRangeHeader(r.Header, fullSize); err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not parse range header", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if layer.IsAuthenticatedRequest(ctx) {
|
||||||
overrideResponseHeaders(w.Header(), reqInfo.URL.Query())
|
overrideResponseHeaders(w.Header(), reqInfo.URL.Query())
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.setLockingHeaders(bktInfo, lockInfo, w.Header()); err != nil {
|
if err = h.setLockingHeaders(bktInfo, lockInfo, w.Header()); err != nil {
|
||||||
h.logAndSendError(w, "could not get locking info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get locking info", reqInfo, err)
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -219,9 +241,9 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
Encryption: encryptionParams,
|
Encryption: encryptionParams,
|
||||||
}
|
}
|
||||||
|
|
||||||
objPayload, err := h.obj.GetObject(r.Context(), getPayloadParams)
|
objPayload, err := h.obj.GetObject(ctx, getPayloadParams)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get object payload", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get object payload", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -233,7 +255,7 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = objPayload.StreamTo(w); err != nil {
|
if err = objPayload.StreamTo(w); err != nil {
|
||||||
h.logAndSendError(w, "could not stream object payload", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not stream object payload", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -265,21 +287,24 @@ func checkPreconditions(info *data.ObjectInfo, args *conditionalArgs, md5Enabled
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseConditionalHeaders(headers http.Header) (*conditionalArgs, error) {
|
func parseConditionalHeaders(headers http.Header, log *zap.Logger) *conditionalArgs {
|
||||||
var err error
|
|
||||||
args := &conditionalArgs{
|
args := &conditionalArgs{
|
||||||
IfMatch: data.UnQuote(headers.Get(api.IfMatch)),
|
IfMatch: data.UnQuote(headers.Get(api.IfMatch)),
|
||||||
IfNoneMatch: data.UnQuote(headers.Get(api.IfNoneMatch)),
|
IfNoneMatch: data.UnQuote(headers.Get(api.IfNoneMatch)),
|
||||||
}
|
}
|
||||||
|
|
||||||
if args.IfModifiedSince, err = parseHTTPTime(headers.Get(api.IfModifiedSince)); err != nil {
|
if httpTime, err := parseHTTPTime(headers.Get(api.IfModifiedSince)); err == nil {
|
||||||
return nil, err
|
args.IfModifiedSince = httpTime
|
||||||
|
} else {
|
||||||
|
log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfModifiedSince, headers.Get(api.IfModifiedSince)), zap.Error(err))
|
||||||
}
|
}
|
||||||
if args.IfUnmodifiedSince, err = parseHTTPTime(headers.Get(api.IfUnmodifiedSince)); err != nil {
|
if httpTime, err := parseHTTPTime(headers.Get(api.IfUnmodifiedSince)); err == nil {
|
||||||
return nil, err
|
args.IfUnmodifiedSince = httpTime
|
||||||
|
} else {
|
||||||
|
log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfUnmodifiedSince, headers.Get(api.IfUnmodifiedSince)), zap.Error(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
return args, nil
|
return args
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseHTTPTime(data string) (*time.Time, error) {
|
func parseHTTPTime(data string) (*time.Time, error) {
|
||||||
|
|
|
@ -2,7 +2,7 @@ package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
stderrors "errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
@ -13,7 +13,7 @@ import (
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
@ -89,7 +89,7 @@ func TestPreconditions(t *testing.T) {
|
||||||
name: "IfMatch false",
|
name: "IfMatch false",
|
||||||
info: newInfo(etag, today),
|
info: newInfo(etag, today),
|
||||||
args: &conditionalArgs{IfMatch: etag2},
|
args: &conditionalArgs{IfMatch: etag2},
|
||||||
expected: errors.GetAPIError(errors.ErrPreconditionFailed)},
|
expected: apierr.GetAPIError(apierr.ErrPreconditionFailed)},
|
||||||
{
|
{
|
||||||
name: "IfNoneMatch true",
|
name: "IfNoneMatch true",
|
||||||
info: newInfo(etag, today),
|
info: newInfo(etag, today),
|
||||||
|
@ -99,7 +99,7 @@ func TestPreconditions(t *testing.T) {
|
||||||
name: "IfNoneMatch false",
|
name: "IfNoneMatch false",
|
||||||
info: newInfo(etag, today),
|
info: newInfo(etag, today),
|
||||||
args: &conditionalArgs{IfNoneMatch: etag},
|
args: &conditionalArgs{IfNoneMatch: etag},
|
||||||
expected: errors.GetAPIError(errors.ErrNotModified)},
|
expected: apierr.GetAPIError(apierr.ErrNotModified)},
|
||||||
{
|
{
|
||||||
name: "IfModifiedSince true",
|
name: "IfModifiedSince true",
|
||||||
info: newInfo(etag, today),
|
info: newInfo(etag, today),
|
||||||
|
@ -109,7 +109,7 @@ func TestPreconditions(t *testing.T) {
|
||||||
name: "IfModifiedSince false",
|
name: "IfModifiedSince false",
|
||||||
info: newInfo(etag, yesterday),
|
info: newInfo(etag, yesterday),
|
||||||
args: &conditionalArgs{IfModifiedSince: &today},
|
args: &conditionalArgs{IfModifiedSince: &today},
|
||||||
expected: errors.GetAPIError(errors.ErrNotModified)},
|
expected: apierr.GetAPIError(apierr.ErrNotModified)},
|
||||||
{
|
{
|
||||||
name: "IfUnmodifiedSince true",
|
name: "IfUnmodifiedSince true",
|
||||||
info: newInfo(etag, yesterday),
|
info: newInfo(etag, yesterday),
|
||||||
|
@ -119,7 +119,7 @@ func TestPreconditions(t *testing.T) {
|
||||||
name: "IfUnmodifiedSince false",
|
name: "IfUnmodifiedSince false",
|
||||||
info: newInfo(etag, today),
|
info: newInfo(etag, today),
|
||||||
args: &conditionalArgs{IfUnmodifiedSince: &yesterday},
|
args: &conditionalArgs{IfUnmodifiedSince: &yesterday},
|
||||||
expected: errors.GetAPIError(errors.ErrPreconditionFailed)},
|
expected: apierr.GetAPIError(apierr.ErrPreconditionFailed)},
|
||||||
|
|
||||||
{
|
{
|
||||||
name: "IfMatch true, IfUnmodifiedSince false",
|
name: "IfMatch true, IfUnmodifiedSince false",
|
||||||
|
@ -131,19 +131,19 @@ func TestPreconditions(t *testing.T) {
|
||||||
name: "IfMatch false, IfUnmodifiedSince true",
|
name: "IfMatch false, IfUnmodifiedSince true",
|
||||||
info: newInfo(etag, yesterday),
|
info: newInfo(etag, yesterday),
|
||||||
args: &conditionalArgs{IfMatch: etag2, IfUnmodifiedSince: &today},
|
args: &conditionalArgs{IfMatch: etag2, IfUnmodifiedSince: &today},
|
||||||
expected: errors.GetAPIError(errors.ErrPreconditionFailed),
|
expected: apierr.GetAPIError(apierr.ErrPreconditionFailed),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "IfNoneMatch false, IfModifiedSince true",
|
name: "IfNoneMatch false, IfModifiedSince true",
|
||||||
info: newInfo(etag, today),
|
info: newInfo(etag, today),
|
||||||
args: &conditionalArgs{IfNoneMatch: etag, IfModifiedSince: &yesterday},
|
args: &conditionalArgs{IfNoneMatch: etag, IfModifiedSince: &yesterday},
|
||||||
expected: errors.GetAPIError(errors.ErrNotModified),
|
expected: apierr.GetAPIError(apierr.ErrNotModified),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "IfNoneMatch true, IfModifiedSince false",
|
name: "IfNoneMatch true, IfModifiedSince false",
|
||||||
info: newInfo(etag, yesterday),
|
info: newInfo(etag, yesterday),
|
||||||
args: &conditionalArgs{IfNoneMatch: etag2, IfModifiedSince: &today},
|
args: &conditionalArgs{IfNoneMatch: etag2, IfModifiedSince: &today},
|
||||||
expected: errors.GetAPIError(errors.ErrNotModified),
|
expected: apierr.GetAPIError(apierr.ErrNotModified),
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
@ -151,7 +151,7 @@ func TestPreconditions(t *testing.T) {
|
||||||
if tc.expected == nil {
|
if tc.expected == nil {
|
||||||
require.NoError(t, actual)
|
require.NoError(t, actual)
|
||||||
} else {
|
} else {
|
||||||
require.True(t, stderrors.Is(actual, tc.expected), tc.expected, actual)
|
require.True(t, errors.Is(actual, tc.expected), tc.expected, actual)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -193,8 +193,8 @@ func TestGetObject(t *testing.T) {
|
||||||
hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
|
hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
|
||||||
hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{})
|
hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{})
|
||||||
|
|
||||||
getObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), errors.ErrNoSuchVersion)
|
getObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), apierr.ErrNoSuchVersion)
|
||||||
getObjectAssertS3Error(hc, bktName, objName, emptyVersion, errors.ErrNoSuchKey)
|
getObjectAssertS3Error(hc, bktName, objName, emptyVersion, apierr.ErrNoSuchKey)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetObjectEnabledMD5(t *testing.T) {
|
func TestGetObjectEnabledMD5(t *testing.T) {
|
||||||
|
@ -210,6 +210,27 @@ func TestGetObjectEnabledMD5(t *testing.T) {
|
||||||
require.Equal(t, data.Quote(objInfo.MD5Sum), headers.Get(api.ETag))
|
require.Equal(t, data.Quote(objInfo.MD5Sum), headers.Get(api.ETag))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGetObjectNotModifiedHeaders(t *testing.T) {
|
||||||
|
hc := prepareHandlerContextWithMinCache(t)
|
||||||
|
bktName, objName, metadataHeader := "bucket", "obj", api.MetadataPrefix+"header"
|
||||||
|
createVersionedBucket(hc, bktName)
|
||||||
|
header := putObjectWithHeaders(hc, bktName, objName, map[string]string{api.CacheControl: "value", metadataHeader: "value"})
|
||||||
|
etag, versionID := header.Get(api.ETag), header.Get(api.AmzVersionID)
|
||||||
|
require.NotEmpty(t, etag)
|
||||||
|
require.NotEmpty(t, versionID)
|
||||||
|
|
||||||
|
putObjectTagging(t, hc, bktName, objName, map[string]string{"key": "value"})
|
||||||
|
|
||||||
|
w := getObjectWithHeaders(hc, bktName, objName, map[string]string{api.IfNoneMatch: etag})
|
||||||
|
require.Equal(t, http.StatusNotModified, w.Code)
|
||||||
|
require.Equal(t, "1", w.Header().Get(api.AmzTaggingCount))
|
||||||
|
require.Equal(t, etag, w.Header().Get(api.ETag))
|
||||||
|
require.NotEmpty(t, w.Header().Get(api.LastModified))
|
||||||
|
require.Equal(t, versionID, w.Header().Get(api.AmzVersionID))
|
||||||
|
require.Equal(t, "value", w.Header().Get(api.CacheControl))
|
||||||
|
require.Equal(t, []string{"value"}, w.Header()[metadataHeader])
|
||||||
|
}
|
||||||
|
|
||||||
func putObjectContent(hc *handlerContext, bktName, objName, content string) http.Header {
|
func putObjectContent(hc *handlerContext, bktName, objName, content string) http.Header {
|
||||||
body := bytes.NewReader([]byte(content))
|
body := bytes.NewReader([]byte(content))
|
||||||
w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
|
w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
|
||||||
|
@ -228,9 +249,17 @@ func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, s
|
||||||
return content
|
return content
|
||||||
}
|
}
|
||||||
|
|
||||||
func getObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code errors.ErrorCode) {
|
func getObjectVersion(tc *handlerContext, bktName, objName, version string) []byte {
|
||||||
|
w := getObjectBaseResponse(tc, bktName, objName, version)
|
||||||
|
assertStatus(tc.t, w, http.StatusOK)
|
||||||
|
content, err := io.ReadAll(w.Result().Body)
|
||||||
|
require.NoError(tc.t, err)
|
||||||
|
return content
|
||||||
|
}
|
||||||
|
|
||||||
|
func getObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apierr.ErrorCode) {
|
||||||
w := getObjectBaseResponse(hc, bktName, objName, version)
|
w := getObjectBaseResponse(hc, bktName, objName, version)
|
||||||
assertS3Error(hc.t, w, errors.GetAPIError(code))
|
assertS3Error(hc.t, w, apierr.GetAPIError(code))
|
||||||
}
|
}
|
||||||
|
|
||||||
func getObjectBaseResponse(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
|
func getObjectBaseResponse(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
|
||||||
|
|
1025
api/handler/handler_fuzz_test.go
Normal file
1025
api/handler/handler_fuzz_test.go
Normal file
File diff suppressed because it is too large
Load diff
|
@ -20,6 +20,7 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
|
||||||
|
@ -31,14 +32,19 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
|
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
|
"github.com/panjf2000/ants/v2"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
"golang.org/x/exp/slices"
|
"golang.org/x/exp/slices"
|
||||||
)
|
)
|
||||||
|
|
||||||
type handlerContext struct {
|
type handlerContext struct {
|
||||||
|
*handlerContextBase
|
||||||
|
t *testing.T
|
||||||
|
}
|
||||||
|
|
||||||
|
type handlerContextBase struct {
|
||||||
owner user.ID
|
owner user.ID
|
||||||
t *testing.T
|
|
||||||
h *handler
|
h *handler
|
||||||
tp *layer.TestFrostFS
|
tp *layer.TestFrostFS
|
||||||
tree *tree.Tree
|
tree *tree.Tree
|
||||||
|
@ -50,19 +56,19 @@ type handlerContext struct {
|
||||||
cache *layer.Cache
|
cache *layer.Cache
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hc *handlerContext) Handler() *handler {
|
func (hc *handlerContextBase) Handler() *handler {
|
||||||
return hc.h
|
return hc.h
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hc *handlerContext) MockedPool() *layer.TestFrostFS {
|
func (hc *handlerContextBase) MockedPool() *layer.TestFrostFS {
|
||||||
return hc.tp
|
return hc.tp
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hc *handlerContext) Layer() layer.Client {
|
func (hc *handlerContextBase) Layer() *layer.Layer {
|
||||||
return hc.h.obj
|
return hc.h.obj
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hc *handlerContext) Context() context.Context {
|
func (hc *handlerContextBase) Context() context.Context {
|
||||||
return hc.context
|
return hc.context
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -72,7 +78,7 @@ type configMock struct {
|
||||||
defaultCopiesNumbers []uint32
|
defaultCopiesNumbers []uint32
|
||||||
bypassContentEncodingInChunks bool
|
bypassContentEncodingInChunks bool
|
||||||
md5Enabled bool
|
md5Enabled bool
|
||||||
aclEnabled bool
|
tlsTerminationHeader string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *configMock) DefaultPlacementPolicy(_ string) netmap.PlacementPolicy {
|
func (c *configMock) DefaultPlacementPolicy(_ string) netmap.PlacementPolicy {
|
||||||
|
@ -92,11 +98,11 @@ func (c *configMock) DefaultCopiesNumbers(_ string) []uint32 {
|
||||||
return c.defaultCopiesNumbers
|
return c.defaultCopiesNumbers
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *configMock) NewXMLDecoder(r io.Reader) *xml.Decoder {
|
func (c *configMock) NewXMLDecoder(r io.Reader, _ string) *xml.Decoder {
|
||||||
return xml.NewDecoder(r)
|
return xml.NewDecoder(r)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *configMock) BypassContentEncodingInChunks() bool {
|
func (c *configMock) BypassContentEncodingInChunks(_ string) bool {
|
||||||
return c.bypassContentEncodingInChunks
|
return c.bypassContentEncodingInChunks
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -104,10 +110,6 @@ func (c *configMock) DefaultMaxAge() int {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *configMock) NotificatorEnabled() bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *configMock) ResolveZoneList() []string {
|
func (c *configMock) ResolveZoneList() []string {
|
||||||
return []string{}
|
return []string{}
|
||||||
}
|
}
|
||||||
|
@ -124,31 +126,55 @@ func (c *configMock) MD5Enabled() bool {
|
||||||
return c.md5Enabled
|
return c.md5Enabled
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *configMock) ACLEnabled() bool {
|
|
||||||
return c.aclEnabled
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *configMock) ResolveNamespaceAlias(ns string) string {
|
func (c *configMock) ResolveNamespaceAlias(ns string) string {
|
||||||
return ns
|
return ns
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *configMock) RetryMaxAttempts() int {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *configMock) RetryMaxBackoff() time.Duration {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *configMock) RetryStrategy() RetryStrategy {
|
||||||
|
return RetryStrategyConstant
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *configMock) TLSTerminationHeader() string {
|
||||||
|
return c.tlsTerminationHeader
|
||||||
|
}
|
||||||
|
|
||||||
func prepareHandlerContext(t *testing.T) *handlerContext {
|
func prepareHandlerContext(t *testing.T) *handlerContext {
|
||||||
return prepareHandlerContextBase(t, layer.DefaultCachesConfigs(zap.NewExample()))
|
hc, err := prepareHandlerContextBase(layer.DefaultCachesConfigs(zap.NewExample()))
|
||||||
|
require.NoError(t, err)
|
||||||
|
return &handlerContext{
|
||||||
|
handlerContextBase: hc,
|
||||||
|
t: t,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func prepareHandlerContextWithMinCache(t *testing.T) *handlerContext {
|
func prepareHandlerContextWithMinCache(t *testing.T) *handlerContext {
|
||||||
return prepareHandlerContextBase(t, getMinCacheConfig(zap.NewExample()))
|
hc, err := prepareHandlerContextBase(getMinCacheConfig(zap.NewExample()))
|
||||||
|
require.NoError(t, err)
|
||||||
|
return &handlerContext{
|
||||||
|
handlerContextBase: hc,
|
||||||
|
t: t,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *handlerContext {
|
func prepareHandlerContextBase(cacheCfg *layer.CachesConfig) (*handlerContextBase, error) {
|
||||||
key, err := keys.NewPrivateKey()
|
key, err := keys.NewPrivateKey()
|
||||||
require.NoError(t, err)
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
l := zap.NewExample()
|
log := zap.NewExample()
|
||||||
tp := layer.NewTestFrostFS(key)
|
tp := layer.NewTestFrostFS(key)
|
||||||
|
|
||||||
testResolver := &resolver.Resolver{Name: "test_resolver"}
|
testResolver := &resolver.Resolver{Name: "test_resolver"}
|
||||||
testResolver.SetResolveFunc(func(_ context.Context, name string) (cid.ID, error) {
|
testResolver.SetResolveFunc(func(_ context.Context, _, name string) (cid.ID, error) {
|
||||||
return tp.ContainerID(name)
|
return tp.ContainerID(name)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@ -156,12 +182,19 @@ func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *hand
|
||||||
user.IDFromKey(&owner, key.PrivateKey.PublicKey)
|
user.IDFromKey(&owner, key.PrivateKey.PublicKey)
|
||||||
|
|
||||||
memCli, err := tree.NewTreeServiceClientMemory()
|
memCli, err := tree.NewTreeServiceClientMemory()
|
||||||
require.NoError(t, err)
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
treeMock := tree.NewTree(memCli, zap.NewExample())
|
treeMock := tree.NewTree(memCli, zap.NewExample())
|
||||||
|
|
||||||
features := &layer.FeatureSettingsMock{}
|
features := &layer.FeatureSettingsMock{}
|
||||||
|
|
||||||
|
pool, err := ants.NewPool(1)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
layerCfg := &layer.Config{
|
layerCfg := &layer.Config{
|
||||||
Cache: layer.NewCache(cacheCfg),
|
Cache: layer.NewCache(cacheCfg),
|
||||||
AnonKey: layer.AnonymousKey{Key: key},
|
AnonKey: layer.AnonymousKey{Key: key},
|
||||||
|
@ -169,36 +202,43 @@ func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *hand
|
||||||
TreeService: treeMock,
|
TreeService: treeMock,
|
||||||
Features: features,
|
Features: features,
|
||||||
GateOwner: owner,
|
GateOwner: owner,
|
||||||
|
WorkerPool: pool,
|
||||||
}
|
}
|
||||||
|
|
||||||
var pp netmap.PlacementPolicy
|
var pp netmap.PlacementPolicy
|
||||||
err = pp.DecodeString("REP 1")
|
err = pp.DecodeString("REP 1")
|
||||||
require.NoError(t, err)
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
cfg := &configMock{
|
cfg := &configMock{
|
||||||
defaultPolicy: pp,
|
defaultPolicy: pp,
|
||||||
}
|
}
|
||||||
h := &handler{
|
h := &handler{
|
||||||
log: l,
|
log: log,
|
||||||
obj: layer.NewLayer(l, tp, layerCfg),
|
obj: layer.NewLayer(log, tp, layerCfg),
|
||||||
cfg: cfg,
|
cfg: cfg,
|
||||||
ape: newAPEMock(),
|
ape: newAPEMock(),
|
||||||
frostfsid: newFrostfsIDMock(),
|
frostfsid: newFrostfsIDMock(),
|
||||||
}
|
}
|
||||||
|
|
||||||
return &handlerContext{
|
accessBox, err := newTestAccessBox(key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &handlerContextBase{
|
||||||
owner: owner,
|
owner: owner,
|
||||||
t: t,
|
|
||||||
h: h,
|
h: h,
|
||||||
tp: tp,
|
tp: tp,
|
||||||
tree: treeMock,
|
tree: treeMock,
|
||||||
context: middleware.SetBox(context.Background(), &middleware.Box{AccessBox: newTestAccessBox(t, key)}),
|
context: middleware.SetBox(context.Background(), &middleware.Box{AccessBox: accessBox}),
|
||||||
config: cfg,
|
config: cfg,
|
||||||
|
|
||||||
layerFeatures: features,
|
layerFeatures: features,
|
||||||
treeMock: memCli,
|
treeMock: memCli,
|
||||||
cache: layerCfg.Cache,
|
cache: layerCfg.Cache,
|
||||||
}
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
|
func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
|
||||||
|
@ -216,12 +256,14 @@ func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
|
||||||
Buckets: minCacheCfg,
|
Buckets: minCacheCfg,
|
||||||
System: minCacheCfg,
|
System: minCacheCfg,
|
||||||
AccessControl: minCacheCfg,
|
AccessControl: minCacheCfg,
|
||||||
|
Network: &cache.NetworkCacheConfig{Lifetime: minCacheCfg.Lifetime},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type apeMock struct {
|
type apeMock struct {
|
||||||
chainMap map[engine.Target][]*chain.Chain
|
chainMap map[engine.Target][]*chain.Chain
|
||||||
policyMap map[string][]byte
|
policyMap map[string][]byte
|
||||||
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
func newAPEMock() *apeMock {
|
func newAPEMock() *apeMock {
|
||||||
|
@ -265,6 +307,10 @@ func (a *apeMock) DeletePolicy(namespace string, cnrID cid.ID) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *apeMock) PutBucketPolicy(ns string, cnrID cid.ID, policy []byte, chain []*chain.Chain) error {
|
func (a *apeMock) PutBucketPolicy(ns string, cnrID cid.ID, policy []byte, chain []*chain.Chain) error {
|
||||||
|
if a.err != nil {
|
||||||
|
return a.err
|
||||||
|
}
|
||||||
|
|
||||||
if err := a.PutPolicy(ns, cnrID, policy); err != nil {
|
if err := a.PutPolicy(ns, cnrID, policy); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -279,6 +325,10 @@ func (a *apeMock) PutBucketPolicy(ns string, cnrID cid.ID, policy []byte, chain
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *apeMock) DeleteBucketPolicy(ns string, cnrID cid.ID, chainIDs []chain.ID) error {
|
func (a *apeMock) DeleteBucketPolicy(ns string, cnrID cid.ID, chainIDs []chain.ID) error {
|
||||||
|
if a.err != nil {
|
||||||
|
return a.err
|
||||||
|
}
|
||||||
|
|
||||||
if err := a.DeletePolicy(ns, cnrID); err != nil {
|
if err := a.DeletePolicy(ns, cnrID); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -292,6 +342,10 @@ func (a *apeMock) DeleteBucketPolicy(ns string, cnrID cid.ID, chainIDs []chain.I
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *apeMock) GetBucketPolicy(ns string, cnrID cid.ID) ([]byte, error) {
|
func (a *apeMock) GetBucketPolicy(ns string, cnrID cid.ID) ([]byte, error) {
|
||||||
|
if a.err != nil {
|
||||||
|
return nil, a.err
|
||||||
|
}
|
||||||
|
|
||||||
policy, ok := a.policyMap[ns+cnrID.EncodeToString()]
|
policy, ok := a.policyMap[ns+cnrID.EncodeToString()]
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, errors.New("not found")
|
return nil, errors.New("not found")
|
||||||
|
@ -301,6 +355,10 @@ func (a *apeMock) GetBucketPolicy(ns string, cnrID cid.ID) ([]byte, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *apeMock) SaveACLChains(cid string, chains []*chain.Chain) error {
|
func (a *apeMock) SaveACLChains(cid string, chains []*chain.Chain) error {
|
||||||
|
if a.err != nil {
|
||||||
|
return a.err
|
||||||
|
}
|
||||||
|
|
||||||
for i := range chains {
|
for i := range chains {
|
||||||
if err := a.AddChain(engine.ContainerTarget(cid), chains[i]); err != nil {
|
if err := a.AddChain(engine.ContainerTarget(cid), chains[i]); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -342,10 +400,11 @@ func createTestBucket(hc *handlerContext, bktName string) *data.BucketInfo {
|
||||||
}
|
}
|
||||||
|
|
||||||
func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.ObjectLockConfiguration) *data.BucketInfo {
|
func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.ObjectLockConfiguration) *data.BucketInfo {
|
||||||
res, err := hc.MockedPool().CreateContainer(hc.Context(), layer.PrmContainerCreate{
|
res, err := hc.MockedPool().CreateContainer(hc.Context(), frostfs.PrmContainerCreate{
|
||||||
Creator: hc.owner,
|
Creator: hc.owner,
|
||||||
Name: bktName,
|
Name: bktName,
|
||||||
AdditionalAttributes: [][2]string{{layer.AttributeLockEnabled, "true"}},
|
AdditionalAttributes: [][2]string{{layer.AttributeLockEnabled, "true"}},
|
||||||
|
Policy: getPlacementPolicy(),
|
||||||
})
|
})
|
||||||
require.NoError(hc.t, err)
|
require.NoError(hc.t, err)
|
||||||
|
|
||||||
|
@ -357,6 +416,7 @@ func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.Obj
|
||||||
ObjectLockEnabled: true,
|
ObjectLockEnabled: true,
|
||||||
Owner: ownerID,
|
Owner: ownerID,
|
||||||
HomomorphicHashDisabled: res.HomomorphicHashDisabled,
|
HomomorphicHashDisabled: res.HomomorphicHashDisabled,
|
||||||
|
PlacementPolicy: getPlacementPolicy(),
|
||||||
}
|
}
|
||||||
|
|
||||||
key, err := keys.NewPrivateKey()
|
key, err := keys.NewPrivateKey()
|
||||||
|
@ -389,7 +449,7 @@ func createTestObject(hc *handlerContext, bktInfo *data.BucketInfo, objName stri
|
||||||
extObjInfo, err := hc.Layer().PutObject(hc.Context(), &layer.PutObjectParams{
|
extObjInfo, err := hc.Layer().PutObject(hc.Context(), &layer.PutObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Object: objName,
|
Object: objName,
|
||||||
Size: uint64(len(content)),
|
Size: ptr(uint64(len(content))),
|
||||||
Reader: bytes.NewReader(content),
|
Reader: bytes.NewReader(content),
|
||||||
Header: header,
|
Header: header,
|
||||||
Encryption: encryption,
|
Encryption: encryption,
|
||||||
|
@ -416,6 +476,7 @@ func prepareTestRequestWithQuery(hc *handlerContext, bktName, objName string, qu
|
||||||
r.URL.RawQuery = query.Encode()
|
r.URL.RawQuery = query.Encode()
|
||||||
|
|
||||||
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
|
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
|
||||||
|
reqInfo.User = hc.owner.String()
|
||||||
r = r.WithContext(middleware.SetReqInfo(hc.Context(), reqInfo))
|
r = r.WithContext(middleware.SetReqInfo(hc.Context(), reqInfo))
|
||||||
|
|
||||||
return w, r
|
return w, r
|
||||||
|
@ -475,3 +536,10 @@ func readResponse(t *testing.T, w *httptest.ResponseRecorder, status int, model
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getPlacementPolicy() (p netmap.PlacementPolicy) {
|
||||||
|
var r netmap.ReplicaDescriptor
|
||||||
|
r.SetNumberOfObjects(1)
|
||||||
|
p.AddReplicas([]netmap.ReplicaDescriptor{r}...)
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
|
@ -27,19 +27,16 @@ func getRangeToDetectContentType(maxSize uint64) *layer.RangeParams {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
conditional, err := parseConditionalHeaders(r.Header)
|
conditional := parseConditionalHeaders(r.Header, h.reqLogger(ctx))
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(w, "could not parse request params", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
p := &layer.HeadObjectParams{
|
p := &layer.HeadObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
|
@ -47,26 +44,27 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
}
|
}
|
||||||
|
|
||||||
extendedInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), p)
|
extendedInfo, err := h.obj.GetExtendedObjectInfo(ctx, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not find object", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not find object", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
info := extendedInfo.ObjectInfo
|
info := extendedInfo.ObjectInfo
|
||||||
|
|
||||||
encryptionParams, err := formEncryptionParams(r)
|
encryptionParams, err := h.formEncryptionParams(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
|
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
|
||||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
|
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||||
h.logAndSendError(w, "precondition failed", reqInfo, err)
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -76,9 +74,17 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
VersionID: info.VersionID(),
|
VersionID: info.VersionID(),
|
||||||
}
|
}
|
||||||
|
|
||||||
tagSet, lockInfo, err := h.obj.GetObjectTaggingAndLock(r.Context(), t, extendedInfo.NodeVersion)
|
tagSet, lockInfo, err := h.obj.GetObjectTaggingAndLock(ctx, t, extendedInfo.NodeVersion)
|
||||||
if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) {
|
if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) {
|
||||||
h.logAndSendError(w, "could not get object meta data", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get object meta data", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
|
||||||
|
if errors.IsS3Error(err, errors.ErrNotModified) {
|
||||||
|
writeNotModifiedHeaders(w.Header(), extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.MD5Enabled())
|
||||||
|
}
|
||||||
|
h.logAndSendError(ctx, w, "precondition failed", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -91,15 +97,15 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
BucketInfo: bktInfo,
|
BucketInfo: bktInfo,
|
||||||
}
|
}
|
||||||
|
|
||||||
objPayload, err := h.obj.GetObject(r.Context(), getParams)
|
objPayload, err := h.obj.GetObject(ctx, getParams)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get object", reqInfo, err, zap.Stringer("oid", info.ID))
|
h.logAndSendError(ctx, w, "could not get object", reqInfo, err, zap.Stringer("oid", info.ID))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
buffer, err := io.ReadAll(objPayload)
|
buffer, err := io.ReadAll(objPayload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not partly read payload to detect content type", reqInfo, err, zap.Stringer("oid", info.ID))
|
h.logAndSendError(ctx, w, "could not partly read payload to detect content type", reqInfo, err, zap.Stringer("oid", info.ID))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -108,13 +114,7 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.setLockingHeaders(bktInfo, lockInfo, w.Header()); err != nil {
|
if err = h.setLockingHeaders(bktInfo, lockInfo, w.Header()); err != nil {
|
||||||
h.logAndSendError(w, "could not get locking info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get locking info", reqInfo, err)
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -123,11 +123,12 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -141,7 +142,7 @@ func (h *handler) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.WriteResponse(w, http.StatusOK, nil, middleware.MimeNone); err != nil {
|
if err = middleware.WriteResponse(w, http.StatusOK, nil, middleware.MimeNone); err != nil {
|
||||||
h.logAndSendError(w, "write response", reqInfo, err)
|
h.logAndSendError(ctx, w, "write response", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,13 +6,10 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
@ -71,6 +68,16 @@ func TestConditionalHead(t *testing.T) {
|
||||||
api.IfModifiedSince: zeroTime.UTC().Format(http.TimeFormat),
|
api.IfModifiedSince: zeroTime.UTC().Format(http.TimeFormat),
|
||||||
}
|
}
|
||||||
headObject(t, tc, bktName, objName, headers, http.StatusNotModified)
|
headObject(t, tc, bktName, objName, headers, http.StatusNotModified)
|
||||||
|
|
||||||
|
headers = map[string]string{
|
||||||
|
api.IfUnmodifiedSince: zeroTime.UTC().Format(time.RFC3339), // invalid format, header is ignored
|
||||||
|
}
|
||||||
|
headObject(t, tc, bktName, objName, headers, http.StatusOK)
|
||||||
|
|
||||||
|
headers = map[string]string{
|
||||||
|
api.IfModifiedSince: objInfo.Created.Add(time.Minute).Format(time.RFC3339), // invalid format, header is ignored
|
||||||
|
}
|
||||||
|
headObject(t, tc, bktName, objName, headers, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func headObject(t *testing.T, tc *handlerContext, bktName, objName string, headers map[string]string, status int) {
|
func headObject(t *testing.T, tc *handlerContext, bktName, objName string, headers map[string]string, status int) {
|
||||||
|
@ -84,31 +91,6 @@ func headObject(t *testing.T, tc *handlerContext, bktName, objName string, heade
|
||||||
assertStatus(t, w, status)
|
assertStatus(t, w, status)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInvalidAccessThroughCache(t *testing.T) {
|
|
||||||
hc := prepareHandlerContext(t)
|
|
||||||
|
|
||||||
bktName, objName := "bucket-for-cache", "obj-for-cache"
|
|
||||||
bktInfo, _ := createBucketAndObject(hc, bktName, objName)
|
|
||||||
setContainerEACL(hc, bktInfo.CID)
|
|
||||||
|
|
||||||
headObject(t, hc, bktName, objName, nil, http.StatusOK)
|
|
||||||
|
|
||||||
w, r := prepareTestRequest(hc, bktName, objName, nil)
|
|
||||||
hc.Handler().HeadObjectHandler(w, r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: newTestAccessBox(t, nil)})))
|
|
||||||
assertStatus(t, w, http.StatusForbidden)
|
|
||||||
}
|
|
||||||
|
|
||||||
func setContainerEACL(hc *handlerContext, cnrID cid.ID) {
|
|
||||||
table := eacl.NewTable()
|
|
||||||
table.SetCID(cnrID)
|
|
||||||
for _, op := range fullOps {
|
|
||||||
table.AddRecord(getOthersRecord(op, eacl.ActionDeny))
|
|
||||||
}
|
|
||||||
|
|
||||||
err := hc.MockedPool().SetContainerEACL(hc.Context(), *table, nil)
|
|
||||||
require.NoError(hc.t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHeadObject(t *testing.T) {
|
func TestHeadObject(t *testing.T) {
|
||||||
hc := prepareHandlerContextWithMinCache(t)
|
hc := prepareHandlerContextWithMinCache(t)
|
||||||
bktName, objName := "bucket", "obj"
|
bktName, objName := "bucket", "obj"
|
||||||
|
@ -123,8 +105,29 @@ func TestHeadObject(t *testing.T) {
|
||||||
hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
|
hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
|
||||||
hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{})
|
hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{})
|
||||||
|
|
||||||
headObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), s3errors.ErrNoSuchVersion)
|
headObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), apierr.ErrNoSuchVersion)
|
||||||
headObjectAssertS3Error(hc, bktName, objName, emptyVersion, s3errors.ErrNoSuchKey)
|
headObjectAssertS3Error(hc, bktName, objName, emptyVersion, apierr.ErrNoSuchKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHeadObjectNotModifiedHeaders(t *testing.T) {
|
||||||
|
hc := prepareHandlerContextWithMinCache(t)
|
||||||
|
bktName, objName, metadataHeader := "bucket", "obj", api.MetadataPrefix+"header"
|
||||||
|
createVersionedBucket(hc, bktName)
|
||||||
|
header := putObjectWithHeaders(hc, bktName, objName, map[string]string{api.CacheControl: "value", metadataHeader: "value"})
|
||||||
|
etag, versionID := header.Get(api.ETag), header.Get(api.AmzVersionID)
|
||||||
|
require.NotEmpty(t, etag)
|
||||||
|
require.NotEmpty(t, versionID)
|
||||||
|
|
||||||
|
putObjectTagging(t, hc, bktName, objName, map[string]string{"key": "value"})
|
||||||
|
|
||||||
|
w := headObjectWithHeaders(hc, bktName, objName, emptyVersion, map[string]string{api.IfNoneMatch: etag})
|
||||||
|
require.Equal(t, http.StatusNotModified, w.Code)
|
||||||
|
require.Equal(t, "1", w.Header().Get(api.AmzTaggingCount))
|
||||||
|
require.Equal(t, etag, w.Header().Get(api.ETag))
|
||||||
|
require.NotEmpty(t, w.Header().Get(api.LastModified))
|
||||||
|
require.Equal(t, versionID, w.Header().Get(api.AmzVersionID))
|
||||||
|
require.Equal(t, "value", w.Header().Get(api.CacheControl))
|
||||||
|
require.Equal(t, []string{"value"}, w.Header()[metadataHeader])
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIsAvailableToResolve(t *testing.T) {
|
func TestIsAvailableToResolve(t *testing.T) {
|
||||||
|
@ -147,21 +150,25 @@ func TestIsAvailableToResolve(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTestAccessBox(t *testing.T, key *keys.PrivateKey) *accessbox.Box {
|
func newTestAccessBox(key *keys.PrivateKey) (*accessbox.Box, error) {
|
||||||
var err error
|
var err error
|
||||||
if key == nil {
|
if key == nil {
|
||||||
key, err = keys.NewPrivateKey()
|
key, err = keys.NewPrivateKey()
|
||||||
require.NoError(t, err)
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var btoken bearer.Token
|
var btoken bearer.Token
|
||||||
btoken.SetEACLTable(*eacl.NewTable())
|
btoken.SetImpersonate(true)
|
||||||
err = btoken.Sign(key.PrivateKey)
|
err = btoken.Sign(key.PrivateKey)
|
||||||
require.NoError(t, err)
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
return &accessbox.Box{
|
return &accessbox.Box{
|
||||||
Gate: &accessbox.GateData{
|
Gate: &accessbox.GateData{
|
||||||
BearerToken: &btoken,
|
BearerToken: &btoken,
|
||||||
},
|
},
|
||||||
}
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,15 +7,16 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func (h *handler) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, LocationResponse{Location: bktInfo.LocationConstraint}); err != nil {
|
if err = middleware.EncodeToResponse(w, LocationResponse{Location: bktInfo.LocationConstraint}); err != nil {
|
||||||
h.logAndSendError(w, "couldn't encode bucket location response", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't encode bucket location response", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
309
api/handler/lifecycle.go
Normal file
309
api/handler/lifecycle.go
Normal file
|
@ -0,0 +1,309 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/md5"
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
|
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
maxRules = 1000
|
||||||
|
maxRuleIDLen = 255
|
||||||
|
maxNewerNoncurrentVersions = 100
|
||||||
|
)
|
||||||
|
|
||||||
|
func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, err := h.obj.GetBucketLifecycleConfiguration(ctx, bktInfo)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not get bucket lifecycle configuration", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = middleware.EncodeToResponse(w, cfg); err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not encode GetBucketLifecycle response", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
|
||||||
|
tee := io.TeeReader(r.Body, &buf)
|
||||||
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
|
// Content-Md5 is required and should be set
|
||||||
|
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
|
||||||
|
if _, ok := r.Header[api.ContentMD5]; !ok {
|
||||||
|
h.logAndSendError(ctx, w, "missing Content-MD5", reqInfo, apierr.GetAPIError(apierr.ErrMissingContentMD5))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
headerMD5, err := base64.StdEncoding.DecodeString(r.Header.Get(api.ContentMD5))
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "invalid Content-MD5", reqInfo, apierr.GetAPIError(apierr.ErrInvalidDigest))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := new(data.LifecycleConfiguration)
|
||||||
|
if err = h.cfg.NewXMLDecoder(tee, r.UserAgent()).Decode(cfg); err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not decode body", reqInfo, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
bodyMD5, err := getContentMD5(&buf)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not get content md5", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if !bytes.Equal(headerMD5, bodyMD5) {
|
||||||
|
h.logAndSendError(ctx, w, "Content-MD5 does not match", reqInfo, apierr.GetAPIError(apierr.ErrInvalidDigest))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
networkInfo, err := h.obj.GetNetworkInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = checkLifecycleConfiguration(ctx, cfg, &networkInfo); err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "invalid lifecycle configuration", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
params := &layer.PutBucketLifecycleParams{
|
||||||
|
BktInfo: bktInfo,
|
||||||
|
LifecycleCfg: cfg,
|
||||||
|
}
|
||||||
|
|
||||||
|
params.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = h.obj.PutBucketLifecycleConfiguration(ctx, params); err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not put bucket lifecycle configuration", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = h.obj.DeleteBucketLifecycleConfiguration(ctx, bktInfo); err != nil {
|
||||||
|
h.logAndSendError(ctx, w, "could not delete bucket lifecycle configuration", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusNoContent)
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkLifecycleConfiguration(ctx context.Context, cfg *data.LifecycleConfiguration, ni *netmap.NetworkInfo) error {
|
||||||
|
now := layer.TimeNow(ctx)
|
||||||
|
|
||||||
|
if len(cfg.Rules) > maxRules {
|
||||||
|
return fmt.Errorf("%w: number of rules cannot be greater than %d", apierr.GetAPIError(apierr.ErrInvalidRequest), maxRules)
|
||||||
|
}
|
||||||
|
|
||||||
|
ids := make(map[string]struct{}, len(cfg.Rules))
|
||||||
|
for i, rule := range cfg.Rules {
|
||||||
|
if rule.ID == "" {
|
||||||
|
id, err := uuid.NewRandom()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("generate uuid: %w", err)
|
||||||
|
}
|
||||||
|
cfg.Rules[i].ID = id.String()
|
||||||
|
rule.ID = id.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := ids[rule.ID]; ok {
|
||||||
|
return fmt.Errorf("%w: duplicate 'ID': %s", apierr.GetAPIError(apierr.ErrInvalidArgument), rule.ID)
|
||||||
|
}
|
||||||
|
ids[rule.ID] = struct{}{}
|
||||||
|
|
||||||
|
if len(rule.ID) > maxRuleIDLen {
|
||||||
|
return fmt.Errorf("%w: 'ID' value cannot be longer than %d characters", apierr.GetAPIError(apierr.ErrInvalidArgument), maxRuleIDLen)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.Status != data.LifecycleStatusEnabled && rule.Status != data.LifecycleStatusDisabled {
|
||||||
|
return fmt.Errorf("%w: invalid lifecycle status: %s", apierr.GetAPIError(apierr.ErrMalformedXML), rule.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.AbortIncompleteMultipartUpload == nil && rule.Expiration == nil && rule.NonCurrentVersionExpiration == nil {
|
||||||
|
return fmt.Errorf("%w: at least one action needs to be specified in a rule", apierr.GetAPIError(apierr.ErrInvalidRequest))
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.AbortIncompleteMultipartUpload != nil {
|
||||||
|
if rule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil &&
|
||||||
|
*rule.AbortIncompleteMultipartUpload.DaysAfterInitiation <= 0 {
|
||||||
|
return fmt.Errorf("%w: days after initiation must be a positive integer", apierr.GetAPIError(apierr.ErrInvalidArgument))
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.Filter != nil && (rule.Filter.Tag != nil || (rule.Filter.And != nil && len(rule.Filter.And.Tags) > 0)) {
|
||||||
|
return fmt.Errorf("%w: abort incomplete multipart upload cannot be specified with tags", apierr.GetAPIError(apierr.ErrInvalidRequest))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.Expiration != nil {
|
||||||
|
if rule.Expiration.ExpiredObjectDeleteMarker != nil {
|
||||||
|
if rule.Expiration.Days != nil || rule.Expiration.Date != "" {
|
||||||
|
return fmt.Errorf("%w: expired object delete marker cannot be specified with days or date", apierr.GetAPIError(apierr.ErrMalformedXML))
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.Filter != nil && (rule.Filter.Tag != nil || (rule.Filter.And != nil && len(rule.Filter.And.Tags) > 0)) {
|
||||||
|
return fmt.Errorf("%w: expired object delete marker cannot be specified with tags", apierr.GetAPIError(apierr.ErrInvalidRequest))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.Expiration.Days != nil && *rule.Expiration.Days <= 0 {
|
||||||
|
return fmt.Errorf("%w: expiration days must be a positive integer", apierr.GetAPIError(apierr.ErrInvalidArgument))
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.Expiration.Date != "" {
|
||||||
|
parsedTime, err := time.Parse("2006-01-02T15:04:05Z", rule.Expiration.Date)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("%w: invalid value of expiration date: %s", apierr.GetAPIError(apierr.ErrInvalidArgument), rule.Expiration.Date)
|
||||||
|
}
|
||||||
|
|
||||||
|
epoch, err := util.TimeToEpoch(ni, now, parsedTime)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("convert time to epoch: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg.Rules[i].Expiration.Epoch = &epoch
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.NonCurrentVersionExpiration != nil {
|
||||||
|
if rule.NonCurrentVersionExpiration.NewerNonCurrentVersions != nil && rule.NonCurrentVersionExpiration.NonCurrentDays == nil {
|
||||||
|
return fmt.Errorf("%w: newer noncurrent versions cannot be specified without noncurrent days", apierr.GetAPIError(apierr.ErrMalformedXML))
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.NonCurrentVersionExpiration.NewerNonCurrentVersions != nil &&
|
||||||
|
(*rule.NonCurrentVersionExpiration.NewerNonCurrentVersions > maxNewerNoncurrentVersions ||
|
||||||
|
*rule.NonCurrentVersionExpiration.NewerNonCurrentVersions <= 0) {
|
||||||
|
return fmt.Errorf("%w: newer noncurrent versions must be a positive integer up to %d", apierr.GetAPIError(apierr.ErrInvalidArgument),
|
||||||
|
maxNewerNoncurrentVersions)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.NonCurrentVersionExpiration.NonCurrentDays != nil && *rule.NonCurrentVersionExpiration.NonCurrentDays <= 0 {
|
||||||
|
return fmt.Errorf("%w: noncurrent days must be a positive integer", apierr.GetAPIError(apierr.ErrInvalidArgument))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := checkLifecycleRuleFilter(rule.Filter); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.Filter != nil && rule.Filter.Prefix != "" && rule.Prefix != "" {
|
||||||
|
return fmt.Errorf("%w: rule cannot have two prefixes", apierr.GetAPIError(apierr.ErrMalformedXML))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkLifecycleRuleFilter(filter *data.LifecycleRuleFilter) error {
|
||||||
|
if filter == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var fields int
|
||||||
|
|
||||||
|
if filter.And != nil {
|
||||||
|
fields++
|
||||||
|
for _, tag := range filter.And.Tags {
|
||||||
|
err := checkTag(tag)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if filter.And.ObjectSizeLessThan != nil {
|
||||||
|
if *filter.And.ObjectSizeLessThan == 0 {
|
||||||
|
return fmt.Errorf("%w: the maximum object size must be more than 0", apierr.GetAPIError(apierr.ErrInvalidRequest))
|
||||||
|
}
|
||||||
|
|
||||||
|
if filter.And.ObjectSizeGreaterThan != nil && *filter.And.ObjectSizeLessThan <= *filter.And.ObjectSizeGreaterThan {
|
||||||
|
return fmt.Errorf("%w: the maximum object size must be larger than the minimum object size", apierr.GetAPIError(apierr.ErrInvalidRequest))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if filter.ObjectSizeGreaterThan != nil {
|
||||||
|
fields++
|
||||||
|
}
|
||||||
|
|
||||||
|
if filter.ObjectSizeLessThan != nil {
|
||||||
|
if *filter.ObjectSizeLessThan == 0 {
|
||||||
|
return fmt.Errorf("%w: the maximum object size must be more than 0", apierr.GetAPIError(apierr.ErrInvalidRequest))
|
||||||
|
}
|
||||||
|
fields++
|
||||||
|
}
|
||||||
|
|
||||||
|
if filter.Prefix != "" {
|
||||||
|
fields++
|
||||||
|
}
|
||||||
|
|
||||||
|
if filter.Tag != nil {
|
||||||
|
fields++
|
||||||
|
err := checkTag(*filter.Tag)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if fields > 1 {
|
||||||
|
return fmt.Errorf("%w: filter cannot have more than one field", apierr.GetAPIError(apierr.ErrMalformedXML))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getContentMD5(reader io.Reader) ([]byte, error) {
|
||||||
|
hash := md5.New()
|
||||||
|
_, err := io.Copy(hash, reader)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return hash.Sum(nil), nil
|
||||||
|
}
|
563
api/handler/lifecycle_test.go
Normal file
563
api/handler/lifecycle_test.go
Normal file
|
@ -0,0 +1,563 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/md5"
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/xml"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
|
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
|
"github.com/mr-tron/base58"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestPutBucketLifecycleConfiguration is a table-driven test of the
// PutBucketLifecycle validation path. Cases with a non-zero errorCode expect
// the PUT to be rejected with that S3 error; the remaining cases must round-
// trip: PUT, GET back the identical rules, DELETE, then GET must fail with
// NoSuchLifecycleConfiguration.
func TestPutBucketLifecycleConfiguration(t *testing.T) {
	hc := prepareHandlerContextWithMinCache(t)

	bktName := "bucket-lifecycle"
	createBucket(hc, bktName)

	for _, tc := range []struct {
		name      string
		body      *data.LifecycleConfiguration
		errorCode apierr.ErrorCode
	}{
		{
			name: "correct configuration",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						ID:     "rule-1",
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
							Date: time.Now().Format("2006-01-02T15:04:05.000Z"),
						},
						Filter: &data.LifecycleRuleFilter{
							And: &data.LifecycleRuleAndOperator{
								Prefix:                "prefix/",
								Tags:                  []data.Tag{{Key: "key", Value: "value"}, {Key: "tag", Value: ""}},
								ObjectSizeGreaterThan: ptr(uint64(100)),
							},
						},
					},
					{
						ID:     "rule-2",
						Status: data.LifecycleStatusEnabled,
						AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
							DaysAfterInitiation: ptr(14),
						},
						Expiration: &data.LifecycleExpiration{
							ExpiredObjectDeleteMarker: ptr(true),
						},
						Filter: &data.LifecycleRuleFilter{
							ObjectSizeLessThan: ptr(uint64(100)),
						},
						NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
							NewerNonCurrentVersions: ptr(1),
							NonCurrentDays:          ptr(21),
						},
					},
				},
			},
		},
		{
			// One rule more than maxRules must be rejected.
			name: "too many rules",
			body: func() *data.LifecycleConfiguration {
				lifecycle := new(data.LifecycleConfiguration)
				for i := 0; i <= maxRules; i++ {
					lifecycle.Rules = append(lifecycle.Rules, data.LifecycleRule{
						ID: "Rule" + strconv.Itoa(i),
					})
				}
				return lifecycle
			}(),
			errorCode: apierr.ErrInvalidRequest,
		},
		{
			name: "duplicate rule ID",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						ID:     "Rule",
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
						},
					},
					{
						ID:     "Rule",
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidArgument,
		},
		{
			// Random base58 string one character over the limit.
			name: "too long rule ID",
			body: func() *data.LifecycleConfiguration {
				id := make([]byte, maxRuleIDLen+1)
				_, err := io.ReadFull(rand.Reader, id)
				require.NoError(t, err)
				return &data.LifecycleConfiguration{
					Rules: []data.LifecycleRule{
						{
							ID: base58.Encode(id)[:maxRuleIDLen+1],
						},
					},
				}
			}(),
			errorCode: apierr.ErrInvalidArgument,
		},
		{
			name: "invalid status",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: "invalid",
					},
				},
			},
			errorCode: apierr.ErrMalformedXML,
		},
		{
			// A rule must declare at least one action.
			name: "no actions",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Filter: &data.LifecycleRuleFilter{
							Prefix: "prefix/",
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidRequest,
		},
		{
			name: "invalid days after initiation",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
							DaysAfterInitiation: ptr(0),
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidArgument,
		},
		{
			// ExpiredObjectDeleteMarker may not be combined with Days/Date.
			name: "invalid expired object delete marker declaration",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days:                      ptr(21),
							ExpiredObjectDeleteMarker: ptr(false),
						},
					},
				},
			},
			errorCode: apierr.ErrMalformedXML,
		},
		{
			name: "invalid expiration days",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(0),
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidArgument,
		},
		{
			name: "invalid expiration date",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Date: "invalid",
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidArgument,
		},
		{
			name: "newer noncurrent versions is too small",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
							NonCurrentDays:          ptr(1),
							NewerNonCurrentVersions: ptr(0),
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidArgument,
		},
		{
			name: "newer noncurrent versions is too large",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
							NonCurrentDays:          ptr(1),
							NewerNonCurrentVersions: ptr(101),
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidArgument,
		},
		{
			name: "invalid noncurrent days",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
							NonCurrentDays: ptr(0),
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidArgument,
		},
		{
			name: "more than one filter field",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
						},
						Filter: &data.LifecycleRuleFilter{
							Prefix:                "prefix/",
							ObjectSizeGreaterThan: ptr(uint64(100)),
						},
					},
				},
			},
			errorCode: apierr.ErrMalformedXML,
		},
		{
			// Empty tag key is invalid.
			name: "invalid tag in filter",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
						},
						Filter: &data.LifecycleRuleFilter{
							Tag: &data.Tag{},
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidTagKey,
		},
		{
			name: "abort incomplete multipart upload with tag",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
							DaysAfterInitiation: ptr(14),
						},
						Filter: &data.LifecycleRuleFilter{
							Tag: &data.Tag{Key: "key", Value: "value"},
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidRequest,
		},
		{
			name: "expired object delete marker with tag",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							ExpiredObjectDeleteMarker: ptr(true),
						},
						Filter: &data.LifecycleRuleFilter{
							And: &data.LifecycleRuleAndOperator{
								Tags: []data.Tag{{Key: "key", Value: "value"}},
							},
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidRequest,
		},
		{
			// LessThan must be strictly greater than GreaterThan.
			name: "invalid size range",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
						},
						Filter: &data.LifecycleRuleFilter{
							And: &data.LifecycleRuleAndOperator{
								ObjectSizeGreaterThan: ptr(uint64(100)),
								ObjectSizeLessThan:    ptr(uint64(100)),
							},
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidRequest,
		},
		{
			// A prefix in the filter and a top-level rule prefix conflict.
			name: "two prefixes",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
						},
						Filter: &data.LifecycleRuleFilter{
							Prefix: "prefix-1/",
						},
						Prefix: "prefix-2/",
					},
				},
			},
			errorCode: apierr.ErrMalformedXML,
		},
		{
			name: "newer noncurrent versions without noncurrent days",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
							NewerNonCurrentVersions: ptr(10),
						},
					},
				},
			},
			errorCode: apierr.ErrMalformedXML,
		},
		{
			name: "invalid maximum object size in filter",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
						},
						Filter: &data.LifecycleRuleFilter{
							ObjectSizeLessThan: ptr(uint64(0)),
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidRequest,
		},
		{
			name: "invalid maximum object size in filter and",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
						},
						Filter: &data.LifecycleRuleFilter{
							And: &data.LifecycleRuleAndOperator{
								Prefix:             "prefix/",
								ObjectSizeLessThan: ptr(uint64(0)),
							},
						},
					},
				},
			},
			errorCode: apierr.ErrInvalidRequest,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			if tc.errorCode > 0 {
				putBucketLifecycleConfigurationErr(hc, bktName, tc.body, apierr.GetAPIError(tc.errorCode))
				return
			}

			putBucketLifecycleConfiguration(hc, bktName, tc.body)

			// The handler mutates tc.body in place (generated IDs, epochs),
			// so an exact comparison against the GET result is valid.
			cfg := getBucketLifecycleConfiguration(hc, bktName)
			require.Equal(t, tc.body.Rules, cfg.Rules)

			deleteBucketLifecycleConfiguration(hc, bktName)
			getBucketLifecycleConfigurationErr(hc, bktName, apierr.GetAPIError(apierr.ErrNoSuchLifecycleConfiguration))
		})
	}
}
|
||||||
|
|
||||||
|
func TestPutBucketLifecycleIDGeneration(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
bktName := "bucket-lifecycle-id"
|
||||||
|
createBucket(hc, bktName)
|
||||||
|
|
||||||
|
lifecycle := &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
Expiration: &data.LifecycleExpiration{
|
||||||
|
Days: ptr(21),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
|
||||||
|
DaysAfterInitiation: ptr(14),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
putBucketLifecycleConfiguration(hc, bktName, lifecycle)
|
||||||
|
cfg := getBucketLifecycleConfiguration(hc, bktName)
|
||||||
|
require.Len(t, cfg.Rules, 2)
|
||||||
|
require.NotEmpty(t, cfg.Rules[0].ID)
|
||||||
|
require.NotEmpty(t, cfg.Rules[1].ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPutBucketLifecycleInvalidMD5(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
bktName := "bucket-lifecycle-md5"
|
||||||
|
createBucket(hc, bktName)
|
||||||
|
|
||||||
|
lifecycle := &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
Expiration: &data.LifecycleExpiration{
|
||||||
|
Days: ptr(21),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
w, r := prepareTestRequest(hc, bktName, "", lifecycle)
|
||||||
|
hc.Handler().PutBucketLifecycleHandler(w, r)
|
||||||
|
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrMissingContentMD5))
|
||||||
|
|
||||||
|
w, r = prepareTestRequest(hc, bktName, "", lifecycle)
|
||||||
|
r.Header.Set(api.ContentMD5, "")
|
||||||
|
hc.Handler().PutBucketLifecycleHandler(w, r)
|
||||||
|
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))
|
||||||
|
|
||||||
|
w, r = prepareTestRequest(hc, bktName, "", lifecycle)
|
||||||
|
r.Header.Set(api.ContentMD5, "some-hash")
|
||||||
|
hc.Handler().PutBucketLifecycleHandler(w, r)
|
||||||
|
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPutBucketLifecycleInvalidXML(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
bktName := "bucket-lifecycle-invalid-xml"
|
||||||
|
createBucket(hc, bktName)
|
||||||
|
|
||||||
|
cfg := &data.CORSConfiguration{}
|
||||||
|
body, err := xml.Marshal(cfg)
|
||||||
|
require.NoError(t, err)
|
||||||
|
contentMD5, err := getContentMD5(bytes.NewReader(body))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
w, r := prepareTestRequest(hc, bktName, "", cfg)
|
||||||
|
r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString(contentMD5))
|
||||||
|
hc.Handler().PutBucketLifecycleHandler(w, r)
|
||||||
|
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrMalformedXML))
|
||||||
|
}
|
||||||
|
|
||||||
|
func putBucketLifecycleConfiguration(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration) {
|
||||||
|
w := putBucketLifecycleConfigurationBase(hc, bktName, cfg)
|
||||||
|
assertStatus(hc.t, w, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func putBucketLifecycleConfigurationErr(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration, err apierr.Error) {
|
||||||
|
w := putBucketLifecycleConfigurationBase(hc, bktName, cfg)
|
||||||
|
assertS3Error(hc.t, w, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func putBucketLifecycleConfigurationBase(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration) *httptest.ResponseRecorder {
|
||||||
|
w, r := prepareTestRequest(hc, bktName, "", cfg)
|
||||||
|
|
||||||
|
rawBody, err := xml.Marshal(cfg)
|
||||||
|
require.NoError(hc.t, err)
|
||||||
|
|
||||||
|
hash := md5.New()
|
||||||
|
hash.Write(rawBody)
|
||||||
|
r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString(hash.Sum(nil)))
|
||||||
|
hc.Handler().PutBucketLifecycleHandler(w, r)
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
|
func getBucketLifecycleConfiguration(hc *handlerContext, bktName string) *data.LifecycleConfiguration {
|
||||||
|
w := getBucketLifecycleConfigurationBase(hc, bktName)
|
||||||
|
assertStatus(hc.t, w, http.StatusOK)
|
||||||
|
res := &data.LifecycleConfiguration{}
|
||||||
|
parseTestResponse(hc.t, w, res)
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func getBucketLifecycleConfigurationErr(hc *handlerContext, bktName string, err apierr.Error) {
|
||||||
|
w := getBucketLifecycleConfigurationBase(hc, bktName)
|
||||||
|
assertS3Error(hc.t, w, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getBucketLifecycleConfigurationBase(hc *handlerContext, bktName string) *httptest.ResponseRecorder {
|
||||||
|
w, r := prepareTestRequest(hc, bktName, "", nil)
|
||||||
|
hc.Handler().GetBucketLifecycleHandler(w, r)
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
|
func deleteBucketLifecycleConfiguration(hc *handlerContext, bktName string) {
|
||||||
|
w := deleteBucketLifecycleConfigurationBase(hc, bktName)
|
||||||
|
assertStatus(hc.t, w, http.StatusNoContent)
|
||||||
|
}
|
||||||
|
|
||||||
|
func deleteBucketLifecycleConfigurationBase(hc *handlerContext, bktName string) *httptest.ResponseRecorder {
|
||||||
|
w, r := prepareTestRequest(hc, bktName, "", nil)
|
||||||
|
hc.Handler().DeleteBucketLifecycleHandler(w, r)
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
|
// ptr returns a pointer to a copy of the provided value; handy for building
// literals with optional (*T) fields in tests.
func ptr[T any](v T) *T {
	out := v
	return &out
}
|
|
@ -15,12 +15,13 @@ func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
var (
|
var (
|
||||||
own user.ID
|
own user.ID
|
||||||
res *ListBucketsResponse
|
res *ListBucketsResponse
|
||||||
reqInfo = middleware.GetReqInfo(r.Context())
|
ctx = r.Context()
|
||||||
|
reqInfo = middleware.GetReqInfo(ctx)
|
||||||
)
|
)
|
||||||
|
|
||||||
list, err := h.obj.ListBuckets(r.Context())
|
list, err := h.obj.ListBuckets(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -43,6 +44,6 @@ func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, res); err != nil {
|
if err = middleware.EncodeToResponse(w, res); err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,7 +9,7 @@ import (
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||||
)
|
)
|
||||||
|
@ -26,34 +26,35 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bktInfo.ObjectLockEnabled {
|
if !bktInfo.ObjectLockEnabled {
|
||||||
h.logAndSendError(w, "couldn't put object locking configuration", reqInfo,
|
h.logAndSendError(ctx, w, "couldn't put object locking configuration", reqInfo,
|
||||||
apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotAllowed))
|
apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotAllowed))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
lockingConf := &data.ObjectLockConfiguration{}
|
lockingConf := &data.ObjectLockConfiguration{}
|
||||||
if err = h.cfg.NewXMLDecoder(r.Body).Decode(lockingConf); err != nil {
|
if err = h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(lockingConf); err != nil {
|
||||||
h.logAndSendError(w, "couldn't parse locking configuration", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't parse locking configuration", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = checkLockConfiguration(lockingConf); err != nil {
|
if err = checkLockConfiguration(lockingConf); err != nil {
|
||||||
h.logAndSendError(w, "invalid lock configuration", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid lock configuration", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -66,30 +67,31 @@ func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
|
||||||
Settings: &newSettings,
|
Settings: &newSettings,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutBucketSettings(r.Context(), sp); err != nil {
|
if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
|
||||||
h.logAndSendError(w, "couldn't put bucket settings", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't put bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bktInfo.ObjectLockEnabled {
|
if !bktInfo.ObjectLockEnabled {
|
||||||
h.logAndSendError(w, "object lock disabled", reqInfo,
|
h.logAndSendError(ctx, w, "object lock disabled", reqInfo,
|
||||||
apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
|
apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -101,33 +103,34 @@ func (h *handler) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, settings.LockConfiguration); err != nil {
|
if err = middleware.EncodeToResponse(w, settings.LockConfiguration); err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bktInfo.ObjectLockEnabled {
|
if !bktInfo.ObjectLockEnabled {
|
||||||
h.logAndSendError(w, "object lock disabled", reqInfo,
|
h.logAndSendError(ctx, w, "object lock disabled", reqInfo,
|
||||||
apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
|
apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
legalHold := &data.LegalHold{}
|
legalHold := &data.LegalHold{}
|
||||||
if err = h.cfg.NewXMLDecoder(r.Body).Decode(legalHold); err != nil {
|
if err = h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(legalHold); err != nil {
|
||||||
h.logAndSendError(w, "couldn't parse legal hold configuration", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't parse legal hold configuration", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if legalHold.Status != legalHoldOn && legalHold.Status != legalHoldOff {
|
if legalHold.Status != legalHoldOn && legalHold.Status != legalHoldOff {
|
||||||
h.logAndSendError(w, "invalid legal hold status", reqInfo,
|
h.logAndSendError(ctx, w, "invalid legal hold status", reqInfo,
|
||||||
fmt.Errorf("invalid status %s", legalHold.Status))
|
fmt.Errorf("invalid status %s", legalHold.Status))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -147,28 +150,29 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
|
||||||
|
|
||||||
p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
|
p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid copies number", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutLockInfo(r.Context(), p); err != nil {
|
if err = h.obj.PutLockInfo(ctx, p); err != nil {
|
||||||
h.logAndSendError(w, "couldn't head put legal hold", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't head put legal hold", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bktInfo.ObjectLockEnabled {
|
if !bktInfo.ObjectLockEnabled {
|
||||||
h.logAndSendError(w, "object lock disabled", reqInfo,
|
h.logAndSendError(ctx, w, "object lock disabled", reqInfo,
|
||||||
apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
|
apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -178,9 +182,9 @@ func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
}
|
}
|
||||||
|
|
||||||
lockInfo, err := h.obj.GetLockInfo(r.Context(), p)
|
lockInfo, err := h.obj.GetLockInfo(ctx, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't head lock object", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't head lock object", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -190,33 +194,34 @@ func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, legalHold); err != nil {
|
if err = middleware.EncodeToResponse(w, legalHold); err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if !bktInfo.ObjectLockEnabled {
|
if !bktInfo.ObjectLockEnabled {
|
||||||
h.logAndSendError(w, "object lock disabled", reqInfo,
|
h.logAndSendError(ctx, w, "object lock disabled", reqInfo,
|
||||||
apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
|
apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
retention := &data.Retention{}
|
retention := &data.Retention{}
|
||||||
if err = h.cfg.NewXMLDecoder(r.Body).Decode(retention); err != nil {
|
if err = h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(retention); err != nil {
|
||||||
h.logAndSendError(w, "couldn't parse object retention", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't parse object retention", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
lock, err := formObjectLockFromRetention(r.Context(), retention, r.Header)
|
lock, err := formObjectLockFromRetention(ctx, retention, r.Header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid retention configuration", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid retention configuration", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -231,28 +236,29 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
|
||||||
|
|
||||||
p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
|
p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid copies number", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutLockInfo(r.Context(), p); err != nil {
|
if err = h.obj.PutLockInfo(ctx, p); err != nil {
|
||||||
h.logAndSendError(w, "couldn't put legal hold", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't put legal hold", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bktInfo.ObjectLockEnabled {
|
if !bktInfo.ObjectLockEnabled {
|
||||||
h.logAndSendError(w, "object lock disabled", reqInfo,
|
h.logAndSendError(ctx, w, "object lock disabled", reqInfo,
|
||||||
apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
|
apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -262,14 +268,14 @@ func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
}
|
}
|
||||||
|
|
||||||
lockInfo, err := h.obj.GetLockInfo(r.Context(), p)
|
lockInfo, err := h.obj.GetLockInfo(ctx, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't head lock object", reqInfo, err)
|
h.logAndSendError(ctx, w, "couldn't head lock object", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !lockInfo.IsRetentionSet() {
|
if !lockInfo.IsRetentionSet() {
|
||||||
h.logAndSendError(w, "retention lock isn't set", reqInfo, apiErrors.GetAPIError(apiErrors.ErrNoSuchKey))
|
h.logAndSendError(ctx, w, "retention lock isn't set", reqInfo, apierr.GetAPIError(apierr.ErrNoSuchKey))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -282,7 +288,7 @@ func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, retention); err != nil {
|
if err = middleware.EncodeToResponse(w, retention); err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -314,7 +320,7 @@ func checkLockConfiguration(conf *data.ObjectLockConfiguration) error {
|
||||||
func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig *data.ObjectLockConfiguration, header http.Header) (*data.ObjectLock, error) {
|
func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig *data.ObjectLockConfiguration, header http.Header) (*data.ObjectLock, error) {
|
||||||
if !bktInfo.ObjectLockEnabled {
|
if !bktInfo.ObjectLockEnabled {
|
||||||
if existLockHeaders(header) {
|
if existLockHeaders(header) {
|
||||||
return nil, apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound)
|
return nil, apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound)
|
||||||
}
|
}
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
@ -346,7 +352,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
|
||||||
until := header.Get(api.AmzObjectLockRetainUntilDate)
|
until := header.Get(api.AmzObjectLockRetainUntilDate)
|
||||||
|
|
||||||
if mode != "" && until == "" || mode == "" && until != "" {
|
if mode != "" && until == "" || mode == "" && until != "" {
|
||||||
return nil, apiErrors.GetAPIError(apiErrors.ErrObjectLockInvalidHeaders)
|
return nil, apierr.GetAPIError(apierr.ErrObjectLockInvalidHeaders)
|
||||||
}
|
}
|
||||||
|
|
||||||
if mode != "" {
|
if mode != "" {
|
||||||
|
@ -355,7 +361,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
if mode != complianceMode && mode != governanceMode {
|
if mode != complianceMode && mode != governanceMode {
|
||||||
return nil, apiErrors.GetAPIError(apiErrors.ErrUnknownWORMModeDirective)
|
return nil, apierr.GetAPIError(apierr.ErrUnknownWORMModeDirective)
|
||||||
}
|
}
|
||||||
|
|
||||||
objectLock.Retention.IsCompliance = mode == complianceMode
|
objectLock.Retention.IsCompliance = mode == complianceMode
|
||||||
|
@ -364,7 +370,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
|
||||||
if until != "" {
|
if until != "" {
|
||||||
retentionDate, err := time.Parse(time.RFC3339, until)
|
retentionDate, err := time.Parse(time.RFC3339, until)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidRetentionDate)
|
return nil, apierr.GetAPIError(apierr.ErrInvalidRetentionDate)
|
||||||
}
|
}
|
||||||
if objectLock.Retention == nil {
|
if objectLock.Retention == nil {
|
||||||
objectLock.Retention = &data.RetentionLock{}
|
objectLock.Retention = &data.RetentionLock{}
|
||||||
|
@ -382,7 +388,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
if objectLock.Retention.Until.Before(layer.TimeNow(ctx)) {
|
if objectLock.Retention.Until.Before(layer.TimeNow(ctx)) {
|
||||||
return nil, apiErrors.GetAPIError(apiErrors.ErrPastObjectLockRetainDate)
|
return nil, apierr.GetAPIError(apierr.ErrPastObjectLockRetainDate)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -397,16 +403,16 @@ func existLockHeaders(header http.Header) bool {
|
||||||
|
|
||||||
func formObjectLockFromRetention(ctx context.Context, retention *data.Retention, header http.Header) (*data.ObjectLock, error) {
|
func formObjectLockFromRetention(ctx context.Context, retention *data.Retention, header http.Header) (*data.ObjectLock, error) {
|
||||||
if retention.Mode != governanceMode && retention.Mode != complianceMode {
|
if retention.Mode != governanceMode && retention.Mode != complianceMode {
|
||||||
return nil, apiErrors.GetAPIError(apiErrors.ErrMalformedXML)
|
return nil, apierr.GetAPIError(apierr.ErrMalformedXML)
|
||||||
}
|
}
|
||||||
|
|
||||||
retentionDate, err := time.Parse(time.RFC3339, retention.RetainUntilDate)
|
retentionDate, err := time.Parse(time.RFC3339, retention.RetainUntilDate)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, apiErrors.GetAPIError(apiErrors.ErrMalformedXML)
|
return nil, apierr.GetAPIError(apierr.ErrMalformedXML)
|
||||||
}
|
}
|
||||||
|
|
||||||
if retentionDate.Before(layer.TimeNow(ctx)) {
|
if retentionDate.Before(layer.TimeNow(ctx)) {
|
||||||
return nil, apiErrors.GetAPIError(apiErrors.ErrPastObjectLockRetainDate)
|
return nil, apierr.GetAPIError(apierr.ErrPastObjectLockRetainDate)
|
||||||
}
|
}
|
||||||
|
|
||||||
var bypass bool
|
var bypass bool
|
||||||
|
|
|
@ -12,7 +12,7 @@ import (
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
@ -270,23 +270,24 @@ func TestPutBucketLockConfigurationHandler(t *testing.T) {
|
||||||
for _, tc := range []struct {
|
for _, tc := range []struct {
|
||||||
name string
|
name string
|
||||||
bucket string
|
bucket string
|
||||||
expectedError apiErrors.Error
|
expectedError apierr.Error
|
||||||
noError bool
|
noError bool
|
||||||
configuration *data.ObjectLockConfiguration
|
configuration *data.ObjectLockConfiguration
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "bkt not found",
|
name: "bkt not found",
|
||||||
expectedError: apiErrors.GetAPIError(apiErrors.ErrNoSuchBucket),
|
bucket: "not-found-bucket",
|
||||||
|
expectedError: apierr.GetAPIError(apierr.ErrNoSuchBucket),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "bkt lock disabled",
|
name: "bkt lock disabled",
|
||||||
bucket: bktLockDisabled,
|
bucket: bktLockDisabled,
|
||||||
expectedError: apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotAllowed),
|
expectedError: apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotAllowed),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "invalid configuration",
|
name: "invalid configuration",
|
||||||
bucket: bktLockEnabled,
|
bucket: bktLockEnabled,
|
||||||
expectedError: apiErrors.GetAPIError(apiErrors.ErrInternalError),
|
expectedError: apierr.GetAPIError(apierr.ErrInternalError),
|
||||||
configuration: &data.ObjectLockConfiguration{ObjectLockEnabled: "dummy"},
|
configuration: &data.ObjectLockConfiguration{ObjectLockEnabled: "dummy"},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -359,18 +360,19 @@ func TestGetBucketLockConfigurationHandler(t *testing.T) {
|
||||||
for _, tc := range []struct {
|
for _, tc := range []struct {
|
||||||
name string
|
name string
|
||||||
bucket string
|
bucket string
|
||||||
expectedError apiErrors.Error
|
expectedError apierr.Error
|
||||||
noError bool
|
noError bool
|
||||||
expectedConf *data.ObjectLockConfiguration
|
expectedConf *data.ObjectLockConfiguration
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "bkt not found",
|
name: "bkt not found",
|
||||||
expectedError: apiErrors.GetAPIError(apiErrors.ErrNoSuchBucket),
|
bucket: "not-found-bucket",
|
||||||
|
expectedError: apierr.GetAPIError(apierr.ErrNoSuchBucket),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "bkt lock disabled",
|
name: "bkt lock disabled",
|
||||||
bucket: bktLockDisabled,
|
bucket: bktLockDisabled,
|
||||||
expectedError: apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound),
|
expectedError: apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "bkt lock enabled empty default",
|
name: "bkt lock enabled empty default",
|
||||||
|
@ -407,7 +409,7 @@ func TestGetBucketLockConfigurationHandler(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertS3Error(t *testing.T, w *httptest.ResponseRecorder, expectedError apiErrors.Error) {
|
func assertS3Error(t *testing.T, w *httptest.ResponseRecorder, expectedError apierr.Error) {
|
||||||
actualErrorResponse := &middleware.ErrorResponse{}
|
actualErrorResponse := &middleware.ErrorResponse{}
|
||||||
err := xml.NewDecoder(w.Result().Body).Decode(actualErrorResponse)
|
err := xml.NewDecoder(w.Result().Body).Decode(actualErrorResponse)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -415,7 +417,7 @@ func assertS3Error(t *testing.T, w *httptest.ResponseRecorder, expectedError api
|
||||||
require.Equal(t, expectedError.HTTPStatusCode, w.Code)
|
require.Equal(t, expectedError.HTTPStatusCode, w.Code)
|
||||||
require.Equal(t, expectedError.Code, actualErrorResponse.Code)
|
require.Equal(t, expectedError.Code, actualErrorResponse.Code)
|
||||||
|
|
||||||
if expectedError.ErrCode != apiErrors.ErrInternalError {
|
if expectedError.ErrCode != apierr.ErrInternalError {
|
||||||
require.Equal(t, expectedError.Description, actualErrorResponse.Message)
|
require.Equal(t, expectedError.Description, actualErrorResponse.Message)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -473,33 +475,33 @@ func TestObjectRetention(t *testing.T) {
|
||||||
objName := "obj-for-retention"
|
objName := "obj-for-retention"
|
||||||
createTestObject(hc, bktInfo, objName, encryption.Params{})
|
createTestObject(hc, bktInfo, objName, encryption.Params{})
|
||||||
|
|
||||||
getObjectRetention(hc, bktName, objName, nil, apiErrors.ErrNoSuchKey)
|
getObjectRetention(hc, bktName, objName, nil, apierr.ErrNoSuchKey)
|
||||||
|
|
||||||
retention := &data.Retention{Mode: governanceMode, RetainUntilDate: time.Now().Add(time.Minute).UTC().Format(time.RFC3339)}
|
retention := &data.Retention{Mode: governanceMode, RetainUntilDate: time.Now().Add(time.Minute).UTC().Format(time.RFC3339)}
|
||||||
putObjectRetention(hc, bktName, objName, retention, false, 0)
|
putObjectRetention(hc, bktName, objName, retention, false, 0)
|
||||||
getObjectRetention(hc, bktName, objName, retention, 0)
|
getObjectRetention(hc, bktName, objName, retention, 0)
|
||||||
|
|
||||||
retention = &data.Retention{Mode: governanceMode, RetainUntilDate: time.Now().UTC().Add(time.Minute).Format(time.RFC3339)}
|
retention = &data.Retention{Mode: governanceMode, RetainUntilDate: time.Now().UTC().Add(time.Minute).Format(time.RFC3339)}
|
||||||
putObjectRetention(hc, bktName, objName, retention, false, apiErrors.ErrInternalError)
|
putObjectRetention(hc, bktName, objName, retention, false, apierr.ErrInternalError)
|
||||||
|
|
||||||
retention = &data.Retention{Mode: complianceMode, RetainUntilDate: time.Now().Add(time.Minute).UTC().Format(time.RFC3339)}
|
retention = &data.Retention{Mode: complianceMode, RetainUntilDate: time.Now().Add(time.Minute).UTC().Format(time.RFC3339)}
|
||||||
putObjectRetention(hc, bktName, objName, retention, true, 0)
|
putObjectRetention(hc, bktName, objName, retention, true, 0)
|
||||||
getObjectRetention(hc, bktName, objName, retention, 0)
|
getObjectRetention(hc, bktName, objName, retention, 0)
|
||||||
|
|
||||||
putObjectRetention(hc, bktName, objName, retention, true, apiErrors.ErrInternalError)
|
putObjectRetention(hc, bktName, objName, retention, true, apierr.ErrInternalError)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getObjectRetention(hc *handlerContext, bktName, objName string, retention *data.Retention, errCode apiErrors.ErrorCode) {
|
func getObjectRetention(hc *handlerContext, bktName, objName string, retention *data.Retention, errCode apierr.ErrorCode) {
|
||||||
w, r := prepareTestRequest(hc, bktName, objName, nil)
|
w, r := prepareTestRequest(hc, bktName, objName, nil)
|
||||||
hc.Handler().GetObjectRetentionHandler(w, r)
|
hc.Handler().GetObjectRetentionHandler(w, r)
|
||||||
if errCode == 0 {
|
if errCode == 0 {
|
||||||
assertRetention(hc.t, w, retention)
|
assertRetention(hc.t, w, retention)
|
||||||
} else {
|
} else {
|
||||||
assertS3Error(hc.t, w, apiErrors.GetAPIError(errCode))
|
assertS3Error(hc.t, w, apierr.GetAPIError(errCode))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func putObjectRetention(hc *handlerContext, bktName, objName string, retention *data.Retention, byPass bool, errCode apiErrors.ErrorCode) {
|
func putObjectRetention(hc *handlerContext, bktName, objName string, retention *data.Retention, byPass bool, errCode apierr.ErrorCode) {
|
||||||
w, r := prepareTestRequest(hc, bktName, objName, retention)
|
w, r := prepareTestRequest(hc, bktName, objName, retention)
|
||||||
if byPass {
|
if byPass {
|
||||||
r.Header.Set(api.AmzBypassGovernanceRetention, strconv.FormatBool(true))
|
r.Header.Set(api.AmzBypassGovernanceRetention, strconv.FormatBool(true))
|
||||||
|
@ -508,7 +510,7 @@ func putObjectRetention(hc *handlerContext, bktName, objName string, retention *
|
||||||
if errCode == 0 {
|
if errCode == 0 {
|
||||||
assertStatus(hc.t, w, http.StatusOK)
|
assertStatus(hc.t, w, http.StatusOK)
|
||||||
} else {
|
} else {
|
||||||
assertS3Error(hc.t, w, apiErrors.GetAPIError(errCode))
|
assertS3Error(hc.t, w, apierr.GetAPIError(errCode))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -572,37 +574,37 @@ func TestPutLockErrors(t *testing.T) {
|
||||||
createTestBucketWithLock(hc, bktName, nil)
|
createTestBucketWithLock(hc, bktName, nil)
|
||||||
|
|
||||||
headers := map[string]string{api.AmzObjectLockMode: complianceMode}
|
headers := map[string]string{api.AmzObjectLockMode: complianceMode}
|
||||||
putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrObjectLockInvalidHeaders)
|
putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrObjectLockInvalidHeaders)
|
||||||
|
|
||||||
delete(headers, api.AmzObjectLockMode)
|
delete(headers, api.AmzObjectLockMode)
|
||||||
headers[api.AmzObjectLockRetainUntilDate] = time.Now().Add(time.Minute).Format(time.RFC3339)
|
headers[api.AmzObjectLockRetainUntilDate] = time.Now().Add(time.Minute).Format(time.RFC3339)
|
||||||
putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrObjectLockInvalidHeaders)
|
putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrObjectLockInvalidHeaders)
|
||||||
|
|
||||||
headers[api.AmzObjectLockMode] = "dummy"
|
headers[api.AmzObjectLockMode] = "dummy"
|
||||||
putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrUnknownWORMModeDirective)
|
putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrUnknownWORMModeDirective)
|
||||||
|
|
||||||
headers[api.AmzObjectLockMode] = complianceMode
|
headers[api.AmzObjectLockMode] = complianceMode
|
||||||
headers[api.AmzObjectLockRetainUntilDate] = time.Now().Format(time.RFC3339)
|
headers[api.AmzObjectLockRetainUntilDate] = time.Now().Format(time.RFC3339)
|
||||||
putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrPastObjectLockRetainDate)
|
putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrPastObjectLockRetainDate)
|
||||||
|
|
||||||
headers[api.AmzObjectLockRetainUntilDate] = "dummy"
|
headers[api.AmzObjectLockRetainUntilDate] = "dummy"
|
||||||
putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrInvalidRetentionDate)
|
putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrInvalidRetentionDate)
|
||||||
|
|
||||||
putObject(hc, bktName, objName)
|
putObject(hc, bktName, objName)
|
||||||
|
|
||||||
retention := &data.Retention{Mode: governanceMode}
|
retention := &data.Retention{Mode: governanceMode}
|
||||||
putObjectRetentionFailed(t, hc, bktName, objName, retention, apiErrors.ErrMalformedXML)
|
putObjectRetentionFailed(t, hc, bktName, objName, retention, apierr.ErrMalformedXML)
|
||||||
|
|
||||||
retention.Mode = "dummy"
|
retention.Mode = "dummy"
|
||||||
retention.RetainUntilDate = time.Now().Add(time.Minute).UTC().Format(time.RFC3339)
|
retention.RetainUntilDate = time.Now().Add(time.Minute).UTC().Format(time.RFC3339)
|
||||||
putObjectRetentionFailed(t, hc, bktName, objName, retention, apiErrors.ErrMalformedXML)
|
putObjectRetentionFailed(t, hc, bktName, objName, retention, apierr.ErrMalformedXML)
|
||||||
|
|
||||||
retention.Mode = governanceMode
|
retention.Mode = governanceMode
|
||||||
retention.RetainUntilDate = time.Now().UTC().Format(time.RFC3339)
|
retention.RetainUntilDate = time.Now().UTC().Format(time.RFC3339)
|
||||||
putObjectRetentionFailed(t, hc, bktName, objName, retention, apiErrors.ErrPastObjectLockRetainDate)
|
putObjectRetentionFailed(t, hc, bktName, objName, retention, apierr.ErrPastObjectLockRetainDate)
|
||||||
}
|
}
|
||||||
|
|
||||||
func putObjectWithLockFailed(t *testing.T, hc *handlerContext, bktName, objName string, headers map[string]string, errCode apiErrors.ErrorCode) {
|
func putObjectWithLockFailed(t *testing.T, hc *handlerContext, bktName, objName string, headers map[string]string, errCode apierr.ErrorCode) {
|
||||||
w, r := prepareTestRequest(hc, bktName, objName, nil)
|
w, r := prepareTestRequest(hc, bktName, objName, nil)
|
||||||
|
|
||||||
for key, val := range headers {
|
for key, val := range headers {
|
||||||
|
@ -610,13 +612,13 @@ func putObjectWithLockFailed(t *testing.T, hc *handlerContext, bktName, objName
|
||||||
}
|
}
|
||||||
|
|
||||||
hc.Handler().PutObjectHandler(w, r)
|
hc.Handler().PutObjectHandler(w, r)
|
||||||
assertS3Error(t, w, apiErrors.GetAPIError(errCode))
|
assertS3Error(t, w, apierr.GetAPIError(errCode))
|
||||||
}
|
}
|
||||||
|
|
||||||
func putObjectRetentionFailed(t *testing.T, hc *handlerContext, bktName, objName string, retention *data.Retention, errCode apiErrors.ErrorCode) {
|
func putObjectRetentionFailed(t *testing.T, hc *handlerContext, bktName, objName string, retention *data.Retention, errCode apierr.ErrorCode) {
|
||||||
w, r := prepareTestRequest(hc, bktName, objName, retention)
|
w, r := prepareTestRequest(hc, bktName, objName, retention)
|
||||||
hc.Handler().PutObjectRetentionHandler(w, r)
|
hc.Handler().PutObjectRetentionHandler(w, r)
|
||||||
assertS3Error(t, w, apiErrors.GetAPIError(errCode))
|
assertS3Error(t, w, apierr.GetAPIError(errCode))
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertRetentionApproximate(t *testing.T, w *httptest.ResponseRecorder, retention *data.Retention, delta float64) {
|
func assertRetentionApproximate(t *testing.T, w *httptest.ResponseRecorder, retention *data.Retention, delta float64) {
|
||||||
|
|
|
@ -5,7 +5,9 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"path"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
|
@ -13,7 +15,6 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -27,10 +28,11 @@ type (
|
||||||
}
|
}
|
||||||
|
|
||||||
CompleteMultipartUploadResponse struct {
|
CompleteMultipartUploadResponse struct {
|
||||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult" json:"-"`
|
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult" json:"-"`
|
||||||
Bucket string `xml:"Bucket"`
|
Bucket string `xml:"Bucket"`
|
||||||
Key string `xml:"Key"`
|
Key string `xml:"Key"`
|
||||||
ETag string `xml:"ETag"`
|
ETag string `xml:"ETag"`
|
||||||
|
Location string `xml:"Location"`
|
||||||
}
|
}
|
||||||
|
|
||||||
ListMultipartUploadsResponse struct {
|
ListMultipartUploadsResponse struct {
|
||||||
|
@ -55,11 +57,11 @@ type (
|
||||||
Initiator Initiator `xml:"Initiator"`
|
Initiator Initiator `xml:"Initiator"`
|
||||||
IsTruncated bool `xml:"IsTruncated"`
|
IsTruncated bool `xml:"IsTruncated"`
|
||||||
Key string `xml:"Key"`
|
Key string `xml:"Key"`
|
||||||
MaxParts int `xml:"MaxParts,omitempty"`
|
MaxParts int `xml:"MaxParts"`
|
||||||
NextPartNumberMarker int `xml:"NextPartNumberMarker,omitempty"`
|
NextPartNumberMarker int `xml:"NextPartNumberMarker"`
|
||||||
Owner Owner `xml:"Owner"`
|
Owner Owner `xml:"Owner"`
|
||||||
Parts []*layer.Part `xml:"Part"`
|
Parts []*layer.Part `xml:"Part"`
|
||||||
PartNumberMarker int `xml:"PartNumberMarker,omitempty"`
|
PartNumberMarker int `xml:"PartNumberMarker"`
|
||||||
StorageClass string `xml:"StorageClass"`
|
StorageClass string `xml:"StorageClass"`
|
||||||
UploadID string `xml:"UploadId"`
|
UploadID string `xml:"UploadId"`
|
||||||
}
|
}
|
||||||
|
@ -102,26 +104,20 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
uploadID := uuid.New()
|
uploadID := uuid.New()
|
||||||
cannedACLStatus := aclHeadersStatus(r)
|
cannedACLStatus := aclHeadersStatus(r)
|
||||||
additional := []zap.Field{zap.String("uploadID", uploadID.String())}
|
additional := []zap.Field{zap.String("uploadID", uploadID.String())}
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
if cannedACLStatus == aclStatusYes {
|
||||||
if err != nil {
|
h.logAndSendError(ctx, w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
|
||||||
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
apeEnabled := bktInfo.APEEnabled || settings.CannedACL != ""
|
|
||||||
if apeEnabled && cannedACLStatus == aclStatusYes {
|
|
||||||
h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -134,31 +130,17 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
|
||||||
Data: &layer.UploadData{},
|
Data: &layer.UploadData{},
|
||||||
}
|
}
|
||||||
|
|
||||||
needUpdateEACLTable := !(apeEnabled || cannedACLStatus == aclStatusNo)
|
|
||||||
if needUpdateEACLTable {
|
|
||||||
key, err := h.bearerTokenIssuerKey(r.Context())
|
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(w, "couldn't get gate key", reqInfo, err, additional...)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if _, err = parseACLHeaders(r.Header, key); err != nil {
|
|
||||||
h.logAndSendError(w, "could not parse acl", reqInfo, err, additional...)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.Data.ACLHeaders = formACLHeadersForMultipart(r.Header)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(r.Header.Get(api.AmzTagging)) > 0 {
|
if len(r.Header.Get(api.AmzTagging)) > 0 {
|
||||||
p.Data.TagSet, err = parseTaggingHeader(r.Header)
|
p.Data.TagSet, err = parseTaggingHeader(r.Header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not parse tagging", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "could not parse tagging", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
p.Info.Encryption, err = formEncryptionParams(r)
|
p.Info.Encryption, err = h.formEncryptionParams(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -172,12 +154,12 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
|
||||||
|
|
||||||
p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, reqInfo.Namespace, bktInfo.LocationConstraint)
|
p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, reqInfo.Namespace, bktInfo.LocationConstraint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid copies number", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.CreateMultipartUpload(r.Context(), p); err != nil {
|
if err = h.obj.CreateMultipartUpload(ctx, p); err != nil {
|
||||||
h.logAndSendError(w, "could create multipart upload", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "could create multipart upload", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -192,36 +174,18 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, resp); err != nil {
|
if err = middleware.EncodeToResponse(w, resp); err != nil {
|
||||||
h.logAndSendError(w, "could not encode InitiateMultipartUploadResponse to response", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "could not encode InitiateMultipartUploadResponse to response", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func formACLHeadersForMultipart(header http.Header) map[string]string {
|
|
||||||
result := make(map[string]string)
|
|
||||||
|
|
||||||
if value := header.Get(api.AmzACL); value != "" {
|
|
||||||
result[api.AmzACL] = value
|
|
||||||
}
|
|
||||||
if value := header.Get(api.AmzGrantRead); value != "" {
|
|
||||||
result[api.AmzGrantRead] = value
|
|
||||||
}
|
|
||||||
if value := header.Get(api.AmzGrantFullControl); value != "" {
|
|
||||||
result[api.AmzGrantFullControl] = value
|
|
||||||
}
|
|
||||||
if value := header.Get(api.AmzGrantWrite); value != "" {
|
|
||||||
result[api.AmzGrantWrite] = value
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -234,20 +198,17 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
partNumber, err := strconv.Atoi(partNumStr)
|
partNumber, err := strconv.Atoi(partNumStr)
|
||||||
if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
|
if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
|
||||||
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
|
h.logAndSendError(ctx, w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
body, err := h.getBodyReader(r)
|
body, err := h.getBodyReader(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "failed to get body reader", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "failed to get body reader", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var size uint64
|
size := h.getPutPayloadSize(r)
|
||||||
if r.ContentLength > 0 {
|
|
||||||
size = uint64(r.ContentLength)
|
|
||||||
}
|
|
||||||
|
|
||||||
p := &layer.UploadPartParams{
|
p := &layer.UploadPartParams{
|
||||||
Info: &layer.UploadInfoParams{
|
Info: &layer.UploadInfoParams{
|
||||||
|
@ -262,15 +223,15 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
ContentSHA256Hash: r.Header.Get(api.AmzContentSha256),
|
ContentSHA256Hash: r.Header.Get(api.AmzContentSha256),
|
||||||
}
|
}
|
||||||
|
|
||||||
p.Info.Encryption, err = formEncryptionParams(r)
|
p.Info.Encryption, err = h.formEncryptionParams(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
hash, err := h.obj.UploadPart(r.Context(), p)
|
hash, err := h.obj.UploadPart(ctx, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not upload a part", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "could not upload a part", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -280,7 +241,7 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
w.Header().Set(api.ETag, data.Quote(hash))
|
w.Header().Set(api.ETag, data.Quote(hash))
|
||||||
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
|
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
|
||||||
h.logAndSendError(w, "write response", reqInfo, err)
|
h.logAndSendError(ctx, w, "write response", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -298,7 +259,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
partNumber, err := strconv.Atoi(partNumStr)
|
partNumber, err := strconv.Atoi(partNumStr)
|
||||||
if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
|
if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
|
||||||
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
|
h.logAndSendError(ctx, w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -309,26 +270,26 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
srcBucket, srcObject, err := path2BucketObject(src)
|
srcBucket, srcObject, err := path2BucketObject(src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid source copy", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "invalid source copy", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
srcRange, err := parseRange(r.Header.Get(api.AmzCopySourceRange))
|
srcRange, err := parseRange(r.Header.Get(api.AmzCopySourceRange))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not parse copy range", reqInfo,
|
h.logAndSendError(ctx, w, "could not parse copy range", reqInfo,
|
||||||
errors.GetAPIError(errors.ErrInvalidCopyPartRange), additional...)
|
errors.GetAPIError(errors.ErrInvalidCopyPartRange), additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
srcBktInfo, err := h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner)
|
srcBktInfo, err := h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get source bucket info", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "could not get source bucket info", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get target bucket info", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "could not get target bucket info", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -341,35 +302,35 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
||||||
srcInfo, err := h.obj.GetObjectInfo(ctx, headPrm)
|
srcInfo, err := h.obj.GetObjectInfo(ctx, headPrm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.IsS3Error(err, errors.ErrNoSuchKey) && versionID != "" {
|
if errors.IsS3Error(err, errors.ErrNoSuchKey) && versionID != "" {
|
||||||
h.logAndSendError(w, "could not head source object version", reqInfo,
|
h.logAndSendError(ctx, w, "could not head source object version", reqInfo,
|
||||||
errors.GetAPIError(errors.ErrBadRequest), additional...)
|
errors.GetAPIError(errors.ErrBadRequest), additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
h.logAndSendError(w, "could not head source object", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "could not head source object", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
args, err := parseCopyObjectArgs(r.Header)
|
args, err := parseCopyObjectArgs(r.Header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not parse copy object args", reqInfo,
|
h.logAndSendError(ctx, w, "could not parse copy object args", reqInfo,
|
||||||
errors.GetAPIError(errors.ErrInvalidCopyPartRange), additional...)
|
errors.GetAPIError(errors.ErrInvalidCopyPartRange), additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = checkPreconditions(srcInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil {
|
if err = checkPreconditions(srcInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil {
|
||||||
h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed),
|
h.logAndSendError(ctx, w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed),
|
||||||
additional...)
|
additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
srcEncryptionParams, err := formCopySourceEncryptionParams(r)
|
srcEncryptionParams, err := h.formCopySourceEncryptionParams(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = srcEncryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcInfo.Headers)); err != nil {
|
if err = srcEncryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcInfo.Headers)); err != nil {
|
||||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrBadRequest), err), additional...)
|
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrBadRequest), err), additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -387,15 +348,15 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
||||||
Range: srcRange,
|
Range: srcRange,
|
||||||
}
|
}
|
||||||
|
|
||||||
p.Info.Encryption, err = formEncryptionParams(r)
|
p.Info.Encryption, err = h.formEncryptionParams(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err := h.obj.UploadPartCopy(ctx, p)
|
info, err := h.obj.UploadPartCopy(ctx, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not upload part copy", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "could not upload part copy", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -409,22 +370,23 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, response); err != nil {
|
if err = middleware.EncodeToResponse(w, response); err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err, additional...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -439,13 +401,13 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
|
||||||
)
|
)
|
||||||
|
|
||||||
reqBody := new(CompleteMultipartUpload)
|
reqBody := new(CompleteMultipartUpload)
|
||||||
if err = h.cfg.NewXMLDecoder(r.Body).Decode(reqBody); err != nil {
|
if err = h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(reqBody); err != nil {
|
||||||
h.logAndSendError(w, "could not read complete multipart upload xml", reqInfo,
|
h.logAndSendError(ctx, w, "could not read complete multipart upload xml", reqInfo,
|
||||||
fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()), additional...)
|
fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()), additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if len(reqBody.Parts) == 0 {
|
if len(reqBody.Parts) == 0 {
|
||||||
h.logAndSendError(w, "invalid xml with parts", reqInfo, errors.GetAPIError(errors.ErrMalformedXML), additional...)
|
h.logAndSendError(ctx, w, "invalid xml with parts", reqInfo, errors.GetAPIError(errors.ErrMalformedXML), additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -456,17 +418,18 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
|
||||||
|
|
||||||
// Start complete multipart upload which may take some time to fetch object
|
// Start complete multipart upload which may take some time to fetch object
|
||||||
// and re-upload it part by part.
|
// and re-upload it part by part.
|
||||||
objInfo, err := h.completeMultipartUpload(r, c, bktInfo, reqInfo)
|
objInfo, err := h.completeMultipartUpload(r, c, bktInfo)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "complete multipart error", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "complete multipart error", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
response := CompleteMultipartUploadResponse{
|
response := CompleteMultipartUploadResponse{
|
||||||
Bucket: objInfo.Bucket,
|
Bucket: objInfo.Bucket,
|
||||||
Key: objInfo.Name,
|
Key: objInfo.Name,
|
||||||
ETag: data.Quote(objInfo.ETag(h.cfg.MD5Enabled())),
|
ETag: data.Quote(objInfo.ETag(h.cfg.MD5Enabled())),
|
||||||
|
Location: getObjectLocation(r, reqInfo.BucketName, reqInfo.ObjectName, reqInfo.RequestVHSEnabled),
|
||||||
}
|
}
|
||||||
|
|
||||||
if settings.VersioningEnabled() {
|
if settings.VersioningEnabled() {
|
||||||
|
@ -474,11 +437,39 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, response); err != nil {
|
if err = middleware.EncodeToResponse(w, response); err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err, additional...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMultipartParams, bktInfo *data.BucketInfo, reqInfo *middleware.ReqInfo) (*data.ObjectInfo, error) {
|
// returns "https" if the tls boolean is true, "http" otherwise.
|
||||||
|
func getURLScheme(r *http.Request) string {
|
||||||
|
if r.TLS != nil {
|
||||||
|
return "https"
|
||||||
|
}
|
||||||
|
return "http"
|
||||||
|
}
|
||||||
|
|
||||||
|
// getObjectLocation gets the fully qualified URL of an object.
|
||||||
|
func getObjectLocation(r *http.Request, bucket, object string, vhsEnabled bool) string {
|
||||||
|
proto := middleware.GetSourceScheme(r)
|
||||||
|
if proto == "" {
|
||||||
|
proto = getURLScheme(r)
|
||||||
|
}
|
||||||
|
u := &url.URL{
|
||||||
|
Host: r.Host,
|
||||||
|
Path: path.Join("/", bucket, object),
|
||||||
|
Scheme: proto,
|
||||||
|
}
|
||||||
|
|
||||||
|
// If vhs enabled then we need to use bucket DNS style.
|
||||||
|
if vhsEnabled {
|
||||||
|
u.Path = path.Join("/", object)
|
||||||
|
}
|
||||||
|
|
||||||
|
return u.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMultipartParams, bktInfo *data.BucketInfo) (*data.ObjectInfo, error) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
uploadData, extendedObjInfo, err := h.obj.CompleteMultipartUpload(ctx, c)
|
uploadData, extendedObjInfo, err := h.obj.CompleteMultipartUpload(ctx, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -496,57 +487,21 @@ func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMult
|
||||||
TagSet: uploadData.TagSet,
|
TagSet: uploadData.TagSet,
|
||||||
NodeVersion: extendedObjInfo.NodeVersion,
|
NodeVersion: extendedObjInfo.NodeVersion,
|
||||||
}
|
}
|
||||||
if _, err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
|
if err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
|
||||||
return nil, fmt.Errorf("could not put tagging file of completed multipart upload: %w", err)
|
return nil, fmt.Errorf("could not put tagging file of completed multipart upload: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(uploadData.ACLHeaders) != 0 {
|
|
||||||
sessionTokenSetEACL, err := getSessionTokenSetEACL(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("couldn't get eacl token: %w", err)
|
|
||||||
}
|
|
||||||
key, err := h.bearerTokenIssuerKey(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("couldn't get gate key: %w", err)
|
|
||||||
}
|
|
||||||
acl, err := parseACLHeaders(r.Header, key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("could not parse acl: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
resInfo := &resourceInfo{
|
|
||||||
Bucket: objInfo.Bucket,
|
|
||||||
Object: objInfo.Name,
|
|
||||||
}
|
|
||||||
astObject, err := aclToAst(acl, resInfo)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("could not translate acl of completed multipart upload to ast: %w", err)
|
|
||||||
}
|
|
||||||
if _, err = h.updateBucketACL(r, astObject, bktInfo, sessionTokenSetEACL); err != nil {
|
|
||||||
return nil, fmt.Errorf("could not update bucket acl while completing multipart upload: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
s := &SendNotificationParams{
|
|
||||||
Event: EventObjectCreatedCompleteMultipartUpload,
|
|
||||||
NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.MD5Enabled()),
|
|
||||||
BktInfo: bktInfo,
|
|
||||||
ReqInfo: reqInfo,
|
|
||||||
}
|
|
||||||
if err = h.sendNotifications(ctx, s); err != nil {
|
|
||||||
h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
return objInfo, nil
|
return objInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -559,7 +514,7 @@ func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Req
|
||||||
if maxUploadsStr != "" {
|
if maxUploadsStr != "" {
|
||||||
val, err := strconv.Atoi(maxUploadsStr)
|
val, err := strconv.Atoi(maxUploadsStr)
|
||||||
if err != nil || val < 1 || val > 1000 {
|
if err != nil || val < 1 || val > 1000 {
|
||||||
h.logAndSendError(w, "invalid maxUploads", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxUploads))
|
h.logAndSendError(ctx, w, "invalid maxUploads", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxUploads))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
maxUploads = val
|
maxUploads = val
|
||||||
|
@ -575,23 +530,29 @@ func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Req
|
||||||
UploadIDMarker: queryValues.Get(uploadIDMarkerQueryName),
|
UploadIDMarker: queryValues.Get(uploadIDMarkerQueryName),
|
||||||
}
|
}
|
||||||
|
|
||||||
list, err := h.obj.ListMultipartUploads(r.Context(), p)
|
if p.EncodingType != "" && strings.ToLower(p.EncodingType) != urlEncodingType {
|
||||||
|
h.logAndSendError(ctx, w, "invalid encoding type", reqInfo, errors.GetAPIError(errors.ErrInvalidEncodingMethod))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
list, err := h.obj.ListMultipartUploads(ctx, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not list multipart uploads", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not list multipart uploads", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, encodeListMultipartUploadsToResponse(list, p)); err != nil {
|
if err = middleware.EncodeToResponse(w, encodeListMultipartUploadsToResponse(list, p)); err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -600,14 +561,14 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
queryValues = reqInfo.URL.Query()
|
queryValues = reqInfo.URL.Query()
|
||||||
uploadID = queryValues.Get(uploadIDHeaderName)
|
uploadID = queryValues.Get(uploadIDHeaderName)
|
||||||
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
|
additional = []zap.Field{zap.String("uploadID", uploadID)}
|
||||||
maxParts = layer.MaxSizePartsList
|
maxParts = layer.MaxSizePartsList
|
||||||
)
|
)
|
||||||
|
|
||||||
if queryValues.Get("max-parts") != "" {
|
if queryValues.Get("max-parts") != "" {
|
||||||
val, err := strconv.Atoi(queryValues.Get("max-parts"))
|
val, err := strconv.Atoi(queryValues.Get("max-parts"))
|
||||||
if err != nil || val < 0 {
|
if err != nil || val < 0 {
|
||||||
h.logAndSendError(w, "invalid MaxParts", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxParts), additional...)
|
h.logAndSendError(ctx, w, "invalid MaxParts", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxParts), additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if val < layer.MaxSizePartsList {
|
if val < layer.MaxSizePartsList {
|
||||||
|
@ -617,7 +578,7 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
if queryValues.Get("part-number-marker") != "" {
|
if queryValues.Get("part-number-marker") != "" {
|
||||||
if partNumberMarker, err = strconv.Atoi(queryValues.Get("part-number-marker")); err != nil || partNumberMarker < 0 {
|
if partNumberMarker, err = strconv.Atoi(queryValues.Get("part-number-marker")); err != nil || partNumberMarker < 0 {
|
||||||
h.logAndSendError(w, "invalid PartNumberMarker", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "invalid PartNumberMarker", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -632,34 +593,35 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
PartNumberMarker: partNumberMarker,
|
PartNumberMarker: partNumberMarker,
|
||||||
}
|
}
|
||||||
|
|
||||||
p.Info.Encryption, err = formEncryptionParams(r)
|
p.Info.Encryption, err = h.formEncryptionParams(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
list, err := h.obj.ListParts(r.Context(), p)
|
list, err := h.obj.ListParts(ctx, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not list parts", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "could not list parts", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, encodeListPartsToResponse(list, p)); err != nil {
|
if err = middleware.EncodeToResponse(w, encodeListPartsToResponse(list, p)); err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
uploadID := reqInfo.URL.Query().Get(uploadIDHeaderName)
|
uploadID := reqInfo.URL.Query().Get(uploadIDHeaderName)
|
||||||
additional := []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
|
additional := []zap.Field{zap.String("uploadID", uploadID)}
|
||||||
|
|
||||||
p := &layer.UploadInfoParams{
|
p := &layer.UploadInfoParams{
|
||||||
UploadID: uploadID,
|
UploadID: uploadID,
|
||||||
|
@ -667,14 +629,14 @@ func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Req
|
||||||
Key: reqInfo.ObjectName,
|
Key: reqInfo.ObjectName,
|
||||||
}
|
}
|
||||||
|
|
||||||
p.Encryption, err = formEncryptionParams(r)
|
p.Encryption, err = h.formEncryptionParams(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.AbortMultipartUpload(r.Context(), p); err != nil {
|
if err = h.obj.AbortMultipartUpload(ctx, p); err != nil {
|
||||||
h.logAndSendError(w, "could not abort multipart upload", reqInfo, err, additional...)
|
h.logAndSendError(ctx, w, "could not abort multipart upload", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -685,14 +647,14 @@ func encodeListMultipartUploadsToResponse(info *layer.ListMultipartUploadsInfo,
|
||||||
res := ListMultipartUploadsResponse{
|
res := ListMultipartUploadsResponse{
|
||||||
Bucket: params.Bkt.Name,
|
Bucket: params.Bkt.Name,
|
||||||
CommonPrefixes: fillPrefixes(info.Prefixes, params.EncodingType),
|
CommonPrefixes: fillPrefixes(info.Prefixes, params.EncodingType),
|
||||||
Delimiter: params.Delimiter,
|
Delimiter: s3PathEncode(params.Delimiter, params.EncodingType),
|
||||||
EncodingType: params.EncodingType,
|
EncodingType: params.EncodingType,
|
||||||
IsTruncated: info.IsTruncated,
|
IsTruncated: info.IsTruncated,
|
||||||
KeyMarker: params.KeyMarker,
|
KeyMarker: s3PathEncode(params.KeyMarker, params.EncodingType),
|
||||||
MaxUploads: params.MaxUploads,
|
MaxUploads: params.MaxUploads,
|
||||||
NextKeyMarker: info.NextKeyMarker,
|
NextKeyMarker: s3PathEncode(info.NextKeyMarker, params.EncodingType),
|
||||||
NextUploadIDMarker: info.NextUploadIDMarker,
|
NextUploadIDMarker: info.NextUploadIDMarker,
|
||||||
Prefix: params.Prefix,
|
Prefix: s3PathEncode(params.Prefix, params.EncodingType),
|
||||||
UploadIDMarker: params.UploadIDMarker,
|
UploadIDMarker: params.UploadIDMarker,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -704,7 +666,7 @@ func encodeListMultipartUploadsToResponse(info *layer.ListMultipartUploadsInfo,
|
||||||
ID: u.Owner.String(),
|
ID: u.Owner.String(),
|
||||||
DisplayName: u.Owner.String(),
|
DisplayName: u.Owner.String(),
|
||||||
},
|
},
|
||||||
Key: u.Key,
|
Key: s3PathEncode(u.Key, params.EncodingType),
|
||||||
Owner: Owner{
|
Owner: Owner{
|
||||||
ID: u.Owner.String(),
|
ID: u.Owner.String(),
|
||||||
DisplayName: u.Owner.String(),
|
DisplayName: u.Owner.String(),
|
||||||
|
|
|
@ -2,21 +2,27 @@ package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/md5"
|
"crypto/md5"
|
||||||
|
"crypto/rand"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
s3Errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
|
||||||
|
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -35,7 +41,7 @@ func TestMultipartUploadInvalidPart(t *testing.T) {
|
||||||
etag1, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
|
etag1, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
|
||||||
etag2, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
|
etag2, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
|
||||||
w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
|
w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
|
||||||
assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))
|
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrEntityTooSmall))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDeleteMultipartAllParts(t *testing.T) {
|
func TestDeleteMultipartAllParts(t *testing.T) {
|
||||||
|
@ -49,13 +55,17 @@ func TestDeleteMultipartAllParts(t *testing.T) {
|
||||||
// unversioned bucket
|
// unversioned bucket
|
||||||
createTestBucket(hc, bktName)
|
createTestBucket(hc, bktName)
|
||||||
multipartUpload(hc, bktName, objName, nil, objLen, partSize)
|
multipartUpload(hc, bktName, objName, nil, objLen, partSize)
|
||||||
|
hc.tp.ClearTombstoneOIDCount()
|
||||||
deleteObject(t, hc, bktName, objName, emptyVersion)
|
deleteObject(t, hc, bktName, objName, emptyVersion)
|
||||||
require.Empty(t, hc.tp.Objects())
|
require.Empty(t, hc.tp.Objects())
|
||||||
|
require.Equal(t, objLen/partSize+1, hc.tp.TombstoneOIDCount())
|
||||||
|
|
||||||
// encrypted multipart
|
// encrypted multipart
|
||||||
multipartUploadEncrypted(hc, bktName, objName, nil, objLen, partSize)
|
multipartUploadEncrypted(hc, bktName, objName, nil, objLen, partSize)
|
||||||
|
hc.tp.ClearTombstoneOIDCount()
|
||||||
deleteObject(t, hc, bktName, objName, emptyVersion)
|
deleteObject(t, hc, bktName, objName, emptyVersion)
|
||||||
require.Empty(t, hc.tp.Objects())
|
require.Empty(t, hc.tp.Objects())
|
||||||
|
require.Equal(t, objLen/partSize+1, hc.tp.TombstoneOIDCount())
|
||||||
|
|
||||||
// versions bucket
|
// versions bucket
|
||||||
createTestBucket(hc, bktName2)
|
createTestBucket(hc, bktName2)
|
||||||
|
@ -63,11 +73,27 @@ func TestDeleteMultipartAllParts(t *testing.T) {
|
||||||
multipartUpload(hc, bktName2, objName, nil, objLen, partSize)
|
multipartUpload(hc, bktName2, objName, nil, objLen, partSize)
|
||||||
_, hdr := getObject(hc, bktName2, objName)
|
_, hdr := getObject(hc, bktName2, objName)
|
||||||
versionID := hdr.Get("X-Amz-Version-Id")
|
versionID := hdr.Get("X-Amz-Version-Id")
|
||||||
|
hc.tp.ClearTombstoneOIDCount()
|
||||||
deleteObject(t, hc, bktName2, objName, emptyVersion)
|
deleteObject(t, hc, bktName2, objName, emptyVersion)
|
||||||
|
require.Equal(t, 0, hc.tp.TombstoneOIDCount())
|
||||||
deleteObject(t, hc, bktName2, objName, versionID)
|
deleteObject(t, hc, bktName2, objName, versionID)
|
||||||
|
require.Equal(t, objLen/partSize+1, hc.tp.TombstoneOIDCount())
|
||||||
require.Empty(t, hc.tp.Objects())
|
require.Empty(t, hc.tp.Objects())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestSpecialMultipartName(t *testing.T) {
|
||||||
|
hc := prepareHandlerContextWithMinCache(t)
|
||||||
|
|
||||||
|
bktName, objName := "bucket", "bucket-settings"
|
||||||
|
|
||||||
|
createTestBucket(hc, bktName)
|
||||||
|
putBucketVersioning(t, hc, bktName, true)
|
||||||
|
|
||||||
|
createMultipartUpload(hc, bktName, objName, nil)
|
||||||
|
res := getBucketVersioning(hc, bktName)
|
||||||
|
require.Equal(t, enabledValue, res.Status)
|
||||||
|
}
|
||||||
|
|
||||||
func TestMultipartReUploadPart(t *testing.T) {
|
func TestMultipartReUploadPart(t *testing.T) {
|
||||||
hc := prepareHandlerContext(t)
|
hc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
@ -86,7 +112,7 @@ func TestMultipartReUploadPart(t *testing.T) {
|
||||||
require.Equal(t, etag2, list.Parts[1].ETag)
|
require.Equal(t, etag2, list.Parts[1].ETag)
|
||||||
|
|
||||||
w := completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
|
w := completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
|
||||||
assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))
|
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrEntityTooSmall))
|
||||||
|
|
||||||
etag1, data1 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeFirst)
|
etag1, data1 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeFirst)
|
||||||
etag2, data2 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeLast)
|
etag2, data2 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeLast)
|
||||||
|
@ -109,6 +135,108 @@ func TestMultipartReUploadPart(t *testing.T) {
|
||||||
equalDataSlices(t, append(data1, data2...), data)
|
equalDataSlices(t, append(data1, data2...), data)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestMultipartRemovePartsSplit(t *testing.T) {
|
||||||
|
bktName, objName := "bucket-to-upload-part", "object-multipart"
|
||||||
|
partSize := 8
|
||||||
|
|
||||||
|
t.Run("reupload part", func(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
bktInfo := createTestBucket(hc, bktName)
|
||||||
|
uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
|
||||||
|
|
||||||
|
uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
|
||||||
|
|
||||||
|
multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
objID := oidtest.ID()
|
||||||
|
_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
|
||||||
|
"Number": "1",
|
||||||
|
"OID": objID.EncodeToString(),
|
||||||
|
"Owner": usertest.ID().EncodeToString(),
|
||||||
|
"ETag": "etag",
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
|
||||||
|
require.Len(t, hc.tp.Objects(), 2)
|
||||||
|
|
||||||
|
list := listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
|
||||||
|
require.Len(t, list.Parts, 1)
|
||||||
|
require.Equal(t, `"etag"`, list.Parts[0].ETag)
|
||||||
|
|
||||||
|
etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
|
||||||
|
list = listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
|
||||||
|
require.Len(t, list.Parts, 1)
|
||||||
|
require.Equal(t, etag1, list.Parts[0].ETag)
|
||||||
|
|
||||||
|
require.Len(t, hc.tp.Objects(), 1)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("abort multipart", func(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
bktInfo := createTestBucket(hc, bktName)
|
||||||
|
uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
|
||||||
|
|
||||||
|
uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
|
||||||
|
|
||||||
|
multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
objID := oidtest.ID()
|
||||||
|
_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
|
||||||
|
"Number": "1",
|
||||||
|
"OID": objID.EncodeToString(),
|
||||||
|
"Owner": usertest.ID().EncodeToString(),
|
||||||
|
"ETag": "etag",
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
|
||||||
|
require.Len(t, hc.tp.Objects(), 2)
|
||||||
|
|
||||||
|
abortMultipartUpload(hc, bktName, objName, uploadInfo.UploadID)
|
||||||
|
require.Empty(t, hc.tp.Objects())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("complete multipart", func(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
bktInfo := createTestBucket(hc, bktName)
|
||||||
|
uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
|
||||||
|
|
||||||
|
etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
|
||||||
|
|
||||||
|
multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
objID := oidtest.ID()
|
||||||
|
_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
|
||||||
|
"Number": "1",
|
||||||
|
"OID": objID.EncodeToString(),
|
||||||
|
"Owner": usertest.ID().EncodeToString(),
|
||||||
|
"ETag": "etag",
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
|
||||||
|
require.Len(t, hc.tp.Objects(), 2)
|
||||||
|
|
||||||
|
completeMultipartUpload(hc, bktName, objName, uploadInfo.UploadID, []string{etag1})
|
||||||
|
require.Falsef(t, containsOID(hc.tp.Objects(), objID), "frostfs contains '%s' object, but shouldn't", objID)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func containsOID(objects []*object.Object, objID oid.ID) bool {
|
||||||
|
for _, o := range objects {
|
||||||
|
oID, _ := o.ID()
|
||||||
|
if oID.Equals(objID) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
func TestListMultipartUploads(t *testing.T) {
|
func TestListMultipartUploads(t *testing.T) {
|
||||||
hc := prepareHandlerContext(t)
|
hc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
@ -132,14 +260,14 @@ func TestListMultipartUploads(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("check max uploads", func(t *testing.T) {
|
t.Run("check max uploads", func(t *testing.T) {
|
||||||
listUploads := listMultipartUploadsBase(hc, bktName, "", "", "", "", 2)
|
listUploads := listMultipartUploads(hc, bktName, "", "", "", "", 2)
|
||||||
require.Len(t, listUploads.Uploads, 2)
|
require.Len(t, listUploads.Uploads, 2)
|
||||||
require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID)
|
require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID)
|
||||||
require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID)
|
require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("check prefix", func(t *testing.T) {
|
t.Run("check prefix", func(t *testing.T) {
|
||||||
listUploads := listMultipartUploadsBase(hc, bktName, "/my", "", "", "", -1)
|
listUploads := listMultipartUploads(hc, bktName, "/my", "", "", "", -1)
|
||||||
require.Len(t, listUploads.Uploads, 2)
|
require.Len(t, listUploads.Uploads, 2)
|
||||||
require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID)
|
require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID)
|
||||||
require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID)
|
require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID)
|
||||||
|
@ -147,7 +275,7 @@ func TestListMultipartUploads(t *testing.T) {
|
||||||
|
|
||||||
t.Run("check markers", func(t *testing.T) {
|
t.Run("check markers", func(t *testing.T) {
|
||||||
t.Run("check only key-marker", func(t *testing.T) {
|
t.Run("check only key-marker", func(t *testing.T) {
|
||||||
listUploads := listMultipartUploadsBase(hc, bktName, "", "", "", objName2, -1)
|
listUploads := listMultipartUploads(hc, bktName, "", "", "", objName2, -1)
|
||||||
require.Len(t, listUploads.Uploads, 1)
|
require.Len(t, listUploads.Uploads, 1)
|
||||||
// If upload-id-marker is not specified, only the keys lexicographically greater than the specified key-marker will be included in the list.
|
// If upload-id-marker is not specified, only the keys lexicographically greater than the specified key-marker will be included in the list.
|
||||||
require.Equal(t, uploadInfo3.UploadID, listUploads.Uploads[0].UploadID)
|
require.Equal(t, uploadInfo3.UploadID, listUploads.Uploads[0].UploadID)
|
||||||
|
@ -158,7 +286,7 @@ func TestListMultipartUploads(t *testing.T) {
|
||||||
if uploadIDMarker > uploadInfo2.UploadID {
|
if uploadIDMarker > uploadInfo2.UploadID {
|
||||||
uploadIDMarker = uploadInfo2.UploadID
|
uploadIDMarker = uploadInfo2.UploadID
|
||||||
}
|
}
|
||||||
listUploads := listMultipartUploadsBase(hc, bktName, "", "", uploadIDMarker, "", -1)
|
listUploads := listMultipartUploads(hc, bktName, "", "", uploadIDMarker, "", -1)
|
||||||
// If key-marker is not specified, the upload-id-marker parameter is ignored.
|
// If key-marker is not specified, the upload-id-marker parameter is ignored.
|
||||||
require.Len(t, listUploads.Uploads, 3)
|
require.Len(t, listUploads.Uploads, 3)
|
||||||
})
|
})
|
||||||
|
@ -166,7 +294,7 @@ func TestListMultipartUploads(t *testing.T) {
|
||||||
t.Run("check key-marker along with upload-id-marker", func(t *testing.T) {
|
t.Run("check key-marker along with upload-id-marker", func(t *testing.T) {
|
||||||
uploadIDMarker := "00000000-0000-0000-0000-000000000000"
|
uploadIDMarker := "00000000-0000-0000-0000-000000000000"
|
||||||
|
|
||||||
listUploads := listMultipartUploadsBase(hc, bktName, "", "", uploadIDMarker, objName3, -1)
|
listUploads := listMultipartUploads(hc, bktName, "", "", uploadIDMarker, objName3, -1)
|
||||||
require.Len(t, listUploads.Uploads, 1)
|
require.Len(t, listUploads.Uploads, 1)
|
||||||
// If upload-id-marker is specified, any multipart uploads for a key equal to the key-marker might also be included,
|
// If upload-id-marker is specified, any multipart uploads for a key equal to the key-marker might also be included,
|
||||||
// provided those multipart uploads have upload IDs lexicographically greater than the specified upload-id-marker.
|
// provided those multipart uploads have upload IDs lexicographically greater than the specified upload-id-marker.
|
||||||
|
@ -261,6 +389,10 @@ func TestMultipartUploadSize(t *testing.T) {
|
||||||
attr := getObjectAttributes(hc, newBucket, newObjName, objectParts)
|
attr := getObjectAttributes(hc, newBucket, newObjName, objectParts)
|
||||||
require.Equal(t, 1, attr.ObjectParts.PartsCount)
|
require.Equal(t, 1, attr.ObjectParts.PartsCount)
|
||||||
require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], strconv.Itoa(attr.ObjectParts.Parts[0].Size))
|
require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], strconv.Itoa(attr.ObjectParts.Parts[0].Size))
|
||||||
|
|
||||||
|
result := listVersions(t, hc, bktName)
|
||||||
|
require.Len(t, result.Version, 1)
|
||||||
|
require.EqualValues(t, objLen, result.Version[0].Size)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -279,13 +411,19 @@ func TestListParts(t *testing.T) {
|
||||||
require.Len(t, list.Parts, 2)
|
require.Len(t, list.Parts, 2)
|
||||||
require.Equal(t, etag1, list.Parts[0].ETag)
|
require.Equal(t, etag1, list.Parts[0].ETag)
|
||||||
require.Equal(t, etag2, list.Parts[1].ETag)
|
require.Equal(t, etag2, list.Parts[1].ETag)
|
||||||
|
require.Zero(t, list.PartNumberMarker)
|
||||||
|
require.Equal(t, 2, list.NextPartNumberMarker)
|
||||||
|
|
||||||
list = listParts(hc, bktName, objName, uploadInfo.UploadID, "1", http.StatusOK)
|
list = listParts(hc, bktName, objName, uploadInfo.UploadID, "1", http.StatusOK)
|
||||||
require.Len(t, list.Parts, 1)
|
require.Len(t, list.Parts, 1)
|
||||||
require.Equal(t, etag2, list.Parts[0].ETag)
|
require.Equal(t, etag2, list.Parts[0].ETag)
|
||||||
|
require.Equal(t, 1, list.PartNumberMarker)
|
||||||
|
require.Equal(t, 2, list.NextPartNumberMarker)
|
||||||
|
|
||||||
list = listParts(hc, bktName, objName, uploadInfo.UploadID, "2", http.StatusOK)
|
list = listParts(hc, bktName, objName, uploadInfo.UploadID, "2", http.StatusOK)
|
||||||
require.Len(t, list.Parts, 0)
|
require.Len(t, list.Parts, 0)
|
||||||
|
require.Equal(t, 2, list.PartNumberMarker)
|
||||||
|
require.Equal(t, 0, list.NextPartNumberMarker)
|
||||||
|
|
||||||
list = listParts(hc, bktName, objName, uploadInfo.UploadID, "7", http.StatusOK)
|
list = listParts(hc, bktName, objName, uploadInfo.UploadID, "7", http.StatusOK)
|
||||||
require.Len(t, list.Parts, 0)
|
require.Len(t, list.Parts, 0)
|
||||||
|
@ -395,7 +533,7 @@ func TestUploadPartCheckContentSHA256(t *testing.T) {
|
||||||
r.Header.Set(api.AmzContentSha256, tc.hash)
|
r.Header.Set(api.AmzContentSha256, tc.hash)
|
||||||
hc.Handler().UploadPartHandler(w, r)
|
hc.Handler().UploadPartHandler(w, r)
|
||||||
if tc.error {
|
if tc.error {
|
||||||
assertS3Error(t, w, s3Errors.GetAPIError(s3Errors.ErrContentSHA256Mismatch))
|
assertS3Error(t, w, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch))
|
||||||
|
|
||||||
list := listParts(hc, bktName, objName, multipartUpload.UploadID, "0", http.StatusOK)
|
list := listParts(hc, bktName, objName, multipartUpload.UploadID, "0", http.StatusOK)
|
||||||
require.Len(t, list.Parts, 1)
|
require.Len(t, list.Parts, 1)
|
||||||
|
@ -422,6 +560,147 @@ func TestUploadPartCheckContentSHA256(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestMultipartObjectLocation(t *testing.T) {
|
||||||
|
for _, tc := range []struct {
|
||||||
|
req *http.Request
|
||||||
|
bucket string
|
||||||
|
object string
|
||||||
|
vhsEnabled bool
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
req: &http.Request{
|
||||||
|
Host: "127.0.0.1:8084",
|
||||||
|
Header: map[string][]string{"X-Forwarded-Scheme": {"http"}},
|
||||||
|
},
|
||||||
|
bucket: "testbucket1",
|
||||||
|
object: "test/1.txt",
|
||||||
|
expected: "http://127.0.0.1:8084/testbucket1/test/1.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
req: &http.Request{
|
||||||
|
Host: "localhost:8084",
|
||||||
|
Header: map[string][]string{"X-Forwarded-Scheme": {"https"}},
|
||||||
|
},
|
||||||
|
bucket: "testbucket1",
|
||||||
|
object: "test/1.txt",
|
||||||
|
expected: "https://localhost:8084/testbucket1/test/1.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
req: &http.Request{
|
||||||
|
Host: "s3.mybucket.org",
|
||||||
|
Header: map[string][]string{"X-Forwarded-Scheme": {"http"}},
|
||||||
|
},
|
||||||
|
bucket: "mybucket",
|
||||||
|
object: "test/1.txt",
|
||||||
|
expected: "http://s3.mybucket.org/mybucket/test/1.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
req: &http.Request{Host: "mys3.mybucket.org"},
|
||||||
|
bucket: "mybucket",
|
||||||
|
object: "test/1.txt",
|
||||||
|
expected: "http://mys3.mybucket.org/mybucket/test/1.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
req: &http.Request{Host: "s3.bucket.org", TLS: &tls.ConnectionState{}},
|
||||||
|
bucket: "bucket",
|
||||||
|
object: "obj",
|
||||||
|
expected: "https://s3.bucket.org/bucket/obj",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
req: &http.Request{
|
||||||
|
Host: "mybucket.s3dev.frostfs.devenv",
|
||||||
|
},
|
||||||
|
bucket: "mybucket",
|
||||||
|
object: "test/1.txt",
|
||||||
|
vhsEnabled: true,
|
||||||
|
expected: "http://mybucket.s3dev.frostfs.devenv/test/1.txt",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
req: &http.Request{
|
||||||
|
Host: "mybucket.s3dev.frostfs.devenv",
|
||||||
|
Header: map[string][]string{"X-Forwarded-Scheme": {"https"}},
|
||||||
|
},
|
||||||
|
bucket: "mybucket",
|
||||||
|
object: "test/1.txt",
|
||||||
|
vhsEnabled: true,
|
||||||
|
expected: "https://mybucket.s3dev.frostfs.devenv/test/1.txt",
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run("", func(t *testing.T) {
|
||||||
|
location := getObjectLocation(tc.req, tc.bucket, tc.object, tc.vhsEnabled)
|
||||||
|
require.Equal(t, tc.expected, location)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUploadPartWithNegativeContentLength(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
bktName, objName := "bucket-to-upload-part", "object-multipart"
|
||||||
|
createTestBucket(hc, bktName)
|
||||||
|
partSize := 5 * 1024 * 1024
|
||||||
|
|
||||||
|
multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
|
||||||
|
|
||||||
|
partBody := make([]byte, partSize)
|
||||||
|
_, err := rand.Read(partBody)
|
||||||
|
require.NoError(hc.t, err)
|
||||||
|
|
||||||
|
query := make(url.Values)
|
||||||
|
query.Set(uploadIDQuery, multipartUpload.UploadID)
|
||||||
|
query.Set(partNumberQuery, "1")
|
||||||
|
|
||||||
|
w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, partBody)
|
||||||
|
r.ContentLength = -1
|
||||||
|
hc.Handler().UploadPartHandler(w, r)
|
||||||
|
assertStatus(hc.t, w, http.StatusOK)
|
||||||
|
|
||||||
|
completeMultipartUpload(hc, bktName, objName, multipartUpload.UploadID, []string{w.Header().Get(api.ETag)})
|
||||||
|
res, _ := getObject(hc, bktName, objName)
|
||||||
|
equalDataSlices(t, partBody, res)
|
||||||
|
|
||||||
|
resp := getObjectAttributes(hc, bktName, objName, objectParts)
|
||||||
|
require.Len(t, resp.ObjectParts.Parts, 1)
|
||||||
|
require.Equal(t, partSize, resp.ObjectParts.Parts[0].Size)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestListMultipartUploadsEncoding(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
|
||||||
|
bktName := "bucket-to-list-uploads-encoding"
|
||||||
|
createTestBucket(hc, bktName)
|
||||||
|
|
||||||
|
listAllMultipartUploadsErr(hc, bktName, "invalid", apierr.GetAPIError(apierr.ErrInvalidEncodingMethod))
|
||||||
|
|
||||||
|
objects := []string{"foo()/bar", "foo()/bar/xyzzy", "asdf+b"}
|
||||||
|
for _, objName := range objects {
|
||||||
|
createMultipartUpload(hc, bktName, objName, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
listResponse := listMultipartUploadsURL(hc, bktName, "foo(", ")", "", "", -1)
|
||||||
|
|
||||||
|
require.Len(t, listResponse.CommonPrefixes, 1)
|
||||||
|
require.Equal(t, "foo%28%29", listResponse.CommonPrefixes[0].Prefix)
|
||||||
|
require.Equal(t, "foo%28", listResponse.Prefix)
|
||||||
|
require.Equal(t, "%29", listResponse.Delimiter)
|
||||||
|
require.Equal(t, "url", listResponse.EncodingType)
|
||||||
|
require.Equal(t, maxObjectList, listResponse.MaxUploads)
|
||||||
|
|
||||||
|
listResponse = listMultipartUploads(hc, bktName, "", "", "", "", 1)
|
||||||
|
require.Empty(t, listResponse.EncodingType)
|
||||||
|
|
||||||
|
listResponse = listMultipartUploadsURL(hc, bktName, "", "", "", listResponse.NextKeyMarker, 1)
|
||||||
|
|
||||||
|
require.Len(t, listResponse.CommonPrefixes, 0)
|
||||||
|
require.Len(t, listResponse.Uploads, 1)
|
||||||
|
require.Equal(t, "foo%28%29/bar", listResponse.Uploads[0].Key)
|
||||||
|
require.Equal(t, "asdf%2Bb", listResponse.KeyMarker)
|
||||||
|
require.Equal(t, "foo%28%29/bar", listResponse.NextKeyMarker)
|
||||||
|
require.Equal(t, "url", listResponse.EncodingType)
|
||||||
|
require.Equal(t, 1, listResponse.MaxUploads)
|
||||||
|
}
|
||||||
|
|
||||||
func uploadPartCopy(hc *handlerContext, bktName, objName, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse {
|
func uploadPartCopy(hc *handlerContext, bktName, objName, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse {
|
||||||
return uploadPartCopyBase(hc, bktName, objName, false, uploadID, num, srcObj, start, end)
|
return uploadPartCopyBase(hc, bktName, objName, false, uploadID, num, srcObj, start, end)
|
||||||
}
|
}
|
||||||
|
@ -447,16 +726,42 @@ func uploadPartCopyBase(hc *handlerContext, bktName, objName string, encrypted b
|
||||||
return uploadPartCopyResponse
|
return uploadPartCopyResponse
|
||||||
}
|
}
|
||||||
|
|
||||||
func listAllMultipartUploads(hc *handlerContext, bktName string) *ListMultipartUploadsResponse {
|
func listMultipartUploads(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker string, maxUploads int) *ListMultipartUploadsResponse {
|
||||||
return listMultipartUploadsBase(hc, bktName, "", "", "", "", -1)
|
w := listMultipartUploadsBase(hc, bktName, prefix, delimiter, uploadIDMarker, keyMarker, "", maxUploads)
|
||||||
|
assertStatus(hc.t, w, http.StatusOK)
|
||||||
|
res := &ListMultipartUploadsResponse{}
|
||||||
|
parseTestResponse(hc.t, w, res)
|
||||||
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
func listMultipartUploadsBase(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker string, maxUploads int) *ListMultipartUploadsResponse {
|
func listMultipartUploadsURL(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker string, maxUploads int) *ListMultipartUploadsResponse {
|
||||||
|
w := listMultipartUploadsBase(hc, bktName, prefix, delimiter, uploadIDMarker, keyMarker, urlEncodingType, maxUploads)
|
||||||
|
assertStatus(hc.t, w, http.StatusOK)
|
||||||
|
res := &ListMultipartUploadsResponse{}
|
||||||
|
parseTestResponse(hc.t, w, res)
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func listAllMultipartUploads(hc *handlerContext, bktName string) *ListMultipartUploadsResponse {
|
||||||
|
w := listMultipartUploadsBase(hc, bktName, "", "", "", "", "", -1)
|
||||||
|
assertStatus(hc.t, w, http.StatusOK)
|
||||||
|
res := &ListMultipartUploadsResponse{}
|
||||||
|
parseTestResponse(hc.t, w, res)
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func listAllMultipartUploadsErr(hc *handlerContext, bktName, encoding string, err apierr.Error) {
|
||||||
|
w := listMultipartUploadsBase(hc, bktName, "", "", "", "", encoding, -1)
|
||||||
|
assertS3Error(hc.t, w, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func listMultipartUploadsBase(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker, encoding string, maxUploads int) *httptest.ResponseRecorder {
|
||||||
query := make(url.Values)
|
query := make(url.Values)
|
||||||
query.Set(prefixQueryName, prefix)
|
query.Set(prefixQueryName, prefix)
|
||||||
query.Set(delimiterQueryName, delimiter)
|
query.Set(delimiterQueryName, delimiter)
|
||||||
query.Set(uploadIDMarkerQueryName, uploadIDMarker)
|
query.Set(uploadIDMarkerQueryName, uploadIDMarker)
|
||||||
query.Set(keyMarkerQueryName, keyMarker)
|
query.Set(keyMarkerQueryName, keyMarker)
|
||||||
|
query.Set(encodingTypeQueryName, encoding)
|
||||||
if maxUploads != -1 {
|
if maxUploads != -1 {
|
||||||
query.Set(maxUploadsQueryName, strconv.Itoa(maxUploads))
|
query.Set(maxUploadsQueryName, strconv.Itoa(maxUploads))
|
||||||
}
|
}
|
||||||
|
@ -464,10 +769,7 @@ func listMultipartUploadsBase(hc *handlerContext, bktName, prefix, delimiter, up
|
||||||
w, r := prepareTestRequestWithQuery(hc, bktName, "", query, nil)
|
w, r := prepareTestRequestWithQuery(hc, bktName, "", query, nil)
|
||||||
|
|
||||||
hc.Handler().ListMultipartUploadsHandler(w, r)
|
hc.Handler().ListMultipartUploadsHandler(w, r)
|
||||||
listPartsResponse := &ListMultipartUploadsResponse{}
|
return w
|
||||||
readResponse(hc.t, w, http.StatusOK, listPartsResponse)
|
|
||||||
|
|
||||||
return listPartsResponse
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func listParts(hc *handlerContext, bktName, objName string, uploadID, partNumberMarker string, status int) *ListPartsResponse {
|
func listParts(hc *handlerContext, bktName, objName string, uploadID, partNumberMarker string, status int) *ListPartsResponse {
|
||||||
|
|
|
@ -7,10 +7,6 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *handler) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
|
h.logAndSendError(r.Context(), w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,274 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
SendNotificationParams struct {
|
|
||||||
Event string
|
|
||||||
NotificationInfo *data.NotificationInfo
|
|
||||||
BktInfo *data.BucketInfo
|
|
||||||
ReqInfo *middleware.ReqInfo
|
|
||||||
User string
|
|
||||||
Time time.Time
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
filterRuleSuffixName = "suffix"
|
|
||||||
filterRulePrefixName = "prefix"
|
|
||||||
|
|
||||||
EventObjectCreated = "s3:ObjectCreated:*"
|
|
||||||
EventObjectCreatedPut = "s3:ObjectCreated:Put"
|
|
||||||
EventObjectCreatedPost = "s3:ObjectCreated:Post"
|
|
||||||
EventObjectCreatedCopy = "s3:ObjectCreated:Copy"
|
|
||||||
EventReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
|
|
||||||
EventObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
|
|
||||||
EventObjectRemoved = "s3:ObjectRemoved:*"
|
|
||||||
EventObjectRemovedDelete = "s3:ObjectRemoved:Delete"
|
|
||||||
EventObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
|
|
||||||
EventObjectRestore = "s3:ObjectRestore:*"
|
|
||||||
EventObjectRestorePost = "s3:ObjectRestore:Post"
|
|
||||||
EventObjectRestoreCompleted = "s3:ObjectRestore:Completed"
|
|
||||||
EventReplication = "s3:Replication:*"
|
|
||||||
EventReplicationOperationFailedReplication = "s3:Replication:OperationFailedReplication"
|
|
||||||
EventReplicationOperationNotTracked = "s3:Replication:OperationNotTracked"
|
|
||||||
EventReplicationOperationMissedThreshold = "s3:Replication:OperationMissedThreshold"
|
|
||||||
EventReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold"
|
|
||||||
EventObjectRestoreDelete = "s3:ObjectRestore:Delete"
|
|
||||||
EventLifecycleTransition = "s3:LifecycleTransition"
|
|
||||||
EventIntelligentTiering = "s3:IntelligentTiering"
|
|
||||||
EventObjectACLPut = "s3:ObjectAcl:Put"
|
|
||||||
EventLifecycleExpiration = "s3:LifecycleExpiration:*"
|
|
||||||
EventLifecycleExpirationDelete = "s3:LifecycleExpiration:Delete"
|
|
||||||
EventLifecycleExpirationDeleteMarkerCreated = "s3:LifecycleExpiration:DeleteMarkerCreated"
|
|
||||||
EventObjectTagging = "s3:ObjectTagging:*"
|
|
||||||
EventObjectTaggingPut = "s3:ObjectTagging:Put"
|
|
||||||
EventObjectTaggingDelete = "s3:ObjectTagging:Delete"
|
|
||||||
)
|
|
||||||
|
|
||||||
var validEvents = map[string]struct{}{
|
|
||||||
EventReducedRedundancyLostObject: {},
|
|
||||||
EventObjectCreated: {},
|
|
||||||
EventObjectCreatedPut: {},
|
|
||||||
EventObjectCreatedPost: {},
|
|
||||||
EventObjectCreatedCopy: {},
|
|
||||||
EventObjectCreatedCompleteMultipartUpload: {},
|
|
||||||
EventObjectRemoved: {},
|
|
||||||
EventObjectRemovedDelete: {},
|
|
||||||
EventObjectRemovedDeleteMarkerCreated: {},
|
|
||||||
EventObjectRestore: {},
|
|
||||||
EventObjectRestorePost: {},
|
|
||||||
EventObjectRestoreCompleted: {},
|
|
||||||
EventReplication: {},
|
|
||||||
EventReplicationOperationFailedReplication: {},
|
|
||||||
EventReplicationOperationNotTracked: {},
|
|
||||||
EventReplicationOperationMissedThreshold: {},
|
|
||||||
EventReplicationOperationReplicatedAfterThreshold: {},
|
|
||||||
EventObjectRestoreDelete: {},
|
|
||||||
EventLifecycleTransition: {},
|
|
||||||
EventIntelligentTiering: {},
|
|
||||||
EventObjectACLPut: {},
|
|
||||||
EventLifecycleExpiration: {},
|
|
||||||
EventLifecycleExpirationDelete: {},
|
|
||||||
EventLifecycleExpirationDeleteMarkerCreated: {},
|
|
||||||
EventObjectTagging: {},
|
|
||||||
EventObjectTaggingPut: {},
|
|
||||||
EventObjectTaggingDelete: {},
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
conf := &data.NotificationConfiguration{}
|
|
||||||
if err = h.cfg.NewXMLDecoder(r.Body).Decode(conf); err != nil {
|
|
||||||
h.logAndSendError(w, "couldn't decode notification configuration", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = h.checkBucketConfiguration(r.Context(), conf, reqInfo); err != nil {
|
|
||||||
h.logAndSendError(w, "couldn't check bucket configuration", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
p := &layer.PutBucketNotificationConfigurationParams{
|
|
||||||
RequestInfo: reqInfo,
|
|
||||||
BktInfo: bktInfo,
|
|
||||||
Configuration: conf,
|
|
||||||
}
|
|
||||||
|
|
||||||
p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
|
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(w, "invalid copies number", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = h.obj.PutBucketNotificationConfiguration(r.Context(), p); err != nil {
|
|
||||||
h.logAndSendError(w, "couldn't put bucket configuration", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *handler) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
|
||||||
|
|
||||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
conf, err := h.obj.GetBucketNotificationConfiguration(r.Context(), bktInfo)
|
|
||||||
if err != nil {
|
|
||||||
h.logAndSendError(w, "could not get bucket notification configuration", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, conf); err != nil {
|
|
||||||
h.logAndSendError(w, "could not encode bucket notification configuration to response", reqInfo, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *handler) sendNotifications(ctx context.Context, p *SendNotificationParams) error {
|
|
||||||
if !h.cfg.NotificatorEnabled() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
conf, err := h.obj.GetBucketNotificationConfiguration(ctx, p.BktInfo)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to get notification configuration: %w", err)
|
|
||||||
}
|
|
||||||
if conf.IsEmpty() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
box, err := middleware.GetBoxData(ctx)
|
|
||||||
if err == nil && box.Gate.BearerToken != nil {
|
|
||||||
p.User = bearer.ResolveIssuer(*box.Gate.BearerToken).EncodeToString()
|
|
||||||
}
|
|
||||||
|
|
||||||
p.Time = layer.TimeNow(ctx)
|
|
||||||
|
|
||||||
topics := filterSubjects(conf, p.Event, p.NotificationInfo.Name)
|
|
||||||
|
|
||||||
return h.notificator.SendNotifications(topics, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkBucketConfiguration checks notification configuration and generates an ID for configurations with empty ids.
|
|
||||||
func (h *handler) checkBucketConfiguration(ctx context.Context, conf *data.NotificationConfiguration, r *middleware.ReqInfo) (completed bool, err error) {
|
|
||||||
if conf == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if conf.TopicConfigurations != nil || conf.LambdaFunctionConfigurations != nil {
|
|
||||||
return completed, errors.GetAPIError(errors.ErrNotificationTopicNotSupported)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, q := range conf.QueueConfigurations {
|
|
||||||
if err = checkEvents(q.Events); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = checkRules(q.Filter.Key.FilterRules); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if h.cfg.NotificatorEnabled() {
|
|
||||||
if err = h.notificator.SendTestNotification(q.QueueArn, r.BucketName, r.RequestID, r.Host, layer.TimeNow(ctx)); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
h.reqLogger(ctx).Warn(logs.FailedToSendTestEventBecauseNotificationsIsDisabled)
|
|
||||||
}
|
|
||||||
|
|
||||||
if q.ID == "" {
|
|
||||||
completed = true
|
|
||||||
conf.QueueConfigurations[i].ID = uuid.NewString()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkRules(rules []data.FilterRule) error {
|
|
||||||
names := make(map[string]struct{})
|
|
||||||
|
|
||||||
for _, r := range rules {
|
|
||||||
if r.Name != filterRuleSuffixName && r.Name != filterRulePrefixName {
|
|
||||||
return errors.GetAPIError(errors.ErrFilterNameInvalid)
|
|
||||||
}
|
|
||||||
if _, ok := names[r.Name]; ok {
|
|
||||||
if r.Name == filterRuleSuffixName {
|
|
||||||
return errors.GetAPIError(errors.ErrFilterNameSuffix)
|
|
||||||
}
|
|
||||||
return errors.GetAPIError(errors.ErrFilterNamePrefix)
|
|
||||||
}
|
|
||||||
|
|
||||||
names[r.Name] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkEvents(events []string) error {
|
|
||||||
for _, e := range events {
|
|
||||||
if _, ok := validEvents[e]; !ok {
|
|
||||||
return errors.GetAPIError(errors.ErrEventNotification)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func filterSubjects(conf *data.NotificationConfiguration, eventType, objName string) map[string]string {
|
|
||||||
topics := make(map[string]string)
|
|
||||||
|
|
||||||
for _, t := range conf.QueueConfigurations {
|
|
||||||
event := false
|
|
||||||
for _, e := range t.Events {
|
|
||||||
// the second condition is comparison with the events ending with *:
|
|
||||||
// s3:ObjectCreated:*, s3:ObjectRemoved:* etc without the last char
|
|
||||||
if eventType == e || strings.HasPrefix(eventType, e[:len(e)-1]) {
|
|
||||||
event = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !event {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
filter := true
|
|
||||||
for _, f := range t.Filter.Key.FilterRules {
|
|
||||||
if f.Name == filterRulePrefixName && !strings.HasPrefix(objName, f.Value) ||
|
|
||||||
f.Name == filterRuleSuffixName && !strings.HasSuffix(objName, f.Value) {
|
|
||||||
filter = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if filter {
|
|
||||||
topics[t.ID] = t.QueueArn
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return topics
|
|
||||||
}
|
|
|
@ -1,115 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestFilterSubjects(t *testing.T) {
|
|
||||||
config := &data.NotificationConfiguration{
|
|
||||||
QueueConfigurations: []data.QueueConfiguration{
|
|
||||||
{
|
|
||||||
ID: "test1",
|
|
||||||
QueueArn: "test1",
|
|
||||||
Events: []string{EventObjectCreated, EventObjectRemovedDelete},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ID: "test2",
|
|
||||||
QueueArn: "test2",
|
|
||||||
Events: []string{EventObjectTagging},
|
|
||||||
Filter: data.Filter{Key: data.Key{FilterRules: []data.FilterRule{
|
|
||||||
{Name: "prefix", Value: "dir/"},
|
|
||||||
{Name: "suffix", Value: ".png"},
|
|
||||||
}}},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("no topics because suitable events not found", func(t *testing.T) {
|
|
||||||
topics := filterSubjects(config, EventObjectACLPut, "dir/a.png")
|
|
||||||
require.Empty(t, topics)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("no topics because of not suitable prefix", func(t *testing.T) {
|
|
||||||
topics := filterSubjects(config, EventObjectTaggingPut, "dirw/cat.png")
|
|
||||||
require.Empty(t, topics)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("no topics because of not suitable suffix", func(t *testing.T) {
|
|
||||||
topics := filterSubjects(config, EventObjectTaggingPut, "a.jpg")
|
|
||||||
require.Empty(t, topics)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("filter topics from queue configs without prefix suffix filter and exact event", func(t *testing.T) {
|
|
||||||
topics := filterSubjects(config, EventObjectCreatedPut, "dir/a.png")
|
|
||||||
require.Contains(t, topics, "test1")
|
|
||||||
require.Len(t, topics, 1)
|
|
||||||
require.Equal(t, topics["test1"], "test1")
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("filter topics from queue configs with prefix suffix filter and '*' ending event", func(t *testing.T) {
|
|
||||||
topics := filterSubjects(config, EventObjectTaggingPut, "dir/a.png")
|
|
||||||
require.Contains(t, topics, "test2")
|
|
||||||
require.Len(t, topics, 1)
|
|
||||||
require.Equal(t, topics["test2"], "test2")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckRules(t *testing.T) {
|
|
||||||
t.Run("correct rules with prefix and suffix", func(t *testing.T) {
|
|
||||||
rules := []data.FilterRule{
|
|
||||||
{Name: "prefix", Value: "asd"},
|
|
||||||
{Name: "suffix", Value: "asd"},
|
|
||||||
}
|
|
||||||
err := checkRules(rules)
|
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("correct rules with prefix", func(t *testing.T) {
|
|
||||||
rules := []data.FilterRule{
|
|
||||||
{Name: "prefix", Value: "asd"},
|
|
||||||
}
|
|
||||||
err := checkRules(rules)
|
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("correct rules with suffix", func(t *testing.T) {
|
|
||||||
rules := []data.FilterRule{
|
|
||||||
{Name: "suffix", Value: "asd"},
|
|
||||||
}
|
|
||||||
err := checkRules(rules)
|
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("incorrect rules with wrong name", func(t *testing.T) {
|
|
||||||
rules := []data.FilterRule{
|
|
||||||
{Name: "prefix", Value: "sdf"},
|
|
||||||
{Name: "sfx", Value: "asd"},
|
|
||||||
}
|
|
||||||
err := checkRules(rules)
|
|
||||||
require.ErrorIs(t, err, errors.GetAPIError(errors.ErrFilterNameInvalid))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("incorrect rules with repeating suffix", func(t *testing.T) {
|
|
||||||
rules := []data.FilterRule{
|
|
||||||
{Name: "suffix", Value: "asd"},
|
|
||||||
{Name: "suffix", Value: "asdf"},
|
|
||||||
{Name: "prefix", Value: "jk"},
|
|
||||||
}
|
|
||||||
err := checkRules(rules)
|
|
||||||
require.ErrorIs(t, err, errors.GetAPIError(errors.ErrFilterNameSuffix))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("incorrect rules with repeating prefix", func(t *testing.T) {
|
|
||||||
rules := []data.FilterRule{
|
|
||||||
{Name: "suffix", Value: "ds"},
|
|
||||||
{Name: "prefix", Value: "asd"},
|
|
||||||
{Name: "prefix", Value: "asdf"},
|
|
||||||
}
|
|
||||||
err := checkRules(rules)
|
|
||||||
require.ErrorIs(t, err, errors.GetAPIError(errors.ErrFilterNamePrefix))
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
|
@ -16,26 +17,27 @@ import (
|
||||||
|
|
||||||
// ListObjectsV1Handler handles objects listing requests for API version 1.
|
// ListObjectsV1Handler handles objects listing requests for API version 1.
|
||||||
func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
params, err := parseListObjectsArgsV1(reqInfo)
|
params, err := parseListObjectsArgsV1(reqInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "failed to parse arguments", reqInfo, err)
|
h.logAndSendError(ctx, w, "failed to parse arguments", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if params.BktInfo, err = h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil {
|
if params.BktInfo, err = h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
list, err := h.obj.ListObjectsV1(r.Context(), params)
|
list, err := h.obj.ListObjectsV1(ctx, params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, h.encodeV1(params, list)); err != nil {
|
if err = middleware.EncodeToResponse(w, h.encodeV1(params, list)); err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -60,26 +62,27 @@ func (h *handler) encodeV1(p *layer.ListObjectsParamsV1, list *layer.ListObjects
|
||||||
|
|
||||||
// ListObjectsV2Handler handles objects listing requests for API version 2.
|
// ListObjectsV2Handler handles objects listing requests for API version 2.
|
||||||
func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
params, err := parseListObjectsArgsV2(reqInfo)
|
params, err := parseListObjectsArgsV2(reqInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "failed to parse arguments", reqInfo, err)
|
h.logAndSendError(ctx, w, "failed to parse arguments", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if params.BktInfo, err = h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil {
|
if params.BktInfo, err = h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
list, err := h.obj.ListObjectsV2(r.Context(), params)
|
list, err := h.obj.ListObjectsV2(ctx, params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = middleware.EncodeToResponse(w, h.encodeV2(params, list)); err != nil {
|
if err = middleware.EncodeToResponse(w, h.encodeV2(params, list)); err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -153,6 +156,10 @@ func parseListObjectArgs(reqInfo *middleware.ReqInfo) (*layer.ListObjectsParamsC
|
||||||
res.Delimiter = queryValues.Get("delimiter")
|
res.Delimiter = queryValues.Get("delimiter")
|
||||||
res.Encode = queryValues.Get("encoding-type")
|
res.Encode = queryValues.Get("encoding-type")
|
||||||
|
|
||||||
|
if res.Encode != "" && strings.ToLower(res.Encode) != urlEncodingType {
|
||||||
|
return nil, errors.GetAPIError(errors.ErrInvalidEncodingMethod)
|
||||||
|
}
|
||||||
|
|
||||||
if queryValues.Get("max-keys") == "" {
|
if queryValues.Get("max-keys") == "" {
|
||||||
res.MaxKeys = maxObjectList
|
res.MaxKeys = maxObjectList
|
||||||
} else if res.MaxKeys, err = strconv.Atoi(queryValues.Get("max-keys")); err != nil || res.MaxKeys < 0 {
|
} else if res.MaxKeys, err = strconv.Atoi(queryValues.Get("max-keys")); err != nil || res.MaxKeys < 0 {
|
||||||
|
@ -214,27 +221,28 @@ func fillContents(src []*data.ExtendedNodeVersion, encode string, fetchOwner, md
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := middleware.GetReqInfo(r.Context())
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
p, err := parseListObjectVersionsRequest(reqInfo)
|
p, err := parseListObjectVersionsRequest(reqInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "failed to parse request", reqInfo, err)
|
h.logAndSendError(ctx, w, "failed to parse request", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if p.BktInfo, err = h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil {
|
if p.BktInfo, err = h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err := h.obj.ListObjectVersions(r.Context(), p)
|
info, err := h.obj.ListObjectVersions(ctx, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
response := encodeListObjectVersionsToResponse(info, p.BktInfo.Name, h.cfg.MD5Enabled())
|
response := encodeListObjectVersionsToResponse(p, info, p.BktInfo.Name, h.cfg.MD5Enabled())
|
||||||
if err = middleware.EncodeToResponse(w, response); err != nil {
|
if err = middleware.EncodeToResponse(w, response); err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -257,6 +265,10 @@ func parseListObjectVersionsRequest(reqInfo *middleware.ReqInfo) (*layer.ListObj
|
||||||
res.Encode = queryValues.Get("encoding-type")
|
res.Encode = queryValues.Get("encoding-type")
|
||||||
res.VersionIDMarker = queryValues.Get("version-id-marker")
|
res.VersionIDMarker = queryValues.Get("version-id-marker")
|
||||||
|
|
||||||
|
if res.Encode != "" && strings.ToLower(res.Encode) != urlEncodingType {
|
||||||
|
return nil, errors.GetAPIError(errors.ErrInvalidEncodingMethod)
|
||||||
|
}
|
||||||
|
|
||||||
if res.VersionIDMarker != "" && res.KeyMarker == "" {
|
if res.VersionIDMarker != "" && res.KeyMarker == "" {
|
||||||
return nil, errors.GetAPIError(errors.VersionIDMarkerWithoutKeyMarker)
|
return nil, errors.GetAPIError(errors.VersionIDMarkerWithoutKeyMarker)
|
||||||
}
|
}
|
||||||
|
@ -264,24 +276,28 @@ func parseListObjectVersionsRequest(reqInfo *middleware.ReqInfo) (*layer.ListObj
|
||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeListObjectVersionsToResponse(info *layer.ListObjectVersionsInfo, bucketName string, md5Enabled bool) *ListObjectsVersionsResponse {
|
func encodeListObjectVersionsToResponse(p *layer.ListObjectVersionsParams, info *layer.ListObjectVersionsInfo, bucketName string, md5Enabled bool) *ListObjectsVersionsResponse {
|
||||||
res := ListObjectsVersionsResponse{
|
res := ListObjectsVersionsResponse{
|
||||||
Name: bucketName,
|
Name: bucketName,
|
||||||
IsTruncated: info.IsTruncated,
|
IsTruncated: info.IsTruncated,
|
||||||
KeyMarker: info.KeyMarker,
|
KeyMarker: s3PathEncode(info.KeyMarker, p.Encode),
|
||||||
NextKeyMarker: info.NextKeyMarker,
|
NextKeyMarker: s3PathEncode(info.NextKeyMarker, p.Encode),
|
||||||
NextVersionIDMarker: info.NextVersionIDMarker,
|
NextVersionIDMarker: info.NextVersionIDMarker,
|
||||||
VersionIDMarker: info.VersionIDMarker,
|
VersionIDMarker: info.VersionIDMarker,
|
||||||
|
Prefix: s3PathEncode(p.Prefix, p.Encode),
|
||||||
|
Delimiter: s3PathEncode(p.Delimiter, p.Encode),
|
||||||
|
EncodingType: p.Encode,
|
||||||
|
MaxKeys: p.MaxKeys,
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, prefix := range info.CommonPrefixes {
|
for _, prefix := range info.CommonPrefixes {
|
||||||
res.CommonPrefixes = append(res.CommonPrefixes, CommonPrefix{Prefix: prefix})
|
res.CommonPrefixes = append(res.CommonPrefixes, CommonPrefix{Prefix: s3PathEncode(prefix, p.Encode)})
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, ver := range info.Version {
|
for _, ver := range info.Version {
|
||||||
res.Version = append(res.Version, ObjectVersionResponse{
|
res.Version = append(res.Version, ObjectVersionResponse{
|
||||||
IsLatest: ver.IsLatest,
|
IsLatest: ver.IsLatest,
|
||||||
Key: ver.NodeVersion.FilePath,
|
Key: s3PathEncode(ver.NodeVersion.FilePath, p.Encode),
|
||||||
LastModified: ver.NodeVersion.Created.UTC().Format(time.RFC3339),
|
LastModified: ver.NodeVersion.Created.UTC().Format(time.RFC3339),
|
||||||
Owner: Owner{
|
Owner: Owner{
|
||||||
ID: ver.NodeVersion.Owner.String(),
|
ID: ver.NodeVersion.Owner.String(),
|
||||||
|
@ -297,7 +313,7 @@ func encodeListObjectVersionsToResponse(info *layer.ListObjectVersionsInfo, buck
|
||||||
for _, del := range info.DeleteMarker {
|
for _, del := range info.DeleteMarker {
|
||||||
res.DeleteMarker = append(res.DeleteMarker, DeleteMarkerEntry{
|
res.DeleteMarker = append(res.DeleteMarker, DeleteMarkerEntry{
|
||||||
IsLatest: del.IsLatest,
|
IsLatest: del.IsLatest,
|
||||||
Key: del.NodeVersion.FilePath,
|
Key: s3PathEncode(del.NodeVersion.FilePath, p.Encode),
|
||||||
LastModified: del.NodeVersion.Created.UTC().Format(time.RFC3339),
|
LastModified: del.NodeVersion.Created.UTC().Format(time.RFC3339),
|
||||||
Owner: Owner{
|
Owner: Owner{
|
||||||
ID: del.NodeVersion.Owner.String(),
|
ID: del.NodeVersion.Owner.String(),
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue