Compare commits


458 commits

f391966326 [#581] Clean up remaining NeoFS mentions
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2024-12-13 18:18:04 +03:00
d986e74897 [#147] Add Kludge profiles
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-12-13 11:25:07 +00:00
df1af2d2c9 [#559] Return error from multipart deleting
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-12-13 11:23:56 +00:00
04b8fc2b5f [#562] Empty default value for TLS termination header param
If the service is accessed without a proxy and the
default value of the parameter with the header key is
not empty, the system administrator has no way to
control disabling of TLS verification, because the
client can simply add the known header and thereby
skip the check. Therefore, the default value of the
header parameter is made empty. While it is empty,
TLS verification cannot be disabled at all, so the
system administrator keeps control over enabling and
disabling TLS (see the sketch below).

Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-12-13 11:12:58 +00:00
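
A minimal Go sketch of the check this change implies, assuming the header carries a boolean value; the function name, the `tlsTerminationHeader` parameter and the value parsing are illustrative, not the gateway's actual code:

```go
package sketch

import (
	"net/http"
	"strconv"
)

// isTLSCheckRequired reports whether TLS verification must still be performed
// for SSE-C. With an empty header name (the new default) the check can never
// be disabled; only an explicitly configured header lets a trusted proxy
// switch it off. Illustrative sketch, not the gateway's actual code.
func isTLSCheckRequired(r *http.Request, tlsTerminationHeader string) bool {
	if tlsTerminationHeader == "" {
		return true // nothing configured: always verify TLS
	}
	terminated, err := strconv.ParseBool(r.Header.Get(tlsTerminationHeader))
	if err != nil {
		return true // absent or malformed header: keep verifying
	}
	return !terminated // header says TLS already terminated at the proxy
}
```
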
59b789f57e [#576] Update frostfs-sdk-go version
The new version of frostfs-sdk-go
contains a fix for the problem of
not being able to delete an EC object.

Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-12-13 03:17:19 +03:00
128939c01e [#562] Add tests for form encryption params
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-12-11 16:09:43 +03:00
4a4ce00994 [#562] Support TLS termination header for SSE-C
The TLS termination header is added to determine
whether TLS needs to be checked. If requests to the
system come through a proxy server and TLS terminates
at the proxy level, use this header to disable TLS
verification for SSE-C.

Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-12-11 16:09:43 +03:00
980763c468 [#573] Refine CODEOWNERS settings
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2024-12-10 14:35:40 +00:00
9395b5f39d [#339] v4: Don't duplicate content-length as signed header
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-12-10 15:04:56 +03:00
11c1a86404 [#339] Fix stream sigV4a chunk-encoding
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-12-10 15:04:56 +03:00
4515a7ae88 [#339] Don't explicitly use smithy-go
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-12-10 15:04:56 +03:00
c5deb2e148 [#339] Drop unused and add link to source files
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-12-10 15:04:56 +03:00
ea714c2e9e [#339] Fix logging in authmate [pre]sign command
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-12-10 15:04:56 +03:00
7bf31bea18 [#339] lint: Ignore aws sdk dirs
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-12-10 15:04:56 +03:00
cc43975536 [#339] Presign fix aws sdk
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-12-10 15:04:56 +03:00
c4c757eea6 [#339] Drop aws-sdk-go v1
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-12-10 15:04:56 +03:00
389e0de403 [#339] Don't include additional content-length header for signing
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-12-10 15:04:56 +03:00
8da71c3ae0 [#339] sigv4a: Support presign
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-12-10 15:04:56 +03:00
cc9a68401f [#339] Add aws-sdk-go-v2
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-12-10 15:04:56 +03:00
8f7ccb0f62 [#570] Remove frostfs-api-go dependency
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-12-10 11:03:30 +03:00
2c002b657e [#570] Update frostfs-sdk-go with new tree service client
Fix imports in order to apply the new SDK changes caused by merging with frostfs-api-go and reimplementing the tree service client

Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-12-09 15:08:48 +03:00
f215d200e8 [#559] Remove multipart objects using tombstones
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-12-04 11:03:01 +03:00
51322cccdf [#502] Add Dropped logs (by sampling) metric
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-12-03 12:16:56 +00:00
3cd88d6204 Release v0.31.1
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-11-29 13:35:58 +00:00
e71ba5e22a [#543] Add md5 sse-c S3Tests compatibility
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-11-28 06:06:55 +00:00
e3141fc8e3 [#563] Ignore precondition headers with invalid date format
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-11-27 12:07:04 +03:00
a12fea8a5b Release v0.31.0
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-11-20 15:45:07 +03:00
9875307c9b [#556] Check bucket name not only during creation
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-11-20 08:13:27 +00:00
b1775f9478 [#553] authmate: Add retryer to create access box
After using AddChain to provide access to the container we have to wait for:
* the tx with the APE chain to be accepted by the blockchain
* the cache in the storage node to be updated

This takes a while, so we add a retry
 (the same as when we add bucket settings during bucket creation; see the sketch below)

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-11-19 15:46:00 +03:00
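
A minimal sketch of such a retry as a poll-until-ready loop; the `waitForAccess` name, the interval handling and the `check` callback are illustrative, not authmate's actual retryer:

```go
package sketch

import (
	"context"
	"errors"
	"time"
)

// waitForAccess polls check until it succeeds or ctx expires, giving the
// APE-chain transaction time to be accepted and storage node caches time to
// refresh after AddChain. Illustrative sketch, not authmate's actual code.
func waitForAccess(ctx context.Context, interval time.Duration, check func(context.Context) error) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		err := check(ctx)
		if err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return errors.Join(err, ctx.Err()) // give up: report the last failure and the timeout
		case <-ticker.C:
			// try again on the next tick
		}
	}
}
```
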
4fa45bdac2 [#553] authmate: Don't use basic acl
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-11-19 15:45:54 +03:00
368c7d2acd [#549] Add tracing attributes
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-11-18 11:55:31 +00:00
31076796ce [#550] Execute CI on push to master
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2024-11-15 14:31:11 +03:00
eff0de43d5 [#538] Return headers with 304 Not Modified
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-11-13 13:49:09 +00:00
fb00dff83b [#540] Add md5 S3Tests compatibility
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-11-13 14:50:16 +03:00
d8f126b339 [#539] Fix listing v1 bookmark marker
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-11-12 12:58:09 +00:00
7ab902d8d2 [#536] Add rule ID generation
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-11-12 12:51:02 +00:00
0792fcf456 [#536] Fix error codes in lifecycle configuration check
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-11-12 12:51:02 +00:00
c46ffa8146 [#536] Add prefix to lifecycle rule
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-11-12 12:51:02 +00:00
3260308cc0 [#528] Check owner ID before deleting bucket
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-11-12 12:47:43 +00:00
d6e6a13576 [#542] Stop using obsolete .github directory
This commit is a part of multi-repo cleanup effort:
TrueCloudLab/frostfs-infra#136

Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2024-11-06 15:31:16 +03:00
17d40245de [#505] docs: Add example of uploading file using presigned URL
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-11-02 08:53:54 +00:00
979d85b046 [#505] authmate: Add flag for headers in generate-presigned-url cmd
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-11-02 08:53:54 +00:00
539dab8680 [#501] Add the trace id to the logs of middleware
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-11-02 08:51:48 +00:00
76008d4ba1 [#501] Consider using request logger in logAndSendError
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-11-02 08:51:48 +00:00
8bc19725ba [#521] Add documentation for multinet settings
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-10-29 15:55:27 +03:00
9e64304499 [#521] Use handler to register dial events
While frostfs-node uses a dial handler to update the metric
value, the gateway starts with simple event logging.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-10-29 15:55:27 +03:00
94504e9746 [#521] Use source dialer for gRPC connection to storage
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-10-29 15:55:27 +03:00
a8458dbc27 [#521] Add internal/net package with multinet dialer source
Code is taken from frostfs-node#1422
Author: Dmitrii Stepanov (dstepanov-yadro)

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-10-29 15:55:26 +03:00
424038de6c [#524] Update pool to treat maintenance mode differently
Contains these changes for pool component in SDK:
* frostfs-sdk-go#279 fix mm error counting during search operation
* frostfs-sdk-go#283 immediately mark mm node as unhealthy
* frostfs-sdk-go#278 do not reconnect to mm node

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-10-23 12:17:20 +00:00
3cf27d281d [#509] Support fallback address when getting box
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-10-23 15:01:31 +03:00
3c7cb82553 [#509] Init resolvers before first resolving
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-10-23 15:01:31 +03:00
57b7e83380 [#509] Save isCustom flag into accessbox
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-10-23 15:01:31 +03:00
6a90f4e624 [#509] Update docs
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-10-23 15:01:31 +03:00
cb3753f286 [#509] Add tests for custom credentials
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-10-23 15:01:31 +03:00
81209e308c [#509] Fix tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-10-23 15:01:31 +03:00
b78e55e101 [#509] Support custom AWS credentials
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-10-23 15:01:31 +03:00
25c24f5ce6 [#522] Add waiter to contract clients
Signed-off-by: Vladimir Domnich <v.domnich@yadro.com>
2024-10-23 09:22:19 +03:00
09c11262c6 [#516] Check Content-Md5 of lifecycle configuration
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-10-22 14:21:49 +00:00
f120715a37 [#516] Convert expiration date to epoch
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-10-22 14:21:49 +00:00
aaed083d82 [#520] Support the continuous use of interceptors
We can always add interceptors to the gRPC
connection to the storage, since their actual
use is controlled by the configuration from
the frostfs-observability library.

Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-10-21 11:49:22 +03:00
e35b582fe2 [#506] Deny bucket names with dot
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-10-08 12:50:22 +03:00
39fc7aa3ee [#404] Fix using encoding type in multipart upload listing
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-10-08 09:44:05 +00:00
da41f47826 [#404] Add check of encoding type in object listing
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-10-08 09:44:05 +00:00
9e5fb4be95 [#507] Return not implemented by default in bucket router
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-10-07 05:38:35 +00:00
346243b159 [#450] Fix getPutPayloadSize
If X-Amz-Decoded-Content-Length is explicitly set, use it even if the value is 0 (see the sketch below)

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-10-03 14:23:06 +03:00
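
A minimal sketch of the described precedence; `putPayloadSize` and the header constant are illustrative names, not the actual getPutPayloadSize code:

```go
package sketch

import (
	"net/http"
	"strconv"
)

const amzDecodedContentLength = "X-Amz-Decoded-Content-Length"

// putPayloadSize returns the payload size for a PUT request. A present
// X-Amz-Decoded-Content-Length header wins over Content-Length even when its
// value is 0, which is the gist of the fix above.
func putPayloadSize(r *http.Request) uint64 {
	if decoded := r.Header.Get(amzDecodedContentLength); decoded != "" {
		if v, err := strconv.ParseUint(decoded, 10, 64); err == nil {
			return v
		}
	}
	if r.ContentLength > 0 {
		return uint64(r.ContentLength)
	}
	return 0
}
```
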
03481274f0 [#467] authmate: Add sign command
Support signing arbitrary data using the AWS SigV4 algorithm

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-10-02 13:42:22 +00:00
c2adbd758a [#488] middleware/auth: Add frostfs-to-s3 error transformation
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-10-02 13:18:25 +03:00
bc17ab5e47 [#488] middleware/policy: Add frostfs-to-s3 error transformation
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-10-02 12:35:04 +03:00
9fadfbbc2f [#488] Renamed api/errors, layer/frostfs and layer/tree package names
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-10-02 12:35:04 +03:00
827ea1a41e [#488] Move layer/frostfs.go to layer/frostfs/frostfs.go
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-10-02 12:35:04 +03:00
968f10a72f [#488] Move layer/tree_service.go to layer/tree/tree_service.go
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-10-02 12:35:04 +03:00
582e6ac642 [#503] Update SDK to fix error counting
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-27 10:36:48 +03:00
99f273f9af [#461] Configure logger sampling policy
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-09-26 10:34:44 +03:00
cd96adef36 [#499] Fix of SIGHUP tracing docs
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-09-25 14:34:18 +03:00
738ce14f50 [#434] Remove container on failed bucket creation
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-09-25 07:15:24 +00:00
5358e39f71 [#496] Update frostfs-go-sdk for in-flight requests support
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-09-23 08:57:40 +03:00
34c1426b9f [#484] Add root ca cert for telemetry configuration
Signed-off-by: Aleksey Savaitan <a.savaitan@yadro.com>
2024-09-19 11:07:13 +00:00
8ca73e2079 [#493] Fix of receiving VHS namespaces map
While forming the map of namespaces for which VHS
is enabled, we resolve the namespace alias. The
problem is that resolving requires the default
namespace names, which have not been determined
yet at that point. Therefore, the check for the
default name now happens directly in the
prepareVHSNamespaces function, based on the
previously read default names.

Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-09-17 16:57:05 +03:00
a87c636b4c [#493] Fix X-Frostfs-S3-VHS header processing
It is assumed that the X-Frostfs-S3-VHS header
will have the value enabled/disabled instead
of true/false.

Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-09-16 14:28:19 +03:00
26baf8a94e [#492] Add panic catchers to fuzzing tests
Signed-off-by: Roman Ognev <r.ognev@yadro.com>
2024-09-16 10:36:30 +00:00
f187141ae5 [#486] Fix PUT object with negative Content-Length
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-16 08:45:46 +00:00
3cffc782e9 [#486] Fix PUT encrypted object
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-16 08:45:46 +00:00
d0e4d55772 [#460] Add network info cache
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-13 09:56:24 +00:00
42e72889a5 [#450] Add test for chunk-encoded object size
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-09-13 12:00:24 +03:00
98815d5473 [#450] Fix aws-chunked header workflow
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-09-13 11:59:07 +03:00
62615d7ab7 [#369] Request reproducer
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-09-11 15:25:09 +03:00
575ab4d294 [#369] Enhanced http requests logging
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-09-11 15:25:09 +03:00
d919e6cce2 [#482] Fix containers resolving
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-05 12:33:14 +03:00
056f168d77 [#448] multipart: Support removing duplicated parts
Previously, after a tree split we could have duplicated parts
(several objects and tree nodes referring to the same part number).
Some of them couldn't be deleted after an abort or complete action.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-09-03 13:20:38 +00:00
9bdfe2a016 [#479] Update APE to support s3:PatchObject action
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-03 11:57:59 +00:00
d6b506f6d9 [#466] Implement PATCH for multipart objects
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-03 11:57:59 +00:00
a2e0b92575 [#473] Add PATCH to extensions doc
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-03 11:57:59 +00:00
b08f476ea7 [#462] Implement PATCH for regular objects
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-03 11:57:59 +00:00
f4275d837a [#413] Add SECURITY.md
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-09-03 11:45:05 +00:00
664f83b2b7 [#480] Add fuzzing tests
Signed-off-by: Roman Ognev <r.ognev@yadro.com>
2024-09-02 15:59:07 +03:00
136b5521fe [#475] Support graceful_close_on_switch_timeout param
This allows in-flight requests to finish during rebalance

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-08-29 13:22:08 +00:00
a5f670d904 [#329] Reduce using mutex when update app settings
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-08-29 12:03:26 +00:00
d76c4fe2a2 [#472] tree: Don't use sorted GetSubTree for nodes without FileName
Sorted GetSubTree doesn't return nodes without the FileName attribute
if there are more than 1000 nodes in the subtree

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-08-27 09:43:29 +00:00
0637133c61 [#470] Update Go version to 1.22
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-08-23 12:18:42 +03:00
bf00fa6aa9 [#449] Add support headers for vhs and servername
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-08-23 08:35:05 +00:00
ff690ce996 [#446] Add tests for address style middleware
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-08-23 08:35:05 +00:00
534ae7f0f1 [#446] Add support virtual-hosted-style
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-08-23 08:35:05 +00:00
77673797f9 [#474] lint: Update golangci-lint to v1.60 and fix issues
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-08-22 19:13:52 +03:00
9e1766ff74 [#459] Update APE
Update to policy engine version with lifecycle actions support

Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-08-21 16:42:34 +03:00
e73f11c251 [#452] tree: Fix logging
Don't log tag parsing errors in listing

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-08-21 10:38:36 +03:00
5cb77018f8 [#42] Change valid time format for lifecycle configuration
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-08-21 10:38:36 +03:00
fa68a4ce40 [#412] Store creation epoch of delete markers
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-08-21 10:38:35 +03:00
0644067496 [#412] Store creation epoch in tree service
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-08-21 10:38:35 +03:00
481520705a [#42] Support expiration lifecycle
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-08-21 10:38:35 +03:00
28723f4a68 [#447] Add tree pool request duration metric
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-08-21 06:36:55 +00:00
20719bd85c [#456] PostObject: check object key for emptiness
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-08-13 09:27:26 +03:00
4f27e34974 [#456] auth: Fix authentication for POST Object
During the POST Object operation the AuthHeader field in middleware.Box wasn't set,
which led to a panic

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-08-12 15:25:53 +03:00
3dc989d7fe [#451] Support Location in CompleteMultipart response
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-08-06 15:45:09 +03:00
69e77aecc9 [#451] Don't skip [Next]PartNumberMarker in response
In the ListParts response we should always keep PartNumberMarker (even if it's zero)
and NextPartNumberMarker (even if the response isn't truncated)

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-08-06 15:44:56 +03:00
c34680d157 [#437] Rename getCORSAddress
This function is going to be reused
for lifecycles as well.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-07-26 06:54:10 +00:00
f5326b9f04 [#437] tree: Support removing old split system nodes
This is needed to meet user expectations when deleting CORS, for example.
Previously, after removing CORS (that was uploaded in a split manner)
we could still get some data (from another node)
because deletion worked only for the latest node version.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-07-26 06:54:10 +00:00
51c5c227c2 [#31] Add force bucket delete flag
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-07-25 14:04:54 +03:00
c506620199 [#439] Update SDK version
The new SDK version supports extended log records in the pool component during inner node health checks.

Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-07-24 12:16:37 +00:00
6cb0026007 [#427] layer: Split FrostFS ReadObject to separate methods
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-07-23 16:53:59 +03:00
971006a28c [#422] Support separate container for CORS
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-07-23 12:33:29 +00:00
527e0dc612 [#421] docker: Fix warnings
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-07-23 12:22:15 +00:00
3213dd7236 [#404] Add processing of encoding-type in versions listing
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-07-23 12:10:35 +00:00
a031777a1b Release v0.30.0
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-07-19 17:09:58 +03:00
b2a5da8247 [#430] Bump frostfs-api-go for latest stable marshaler
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-07-19 16:42:36 +03:00
ec349e4523 [#430] Adopt compatibility workarounds in Tree API
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-07-19 14:47:47 +03:00
977a20760b [#430] Delete all split version at once
Previously, after a split we could get two `null`-versioned objects with the same key,
and deleting such a key removed only one node/object.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-07-19 11:26:51 +03:00
2948d1f942 [#430] ci: Update go version
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-07-19 11:24:50 +03:00
c0011ebb8d [#430] tree: Fix multipart having system name
Previously, if a multipart key had the same name as some system node
(e.g. bucket-settings, bucket-cors, etc.), it shadowed the real system node
and, for example, the bucket became unversioned again.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-07-19 11:24:50 +03:00
456319d2f1 [#430] Fix split tree
Update tree service to fix split tree problem.
Tree intermediate nodes can be duplicated, so we must handle this.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-07-19 11:24:46 +03:00
1d965b23ab [#432] doc: Fix grammar mistakes in authentication
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-07-17 17:08:48 +03:00
3b83de31d2 [#419] Update SDK version
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-07-08 12:15:24 +00:00
70eedfc077 [#414] authmate: Add register-user command
The new command allows registering a user in frostfsid and
setting allowed rules in the policy contract

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-07-08 14:13:00 +03:00
f86b82351a [#398] Fix parameter parsing in bucket retryer
RetryStrategyExponential should use a jitter backoff
instead of a constant delay function

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-07-03 13:42:24 +03:00
465eaa816a [#372] Drop [e]ACL related code
Always consider buckets as APE compatible

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-07-01 16:58:44 +03:00
9241954496 [#372] authmate: Don't create creds with eacl table
Allow only impersonate flag.
Don't allow SetEACL container session token.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-07-01 16:26:21 +03:00
77f8bdac58 [#372] Drop kludge.acl_enabled flag
Now only APE containers can be created using s3-gw

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-07-01 16:26:19 +03:00
91541a432d [#411] Check uniqueness in DeleteMultipleObjects
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-06-26 16:39:06 +03:00
943b30d9f4 [#411] Don't check object tags on deletion
According to the specification https://docs.aws.amazon.com/AmazonS3/latest/userguide/tagging-and-policies.html
we shouldn't check object tags on PUT and DELETE

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-06-26 16:38:56 +03:00
414f3943e2 [#410] Drop layer.Client interface
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-06-25 15:57:55 +03:00
9432782ce6 [#401] Drop notifications
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-06-25 15:49:37 +03:00
2b04fcb5ec [#406] Remove control api
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-06-21 06:36:56 +00:00
280d11c794 [#407] Don't set full_control for bucket owner
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-06-19 10:55:24 +03:00
ed34b2cae4 [#402] auth: Extend test coverage
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-06-14 10:06:00 +00:00
76f553d292 [#403] Set resource tags into resource properties
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-06-13 11:12:40 +03:00
1513a9252b [#403] go.mod: Update APE
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-06-13 11:12:35 +03:00
bb81afc14a [#398] Support retryer
Add two strategies for the PutBucketSettings request retryer (see the sketch below):
* exponential backoff (delays increasing up to `max_backoff`, with jitter)
* constant backoff (always the same `max_backoff` delay between requests)

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-06-06 13:02:17 +00:00
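
A minimal sketch of the two delay functions, assuming `max_backoff` is positive; the names and the full-jitter formula are illustrative, not the gateway's retryer implementation:

```go
package sketch

import (
	"math/rand"
	"time"
)

// constantBackoff always waits maxBackoff between attempts.
func constantBackoff(maxBackoff time.Duration) func(attempt int) time.Duration {
	return func(int) time.Duration { return maxBackoff }
}

// exponentialBackoff doubles the delay per attempt, caps it at maxBackoff
// (which must be > 0) and applies full jitter.
func exponentialBackoff(base, maxBackoff time.Duration) func(attempt int) time.Duration {
	return func(attempt int) time.Duration {
		d := base << attempt // base * 2^attempt
		if d <= 0 || d > maxBackoff {
			d = maxBackoff // cap growth (also handles overflow)
		}
		return time.Duration(rand.Int63n(int64(d))) // random delay in [0, d)
	}
}
```
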
58850f590e [#335] Improve determining AccessBox latest version
Signed-off-by: Anoke <rustamgta1011@gmail.com>
2024-06-06 12:35:48 +00:00
e25dc90c20 [#399] Add OPTIONS method for object operations
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-06-04 12:59:45 +00:00
71bae5cd9a [#400] Update frostfs-sdk-go version with support EC
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-06-04 12:22:06 +00:00
b5fae316cf [#396] Add user to response
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-06-04 09:37:55 +00:00
9f3ea470e6 [#395] Port changelog and prepare it for next release
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-05-27 14:27:11 +03:00
9787b29542 [#392] go.mod: Update APE to drop private IPs checking
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-05-27 11:26:15 +03:00
9152b084ec [#387] Fix typo
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-05-22 15:06:02 +00:00
21dbe3ea8e [#387] api: Add tests for middleware
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-05-22 15:06:02 +00:00
f4d174e740 [#387] middleware: Extend test coverage
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-05-22 15:06:02 +00:00
8a758293b9 [#387] middleware: Delete unused code
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-05-22 15:06:02 +00:00
fb521c7ac6 [#367] policy: Set IAM-MFA property to false by default
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-05-22 12:05:42 +03:00
87b9e97a80 [#354] Do not proceed on bucket remove error
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-05-17 20:38:39 +03:00
d62d8f3874 [#385] Support the renaming of ObjectRequest and ObjectContainer
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2024-05-14 16:51:36 +03:00
6bf6a3b1a3 [#362] Check user and groups during policy check
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-05-08 15:25:14 +03:00
2f108c9951 [#362] Expand control service
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-05-08 15:15:49 +03:00
c43ef040dc [#382] Fix request type determination
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-05-07 15:17:22 +03:00
2ab655b909 [#380] Add test for credentials versioning
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-05-03 07:24:13 +00:00
1c398551e5 [#380] creds: Increase test coverage
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-05-03 07:24:13 +00:00
db05021786 [#379] Add Iana CharsetReader for Oracle integration
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-04-25 17:44:38 +03:00
034396d554 [#377] Add check of Source IP
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-04-22 15:29:18 +03:00
3c436d8de9 [#365] Include iam user tags in query
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-04-22 10:47:43 +03:00
45f77de8c8 [#371] Add custom Source IP header configuration
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-04-22 07:42:45 +00:00
d903de2457 [#370] Fix fetching attributes from tree
Port TrueCloudLab/frostfs-s3-gw#374

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-19 17:33:55 +03:00
e22ff52165 [#367] Add check of AccessBox attributes
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-04-19 06:25:26 +00:00
5315f7b733 [#269] Create frostfsid wrapper with cache
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-18 09:32:30 +03:00
43a687b572 [#269] authmate: Update frostfsid using
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-17 12:11:23 +03:00
29a2dae40c [#269] Move frostfsid client to separate package
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-17 12:11:23 +03:00
fec3b3f31e [#269] Add frostfsid cache configuration
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-17 12:11:23 +03:00
7db89c840b [#368] Update vulnerable dependencies
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-17 11:29:09 +03:00
3ff027587c [#357] Add check of request and resource tags
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-04-17 07:06:58 +00:00
9f29fcbd52 [#353] docs: Add bucket policy docs
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-15 11:41:19 +03:00
8307c73fef [#364] Fix removing combined object
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-12 14:56:38 +03:00
d8889fca56 [#340] Fix encode object acl
In the process of encoding the ACL of an object,
we used a map. As a result, when traversing the
map, we could get a different sequence of permissions
each time. Therefore, a list is used instead of a map
(see the sketch below).

Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-04-11 09:28:30 +00:00
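
A small self-contained demo of why the map caused the problem; the grantee/permission values are made up:

```go
package main

import "fmt"

// Iterating a Go map yields a different order on each run, which is why the
// encoded grants could shuffle between responses; a slice keeps the order
// stable.
func main() {
	grants := map[string]string{"alice": "READ", "bob": "WRITE", "carol": "FULL_CONTROL"}
	for grantee, perm := range grants {
		fmt.Println(grantee, perm) // order may differ between runs
	}

	ordered := []struct{ Grantee, Permission string }{
		{"alice", "READ"}, {"bob", "WRITE"}, {"carol", "FULL_CONTROL"},
	}
	for _, g := range ordered {
		fmt.Println(g.Grantee, g.Permission) // always the same order
	}
}
```
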
61ff4702a2 [#360] Reuse single target during policy check
The policy engine library is able to manage multiple
targets and resolve different status results.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-04-10 17:56:47 +03:00
6da1acc554 [#360] Use 'c' prefix for bucket policies instead of 'n'
With the 'c' prefix, ACL chains become shorter, so the gateway
receives shorter results and avoids sessions to neo-go.

There is still an issue with many IAM rules.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-04-10 17:56:47 +03:00
3ea3f971e1 [#359] Update APE to allow put tombstone on delete object
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-10 15:12:30 +03:00
cb83f7646f [#347] port: Explicitly specify sorting order of subtree for object listing
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-04-09 18:57:47 +03:00
9c012d0a66 [#355] Remove policies when delete bucket
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-09 15:49:46 +00:00
bda014b7b4 [#355] Update frostfs-contract to terminate session iterator
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-09 15:49:46 +00:00
37d05dcefd [#353] Add check of listing parameters and versionID
Add properties in policy check:
* s3:delimiter
* s3:prefix
* s3:max-keys
* s3:VersionId

Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-04-08 17:57:55 +03:00
8407b3ea4c [#352] policy: Use iterators to list chains
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-04 12:51:12 +00:00
e537675223 [#341] Update CHANGELOG
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-04-03 12:04:48 +00:00
789464e134 [#341] Add "h2" as next proto to allow HTTP/2 requests in http.Serve
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-04-03 12:04:48 +00:00
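
A minimal, self-contained example of the mechanism this relies on: http.Serve over a TLS listener negotiates HTTP/2 only when the TLS config advertises "h2" via ALPN. The certificate paths and address are placeholders:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
	if err != nil {
		log.Fatal(err)
	}
	cfg := &tls.Config{
		Certificates: []tls.Certificate{cert},
		NextProtos:   []string{"h2", "http/1.1"}, // allow HTTP/2 requests
	}
	ln, err := tls.Listen("tcp", ":8443", cfg)
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte(r.Proto + "\n")) // "HTTP/2.0" for h2 clients
	})))
}
```
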
a138f4954b [#341] Test HTTP/2 requests
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-04-03 12:04:48 +00:00
8669bf6b50 [#346] acl: Update APE and fix using
* Remove the native policy when removing the bucket policy
* Allow policies that contain only s3-compatible statements
(for now, deny rules cannot be converted to native rules)

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-02 12:43:04 +00:00
6b8095182e [#343] docs: Actualize s3 compatibility table
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-04-02 15:02:51 +03:00
348126b3b8 [#301] go.mod: Update sdk-go
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-28 09:13:27 +03:00
fbe7a784e8 [#301] Support GetBucketPolicyStatus
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-28 09:13:25 +03:00
bfcde09f07 [#291] server auto re-binding
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-03-27 14:28:50 +03:00
94bd1dfe28 [#334] Add auth doc
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-21 12:12:29 +03:00
80c7b73eb9 [#306] In APE buckets forbid canned acl except private
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-19 16:57:26 +03:00
62cc5a04a7 [#328] Log error on failed response writing
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-15 11:02:26 +03:00
6788306998 [#328] Log invalid tree service KVs
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-04 15:35:23 +03:00
4ee3648183 [#328] Log invalid lock enabled header
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-04 15:09:51 +03:00
ee48d1dc85 [#325] Log error on failed request id generation
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-04 09:49:41 +00:00
f958eef2b3 [#325] Use default empty data.LockInfo in get/head in case of error
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-04 09:49:41 +00:00
81b44ab3d3 [#325] Fix mutex usage in controller
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-04 09:49:41 +00:00
623001c403 [#325] Close listener on error
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-04 09:49:41 +00:00
70043c4800 [#324] Close nns resolver after use
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-03-04 09:06:26 +00:00
8050ca2d51 [#306] Use session token for container read operations
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-01 18:14:33 +03:00
c12e264697 [#306] Simplify cid resolver for metrics
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-01 17:46:16 +03:00
e9f38a49e4 [#306] Fix forming key for bucket cache
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-03-01 16:09:40 +03:00
fabb4134bc [#318] Use log msg from constants
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-29 17:30:28 +03:00
e1ee36b979 [#318] Fix tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-29 17:30:28 +03:00
937367caaf [#318] Fix panic on invalid multipart form
Previously, a simple 'curl -X POST http://localhost:8084/test' led to a panic because of wrong handler matching

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-29 17:30:28 +03:00
7b86bac6ee [#318] Log unmatched requests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-29 17:30:28 +03:00
529ec7e0b9 [#318] Don't log empty bucket/name
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-29 17:30:28 +03:00
4741e74210 [#318] Log successfully authenticated accessKeyIDs
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-29 17:30:28 +03:00
f1470bab4a [#318] auth: Add context for logged errors
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-29 17:30:28 +03:00
6e5bcaef97 [#318] Log policy request checking
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-29 17:30:28 +03:00
1522db05c5 [#318] Log namespace for requests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-29 17:30:28 +03:00
31da31862a [#300] Update error logging in DeleteMultipleObjects
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-02-29 14:24:32 +00:00
7de1ffdbe9 [#306] Fix billing tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 18:00:27 +03:00
3285a2e105 [#306] policy: Change default access strategy
Use access strategy based on bucket type and/or config flags.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:53:13 +03:00
1bfea006b0 [#306] Update APE
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:50:08 +03:00
56b50f2075 [#306] Remove flag to disable policy contract
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:50:08 +03:00
8f89f275bd [#306] Save bucket policy as native chain
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:50:08 +03:00
ff15f9f28a [#306] Fix update settings for buckets without owner key in tree
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:50:08 +03:00
c868af8a62 [#306] Add flag to enable old ACL bucket creation
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:50:08 +03:00
bac1b3fb2d [#306] Use zero basic acl to mark APE containers
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:50:08 +03:00
c452d58ce2 [#306] Reduce number of policy contract invocations
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:50:08 +03:00
499a202d28 [#306] Update CHANGELOG.md
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:50:08 +03:00
d9d12debc3 [#306] Add tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:50:08 +03:00
3d0d2032c6 [#306] acl: Handle put/get acl for APE buckets
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:50:08 +03:00
1f2cf0ed67 [#306] Use APE instead of eACL on bucket creation
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:50:08 +03:00
37be8851b3 [#306] Simplify namespaces configuration
Resolve the ns alias just once at the beginning of the request.
Keep only one default ns key in the ns map.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:50:08 +03:00
c4c199defe [#306] Use s3 as chain id prefix to be consistent
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-28 17:50:08 +03:00
2981a47e99 [#321] Use correct owner id in billing metrics
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-02-28 14:52:44 +03:00
391fc9cbe3 [#311] Change object owner for anonymous put
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-02-21 15:03:16 +00:00
4eb2c7fb7d [#290] Fix TestErrorTimeoutChecking test
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-20 11:39:49 +00:00
563c1d9bd7 [#308] Fix linter issues
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-02-16 18:25:06 +03:00
0f3b4ab0ed [#308] Update linter versions
The latest golangci-lint has a newer x/tools version which
is incompatible with the internal linter.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-02-16 18:24:53 +03:00
bd8d2d00ba [#313] logger: Fix logging level changing for journald
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2024-02-16 17:44:16 +03:00
cc34f659d1 [#305] Extract removal checking into separate method
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-09 09:44:24 +03:00
924e87face [#305] Support checking if accessbox was removed
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-09 09:41:48 +03:00
5121c73d3f [#307] Update APE
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-07 12:24:13 +03:00
c334adeb6d [#165] Sort nodes in ServiceClientMemory
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:53:12 +03:00
a74d498df2 [#165] Return sort after HEAD in listing
We have to sort objects after HEAD because we make requests in different goroutines,
so the order is not deterministic (see the sketch below).

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:15:08 +03:00
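
A minimal sketch of that pattern: HEAD requests fan out to goroutines, results arrive in scheduling order, and a final sort restores determinism. The types and the `head` callback are illustrative:

```go
package sketch

import (
	"sort"
	"sync"
)

type objectInfo struct {
	Name string
	Size uint64
}

// headObjects issues HEAD requests concurrently and collects results from a
// channel, so arrival order depends on scheduling; sorting afterwards restores
// a deterministic listing.
func headObjects(keys []string, head func(key string) objectInfo) []objectInfo {
	results := make(chan objectInfo, len(keys))

	var wg sync.WaitGroup
	for _, key := range keys {
		wg.Add(1)
		go func(key string) {
			defer wg.Done()
			results <- head(key)
		}(key)
	}
	wg.Wait()
	close(results)

	infos := make([]objectInfo, 0, len(keys))
	for info := range results {
		infos = append(infos, info) // non-deterministic order
	}
	sort.Slice(infos, func(i, j int) bool { return infos[i].Name < infos[j].Name })
	return infos
}
```
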
69297a4a38 [#165] Delete object from tree in case of storage error
Extend the storage node errors in case of which we continue deleting from the tree
with an 'object not found' error

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:15:08 +03:00
71d82d1cc8 [#165] Fix lint issues
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:15:08 +03:00
fafe4af529 [#165] Fix real object size in listing
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:15:03 +03:00
88f1acbdfc [#165] Cancel context in outdated list session
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
4e15452853 [#165] Fix lint errors
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
da642a498a [#165] Listing fix data race
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
de32dfd7ce [#165] Rename SubTreeStreamImpl to SubTreeStreamMemoryImpl
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
8a30f18ff6 [#165] Don't use recursion in list streaming
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
2d7973b3f1 [#165] Refactor list versions
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
6d52f46012 [#165] Fix v1 listing bookmark
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
0ae49eaab0 [#165] Generalize allObjectListingParams
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
093de13f54 [#165] Add stream listing tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
cf4fc3b602 [#165] Extend error on getting listing containers not in current namespace
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
739a6ec9df [#165] Support latest only stream listing
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
c7ee628ab0 [#165] Fix versions listing
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
4ad84b9b94 [#165] listing: Use NodeVersion instead of ObjectInfo
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
3e20f736a6 [#165] Move listing function to one file
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
b52552e8c2 [#165] Add batching in streaming listing
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
6e8960b2ab [#165] Add list session cache
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
29ac91dfd5 [#165] Support streaming listing
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 16:09:48 +03:00
84af85ed67 [#302] Update APE to support chain id as bytes
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-02-02 12:53:45 +03:00
4804904d9d [#298] journald update version
We want to have fewer useless fields in logs

Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-01-29 12:49:22 +00:00
e23cc43824 [#299] Drop unused legacy minio code
736d8cbac4 (diff-f5a8931b4d5f3b7f583e4cd719bfd2904980518a6f338d463ec76aea814db772)
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-01-29 12:57:18 +03:00
12434d5f4d [#297] .forgejo: Check only PR commits in dco-go
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-01-26 15:11:52 +00:00
2e870e99c7 [#297] .forgejo: Update dco-go to v3
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-01-26 15:11:52 +00:00
45e025320f [#296] Port v0.28.1 release changelog
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-01-24 17:50:24 +03:00
eae49908da [#292] authmate: Support custom attributes
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-01-22 10:02:43 +03:00
c32220762f [#288] Fix possibility of panic during SIGHUP
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-01-09 10:53:54 +03:00
6f9ee3da76 [#275] Change logic delete multipart upload
In order not to accidentally take outdated
information about downloaded parts from other
nodes, now when the multipart upload is aborted
or completed, the root node of the multipart
upload remains in the tree with the finish flag set.

Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2023-12-27 13:06:45 +03:00
08019f1574 [#280] Add put requests to duration metric
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-12-22 16:33:05 +03:00
899213b3f3 [#287] Support proxy for frostfsid and policy contracts
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-22 15:45:10 +03:00
3b6d2bc522 [#287] authmate: Support frostfsid proxy and namespace
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-22 15:43:14 +03:00
6509639540 [#283] control: Changed type of chainID to bytes
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-21 18:07:17 +03:00
5698d5844e [#283] Support frostfsid groups in policy request checking
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-21 14:21:36 +03:00
43cae9ee04 [#248] Correct object versions response markers
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-21 10:53:50 +03:00
a17ff66975 [#282] policy: Use prefixes to distinguish s3/iam actions/resources
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-20 10:41:15 +03:00
38c5503a02 [#261] acl: Remove unused
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-18 15:49:58 +03:00
8273af8bf8 [#261] Make PutBucketPolicy handler use policy contract
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-18 15:49:54 +03:00
6dbb07f0fa [#261] Update policy-engine dependency
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-14 17:52:13 +03:00
340e6b807b [#266] Update params that being reloaded in one place
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-14 16:21:09 +03:00
0850d21ff3 [#266] Move trimming namespaces name into fetch function
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-14 16:12:10 +03:00
9272f4e108 [#259] Support contract based policies
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-11 10:01:46 +03:00
be6a37ada5 [#262] Support configuring max tree request attempts
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-12-07 16:41:21 +03:00
836874a761 [#262] Set tree request id
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-07 16:40:56 +03:00
7a285d1464 [#267] Update api-go to fix stable marshal of empty structs
The newer version of api-go does not ignore non-nil empty
structures in protobuf messages, so compatibility with the
previous version is preserved.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-12-07 16:40:56 +03:00
f58a0d04ff [#233] Clean tag node in the tree service instead of removal
With the new retry policy of the tree service pool, the gateway should avoid
deleting system nodes from the tree. The absence of a node in the tree
will trigger a retry. Another storage node in the network may return an already
deleted node while the tree is not completely synced, and the client will
get an unexpected result.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-12-07 16:40:56 +03:00
94c2674f44 [#233] Update tree service pool in SDK
The tree service pool now retries if
the tree service returns 'not found' errors
or an empty result on 'GetNodeByPath'

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-12-07 16:40:53 +03:00
6b1b43a364 Release v0.28.0
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-12-07 15:47:31 +03:00
43abf58068 [#257] Support flag to deny access if policy rules not found
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-06 17:47:58 +03:00
ca15acf1d3 [#257] router: Use named constants
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-06 17:47:58 +03:00
473239bf36 [#257] Add policy checker
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-06 17:47:51 +03:00
93cf7c462b [#271] Add namespace label to billing metrics
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-12-04 14:54:40 +03:00
6c5f9b2764 [#266] Fix namespace config initialization
Don't use a nil Namespaces map when the file isn't provided or is invalid

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-04 09:40:47 +00:00
42862fd69e [#258] Support policy management in control svc
Add PutPolicies, RemovePolicies, GetPolicy, ListPolicies methods

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-01 15:47:12 +03:00
c7a65bd075 [#258] Add control service
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-12-01 14:17:06 +03:00
28c6bb4cb8 [#266] Support per namespace placement policies configuration
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-11-28 16:12:42 +03:00
0db6cd6727 [#268] Add dirty version check
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-11-28 10:42:49 +00:00
ff1ec56d24 [#260] Use namespace as domain when create bucket
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-11-27 15:30:12 +03:00
9ebfca654b [#260] Update CHANGELOG.md
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-11-23 11:00:19 +03:00
055cc6a22a [#260] Use namespace as domain when resolve bucket
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-11-23 11:00:11 +03:00
a61ff3b8cb [#260] authmate: Support key registration in frostfsid contract
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-11-23 11:00:11 +03:00
6304d7bfda [#260] Support frostfsid validation
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-11-23 11:00:11 +03:00
cf7254f8cd [#260] Refactor api/auth/center.go
Move the Center interface to the middleware package where it's used

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-11-23 11:00:09 +03:00
861454e499 [#218] Add check content sha256 header
The X-Amz-Content-Sha256 header check is done only for unencrypted payloads (see the sketch below).

Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2023-11-22 11:33:52 +00:00
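
A minimal sketch of such a check, assuming the whole payload is available as a byte slice; the function name and error text are illustrative, and the real streaming handling is more involved:

```go
package sketch

import (
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"net/http"
)

const unsignedPayload = "UNSIGNED-PAYLOAD"

// checkContentSHA256 compares the X-Amz-Content-Sha256 header with the hash
// of the received payload; the special UNSIGNED-PAYLOAD value skips the
// comparison. Applied only to unencrypted payloads, as described above.
func checkContentSHA256(r *http.Request, payload []byte) error {
	want := r.Header.Get("X-Amz-Content-Sha256")
	if want == "" || want == unsignedPayload {
		return nil
	}
	sum := sha256.Sum256(payload)
	if hex.EncodeToString(sum[:]) != want {
		return errors.New("X-Amz-Content-Sha256 does not match the payload hash")
	}
	return nil
}
```
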
b28ecef43b [#219] Return ETag in quotes
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-11-22 11:12:32 +00:00
406075aebb [#236] Add support zapjournald logger configuration
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2023-11-13 16:31:11 +03:00
fe796ba538 [#217] Consider Copy-Source-SSE-* headers during copy
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-11-13 13:22:58 +00:00
5ee73fad6a [#248] Correct NextVersionIDMarker in listing versions
Although the spec https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html#API_ListObjectVersions_ResponseElements
says that
"When the number of responses exceeds the value of MaxKeys,
NextVersionIdMarker specifies the first object version not returned
that satisfies the search criteria. Use this value for the
version-id-marker request parameter in a subsequent request.",
the actual behavior of AWS S3 is to return NextVersionIdMarker as the last returned object version

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-10-31 17:36:24 +03:00
890a8ed237 [#227] Add versionID header after complete multipart
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-10-31 14:07:08 +00:00
0bed25816c [#224] Add conditional escaping for object name
Chi gives inconsistent results in terms of whether
the returned strings are URL-encoded or not.
See:
* https://github.com/go-chi/chi/issues/641
* https://github.com/go-chi/chi/issues/642

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-10-31 13:58:51 +00:00
b169c5e6c3 [#239] Update test for check goroutines leak
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-10-31 13:51:23 +00:00
122af0b5a7 [#220] Support configuring web server timeout params
Set IdleTimeout and ReadHeaderTimeout to `30s`.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-10-31 13:48:08 +00:00
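
A minimal sketch of a server configured with these timeouts; the address and handler wiring are placeholders:

```go
package sketch

import (
	"net/http"
	"time"
)

// newServer builds an http.Server with the idle and read-header timeouts
// mentioned above.
func newServer(handler http.Handler) *http.Server {
	return &http.Server{
		Addr:              ":8080",
		Handler:           handler,
		IdleTimeout:       30 * time.Second, // close idle keep-alive connections
		ReadHeaderTimeout: 30 * time.Second, // bound the time to read request headers
	}
}
```
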
cf13aae342 [#225] Add default storage class to responses
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-10-31 13:37:07 +00:00
0938d7ee82 [#226] Fix status code in GET/HEAD delete marker
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-10-27 10:58:57 +03:00
4f5f5fb5c8 [#222] Fix marshaling errors in DeleteObjects method
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-10-25 14:54:02 +00:00
25bb581fee [#205] Add md5 checksum in header
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-10-25 11:04:19 +03:00
8d6aa0d40a [#243] Fix list object versions marker param
According to https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html
we have to use `key-marker`

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-10-18 10:35:47 +03:00
7e91f62c28 [#223] Add store content language
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2023-10-17 14:42:02 +00:00
01323ca8e0 [#216] Add check tag key uniqueness
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2023-10-17 14:40:29 +00:00
298662df9d [#221] Expand xmlns field ignore
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-10-13 16:21:13 +03:00
10a03faeb4 [#197] Update CHANGELOG.md
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-10-11 12:32:48 +00:00
65412ce1d3 [#197] Configure buffer max size for PUT
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-10-11 12:32:48 +00:00
7de73f6b73 [#197] Disable homomorphic hash for PUT
Disable the TZ hash for PUT if it's disabled for the container itself

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-10-11 12:32:48 +00:00
8fc9d93f37 [#197] Update SDK
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-10-11 12:32:48 +00:00
7301ca52ab [#154] Rename OwnerPublicKey to SeedKey
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2023-10-06 14:00:37 +03:00
e1ec61ddfc [#215] Fix get latest version node
When the object version is retrieved,
the node of the secondary object may be returned.
Now we choose the right node ourselves.

Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2023-10-06 09:21:41 +00:00
e3f2d59565 [#154] Rename access key to secret key
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2023-10-06 09:20:39 +00:00
c4af1dc4ad [#171] Update message error auth header malformed
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2023-10-04 11:13:12 +00:00
b8c93ed391 [#172] Convert handler config to interface
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-10-04 11:01:27 +00:00
51e591877b [#207] Fix list parts with empty list
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-09-21 11:27:20 +03:00
a4c612614a [#210] Fix multipart object reader
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-09-19 16:30:08 +03:00
12cf29aed2 [#207] Fix part-number-marker handling
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-09-19 12:43:07 +03:00
16840f1256 [#177] Add release instructions page
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-09-07 12:32:12 +00:00
066b9a0250 [#142] Add trace ID into log when tracing is enabled
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-09-07 14:19:37 +03:00
54e1c333a1 [#152] authmate: Add basic error types and exit codes
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-09-06 23:56:56 +03:00
69227b4845 [#199] Add metrics for HTTP endpoint status
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-09-05 13:30:27 +00:00
c66c09765d [#196] Support soft memory limit setting
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2023-09-05 13:13:56 +00:00
9120e97ac5 [#203] Add go1.21 to CI
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-08-31 15:26:07 +03:00
2fc328a6d2 [#195] Add log constants linter
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-08-28 12:58:44 +03:00
b5fce5c8d2 [#168] Skip only invalid policies and copies instead of ignoring all of them
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-25 12:05:30 +03:00
41a128b1aa [#185] Update CHANGELOG.md
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-24 18:03:11 +03:00
6617adc22b [#185] Use correct object size when object is combined or encrypted
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-24 18:03:08 +03:00
631d9d83b6 [#185] Fix payload reader
When we use io.CopyBuffer it checks for an exact io.EOF match,
so we need to keep the original EOF error, otherwise io.CopyBuffer returns an error (see the sketch below)

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-24 18:01:08 +03:00
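
A minimal sketch of a reader wrapper that preserves the io.EOF sentinel; the type name and the transform placeholder are illustrative, not the gateway's payload reader:

```go
package sketch

import (
	"errors"
	"io"
)

// payloadReader wraps another reader (e.g. to decrypt or reassemble parts).
// io.Copy/io.CopyBuffer stop cleanly only on the exact io.EOF value, so the
// wrapper must hand io.EOF through unchanged instead of wrapping it.
type payloadReader struct {
	inner io.Reader
}

func (r *payloadReader) Read(p []byte) (int, error) {
	n, err := r.inner.Read(p)
	// ... transform p[:n] here ...
	if err == nil {
		return n, nil
	}
	if errors.Is(err, io.EOF) {
		return n, io.EOF // keep the original sentinel so io.CopyBuffer terminates normally
	}
	return n, err // real error: propagate as is
}
```
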
adec93af54 [#185] tree: Fix getSubTreeMultipartUploads
Every tree node contains only FileName,
but the key in multipart info must contain FilePath

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-24 18:01:08 +03:00
8898c2ec08 [#185] Add tests for list multipart uploads
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-24 18:01:08 +03:00
8efcc957ea [#96] Move log messages to constants
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2023-08-23 18:32:31 +03:00
6b728fef87 [#192] Add tests to make sure client_cut flag is passed to sdk
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-23 06:26:55 +00:00
6b1f365e65 [#192] Support client cut
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-23 06:26:55 +00:00
fcf1c45ad2 [#188] Fix url escaping
URL escaping has already been done in `net/http/request.go`

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-22 11:27:39 +03:00
6a9d3261a7 [#117] Refactor fetch/parse config parameters functions
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2023-08-22 08:05:26 +00:00
012ece40bb [#180] Fix linter issues
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-08-21 17:23:24 +03:00
c750c87a61 [#51] metrics: Add a metric of currently used nodes
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-08-17 14:26:25 +03:00
94a42fa273 [#51] Update frostfs-sdk-go
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-08-17 14:26:25 +03:00
40d7f844e3 [#137] Refactor context data retrievers
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2023-08-16 14:05:38 +00:00
52b89d3497 [#153] Add labels in metrics of total bytes
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-08-16 10:30:40 +03:00
6b109eee92 [#182] Fix parsing signed headers in presigned urls
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-08 13:04:53 +03:00
18878b66d3 [#175] Use gate owner as object owner
This is required because the node checks the session token owner
(TrueCloudLab/frostfs-node#528).
For client cut (TrueCloudLab/frostfs-sdk-go#114)
such an owner will be the gate owner.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-08 12:22:33 +03:00
46eae4a356 [#179] Fix GetSubTree failures with updated SDK
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-08-02 14:56:23 +00:00
fe897ec588 [#178] wrapReader: Fix goroutine leak
In case of an error in FrostFS.CreateObject, the wrapped reader
can be blocked because of the synchronous pipe. We have to read out the whole payload in such a case.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-01 17:59:34 +03:00
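A small self-contained sketch of the leak and the fix, using io.Pipe and a hypothetical `createObject` stand-in for the FrostFS call: the producer goroutine stays blocked on the synchronous pipe until the consumer drains the remaining payload.

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// createObject is a stand-in for a storage call that may fail before
// consuming the whole payload.
func createObject(r io.Reader) error {
	_, _ = io.CopyN(io.Discard, r, 2) // read a little, then fail
	return errors.New("storage unavailable")
}

func main() {
	pr, pw := io.Pipe()

	// Producer goroutine: blocks on pw.Write until the reader side reads.
	go func() {
		_, err := io.Copy(pw, strings.NewReader("some large payload"))
		pw.CloseWithError(err)
	}()

	if err := createObject(pr); err != nil {
		// Drain the rest of the payload so the producer goroutine
		// is not left blocked on the synchronous pipe forever.
		_, _ = io.Copy(io.Discard, pr)
		fmt.Println("create failed:", err)
	}
}
```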
52931663e1 [#176] multipart: Replace part on re-upload
We want to have exactly one object and tree node for each part number

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-21 16:20:04 +03:00
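A toy sketch of the invariant, with hypothetical `partNode`/`putPart` names: on re-upload, the existing node for that part number is updated in place and the old object removed, rather than adding a second record.

```go
package main

import "fmt"

// partNode is a simplified tree record for one uploaded part.
type partNode struct {
	Number int
	OID    string
}

// putPart keeps the invariant "exactly one object and one tree node per part
// number": on re-upload the old object is removed and the node is updated
// in place instead of adding a second record.
func putPart(parts map[int]*partNode, number int, newOID string, deleteObject func(oid string)) {
	if old, ok := parts[number]; ok {
		deleteObject(old.OID) // drop the previously uploaded payload
		old.OID = newOID
		return
	}
	parts[number] = &partNode{Number: number, OID: newOID}
}

func main() {
	parts := map[int]*partNode{}
	var removed []string
	del := func(oid string) { removed = append(removed, oid) }

	putPart(parts, 1, "oid-a", del)
	putPart(parts, 1, "oid-b", del) // re-upload of part 1 replaces oid-a

	fmt.Println(len(parts), parts[1].OID, removed) // 1 oid-b [oid-a]
}
```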
1a09041cd1 [#63] Simplify multiObjectReader and add tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 14:00:15 +03:00
631b7f67b4 [#63] multipart: Log upload id for every failed request
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 14:00:15 +03:00
8ca2998297 [#63] Update CHANGELOG.md
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 14:00:15 +03:00
bcf5a85aab [#63] multipart: Fix copying
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 14:00:15 +03:00
ad81b599dd [#63] Add fast multipart upload
Add a new flag `isCombined` to the object tree meta, which means
the object payload is a list of parts that form the real payload.
Set this attribute on complete multipart upload to avoid unnecessary copying.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 14:00:15 +03:00
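A hedged sketch of the idea (hypothetical `partInfo`/`completeMultipart` names; the real gateway stores its own structures): the completed object's payload is just the ordered part list, and the tree meta carries `isCombined` so a later GET knows to stitch the parts together instead of reading one big copy.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// partInfo is a hypothetical description of one uploaded part.
type partInfo struct {
	Number int    `json:"number"`
	OID    string `json:"oid"`
	Size   int64  `json:"size"`
}

// completeMultipart sketches the "combined object" idea: instead of copying
// every part into one big object, store the ordered part list as the payload
// and mark the tree node meta with isCombined=true.
func completeMultipart(parts []partInfo) (payload []byte, meta map[string]string, err error) {
	payload, err = json.Marshal(parts)
	if err != nil {
		return nil, nil, err
	}
	meta = map[string]string{"isCombined": "true"}
	return payload, meta, nil
}

func main() {
	payload, meta, _ := completeMultipart([]partInfo{
		{Number: 1, OID: "oid-1", Size: 5 << 20},
		{Number: 2, OID: "oid-2", Size: 3 << 20},
	})
	fmt.Println(meta["isCombined"], string(payload))
}
```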
361d10cc78 [#174] Fix query for listing multipart uploads
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 12:30:17 +03:00
62e6b49254 [#174] Log unmatched requests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 12:30:17 +03:00
80c4982bd4 [#174] Add router tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 12:30:17 +03:00
73ed3f7782 [#174] Fix router filter query matching
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 12:30:17 +03:00
6e3595e35b [#174] Fix object keys with slashes in chi
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 12:30:17 +03:00
57add29643 [#173] Use forked actions in workflow
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-07-18 15:18:34 +03:00
b59aa06637 [#146] Add kludge.bypass_content_encoding_check_in_chunks flag
The flag allows skipping the `Content-Encoding` check for the `aws-chunked` value

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-18 14:49:52 +03:00
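A rough illustration of what such a kludge flag could look like in code; `checkContentEncoding` and `bypassContentEncodingInChunks` are hypothetical names, and the exact gateway semantics may differ:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// checkContentEncoding sketches the kludge: normally a leftover "aws-chunked"
// value in Content-Encoding is rejected, but with the bypass flag enabled the
// value is silently dropped instead.
func checkContentEncoding(h http.Header, bypassContentEncodingInChunks bool) (string, error) {
	var kept []string
	for _, enc := range strings.Split(h.Get("Content-Encoding"), ",") {
		enc = strings.TrimSpace(enc)
		if enc == "aws-chunked" {
			if bypassContentEncodingInChunks {
				continue // drop it and carry on
			}
			return "", fmt.Errorf("aws-chunked must not be kept in Content-Encoding")
		}
		if enc != "" {
			kept = append(kept, enc)
		}
	}
	return strings.Join(kept, ","), nil
}

func main() {
	h := http.Header{"Content-Encoding": []string{"aws-chunked,gzip"}}
	enc, err := checkContentEncoding(h, true)
	fmt.Println(enc, err) // gzip <nil>
}
```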
d62aa7b979 [#146] Fix preconditions: trim quotes in etags
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-18 14:49:03 +03:00
751a9be7cc [#146] Move getting chunk payload reader to separate function
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-18 14:49:03 +03:00
e58ea40463 Release v0.27.0
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-07-14 10:53:28 +03:00
14ef9ff091 [#158] Separate init object reader from read itself
To be able to return an appropriate HTTP status code when an object is
missing in storage but the gate cache still contains its metadata,
we need to run code after initializing the object reader.
So we separate reader initialization from the actual reading.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-11 17:32:05 +03:00
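A minimal sketch of the split, with hypothetical `initObjectPayloadReader`/`handleGet` names: because initialization is a separate step, a not-found error can still be turned into a 404 before any payload (and the 200 status line) is written.

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

var errObjectNotFound = errors.New("object not found")

// initObjectPayloadReader is a stand-in for the "init" phase: it talks to the
// storage and can still fail with a not-found error even if the gate cache
// has metadata for the object.
func initObjectPayloadReader(missing bool) (io.Reader, error) {
	if missing {
		return nil, errObjectNotFound
	}
	return strings.NewReader("object payload"), nil
}

func handleGet(w http.ResponseWriter, missing bool) {
	// The error surfaces here, before any bytes are written to the client.
	rd, err := initObjectPayloadReader(missing)
	if err != nil {
		if errors.Is(err, errObjectNotFound) {
			http.Error(w, "NoSuchKey", http.StatusNotFound)
			return
		}
		http.Error(w, "InternalError", http.StatusInternalServerError)
		return
	}
	_, _ = io.Copy(w, rd) // actual reading happens only after init succeeded
}

func main() {
	rec := httptest.NewRecorder()
	handleGet(rec, true) // metadata cached, but the payload is already gone
	fmt.Println(rec.Code) // 404 instead of a broken 200 stream
}
```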
fc90981c03 [#149] Update inner imports after moving middlewares
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-11 17:25:09 +03:00
83cdfbee78 [#149] Move middlewares to separate package
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-11 17:25:09 +03:00
37f2f468fe [#149] Add host bucket router
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-11 17:25:09 +03:00
e30a452cdb [#149] Use chi instead of gorilla mux
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-11 17:25:09 +03:00
7be70243f7 [#166] Update sdk to support grpc schemes in tree pool
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-11 17:25:03 +03:00
7f708b3a2d [#111] auth: Get log from real request context
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-06 11:57:04 +00:00
d531b13866 [#143] Add more context to some s3 errors
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-30 12:08:33 +03:00
f921bc8af5 [#143] Fix typo
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-30 12:04:52 +03:00
be03c5178f [#143] Fix NoSuchKey error on get/head
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-30 12:04:52 +03:00
499f4c6495 [#155] metrics: Use default registerer for app metrics
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-28 12:00:47 +03:00
2cbe3b9a27 [#131] Update docs
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
c588d485fa [#131] authmate: Add update-secret cobra cmd
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
3927223bb0 [#131] authmate: Add generate-presigned-url cobra cmd
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
eba85b50b6 [#131] authmate: Add obtain-secret cobra cmd
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
043447600e [#131] authmate: Add issue-secret cobra cmd
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
0cd353707a [#131] authmate: Make authmate use cobra
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
f74ab12f91 [#131] authmate: Add agent.UpdateSecret
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
dea7b39805 [#131] Fix session token limit by container
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
9df8695463 [#143] Fix transformToS3Error function
Unwrap error before checking for s3 error

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-21 17:16:40 +03:00
614d703726 [#106] Add chunk uploading
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-06-21 10:23:57 +03:00
23593eee3d [#111] Use request scope logger
Store a child zap logger with request-scoped fields in the context.
Request-scoped fields: request_id, api/method, bucket, object.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-19 13:54:51 +03:00
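A small self-contained sketch of the approach using zap and a context key; the helper names are hypothetical, while the field set follows the commit message.

```go
package main

import (
	"context"
	"net/http"

	"go.uber.org/zap"
)

type logCtxKey struct{}

// withRequestLogger stores a child logger with request-scoped fields
// (request_id, api/method, bucket, object) in the request context.
func withRequestLogger(ctx context.Context, base *zap.Logger, requestID, method, bucket, object string) context.Context {
	child := base.With(
		zap.String("request_id", requestID),
		zap.String("method", method),
		zap.String("bucket", bucket),
		zap.String("object", object),
	)
	return context.WithValue(ctx, logCtxKey{}, child)
}

// loggerFromContext returns the request-scoped logger, falling back to a no-op one.
func loggerFromContext(ctx context.Context) *zap.Logger {
	if l, ok := ctx.Value(logCtxKey{}).(*zap.Logger); ok {
		return l
	}
	return zap.NewNop()
}

func handler(w http.ResponseWriter, r *http.Request) {
	loggerFromContext(r.Context()).Info("request handled")
	w.WriteHeader(http.StatusOK)
}

func main() {
	base, _ := zap.NewProduction()
	defer func() { _ = base.Sync() }()
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		ctx := withRequestLogger(r.Context(), base, "req-123", "GetObject", "bkt", "obj")
		handler(w, r.WithContext(ctx))
	})
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```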
dfc4476afd [#135] authmate: Update docs
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-16 09:42:49 +03:00
84358f6742 [#135] authmate: Support CRDT GSet for credentials
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-16 09:42:49 +03:00
7a380fa46c [#135] frostfs: Add SEARCH operation
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-16 09:42:41 +03:00
0590f84d68 [#135] crdt: Add GSet
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-16 09:39:43 +03:00
751d66bde0 [#140] Use gRPC interceptor from observability package
The previous SDK implementation had an implicit gRPC interceptor
for tracing. Now pool constructors allow arbitrary dial options,
so the gateway has to pass the tracing interceptors from the
observability package manually.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-14 15:46:43 +00:00
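A hedged sketch of passing a tracing interceptor as a dial option; `tracingUnaryInterceptor` is a stand-in for whatever the observability package actually exports, and the endpoint is a placeholder.

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// tracingUnaryInterceptor stands in for the interceptor exported by the
// observability package; here it only logs the RPC method name.
func tracingUnaryInterceptor(
	ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption,
) error {
	log.Printf("rpc: %s", method)
	return invoker(ctx, method, req, reply, cc, opts...)
}

func main() {
	// Since pool constructors accept arbitrary dial options, the gateway can
	// pass the tracing interceptor explicitly instead of relying on the SDK.
	dialOpts := []grpc.DialOption{
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithChainUnaryInterceptor(tracingUnaryInterceptor),
	}
	conn, err := grpc.Dial("localhost:8080", dialOpts...)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```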
462589fc0c [#103] Return 504 http code on timeout errors
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-14 09:16:21 +03:00
8fcaf76f41 [#132] authmate: Add bearer token to obtain-secret result
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-13 15:14:19 +00:00
19c89b38e6 [#133] Update docs
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-09 09:34:49 +03:00
0bcda6ea37 [#133] Drop sync-tree
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-09 09:34:36 +03:00
9dabaf6ecd [#133] Use tree pool from SDK
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-09 09:34:04 +03:00
840d457cb9 [#2] Keep issue templates in .github
The only thing supported in the .forgejo dir is workflows.
It might be useful to keep the .github dir for repo mirrors.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-07 15:35:57 +00:00
c2fae4c199 [#2] Update CONTRIBUTING
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-07 15:35:57 +00:00
9e0c39dcc2 [#2] Return shields
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-07 15:35:57 +00:00
e4b1d07185 [#2] Update CHANGELOG
Replace changelog history before the fork
with the link to the fork source.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-07 15:35:57 +00:00
3b5fcc3c8a [#2] Update CODEOWNERS
The CODEOWNERS feature isn't supported yet in Forgejo or
Gitea. See github.com/go-gitea/gitea/pull/24910.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-07 15:35:57 +00:00
c75add64ec [#124] Add govulncheck in CI
Check dependency issues on every PR.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-06 18:00:31 +03:00
acb6e8cbca [#127] Increase golangci-lint timeout
For slow actions runners.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-06 08:11:36 +00:00
4e1fd9589b [#84] Add tracing support
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2023-06-06 10:25:27 +03:00
bd898ad59e [#125] Update CHANGELOG.md
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-02 14:16:19 +00:00
43bee561cf [#125] Fix trailing whitespaces
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-02 14:16:19 +00:00
4a6e3a19ce [#125] Handle negative Content-Length on put
Compute the actual object size while calculating the hash on put.
Use this actual value when saving to the tree and cache.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-02 14:16:19 +00:00
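A minimal sketch of the single-pass approach (hypothetical `hashAndCount` helper): the size is counted while hashing, so the value stored in the tree and cache does not depend on the client-supplied Content-Length.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// hashAndCount reads the whole payload once, computing the hash and the
// actual byte count in the same pass, so a missing or negative
// Content-Length does not affect the size stored in tree and cache.
func hashAndCount(r io.Reader) (sum string, size int64, err error) {
	h := sha256.New()
	size, err = io.Copy(h, r)
	if err != nil {
		return "", 0, err
	}
	return hex.EncodeToString(h.Sum(nil)), size, nil
}

func main() {
	sum, size, _ := hashAndCount(strings.NewReader("payload of unknown length"))
	fmt.Println(size, sum)
}
```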
b445f7bbf9 [#125] api/auth: DisableURIPathEscaping for presign
Don't use escaping when presigning the URL.
Escape manually beforehand.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-02 14:16:19 +00:00
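A hedged sketch with the aws-sdk-go v1 signer, assuming the `DisableURIPathEscaping` option on `v4.Signer` and manual escaping of the path before signing; the endpoint, key and credentials are placeholders.

```go
package main

import (
	"log"
	"net/http"
	"net/url"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	creds := credentials.NewStaticCredentials("ACCESS_KEY", "SECRET_KEY", "")

	// Escape the object key manually before building the request...
	rawKey := "some object+name"
	escapedPath := "/bucket/" + url.PathEscape(rawKey)
	req, err := http.NewRequest(http.MethodGet, "http://s3.example.com"+escapedPath, nil)
	if err != nil {
		log.Fatal(err)
	}

	// ...and tell the signer not to escape the path again, so the signature
	// matches the URL the client will actually send.
	signer := v4.NewSigner(creds, func(s *v4.Signer) {
		s.DisableURIPathEscaping = true
	})
	if _, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now()); err != nil {
		log.Fatal(err)
	}
	log.Println(req.URL.String())
}
```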
868edfdb31 [#78] Add test of bucket removal with object not found error
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-01 16:13:28 +03:00
34bbbcf1ed [#78] Do not show objects missing from object service
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-01 16:13:28 +03:00
fae03c2b50 [#78] Process 'not found' error when object exists in tree
When an object exists in the tree but is missing in storage, we can't remove
the bucket. While the storage node does not sync the tree service and the
object service, the only way to delete such a broken bucket is to ignore the
'object not found' error, clear the cache and not include the missing
objects in the listing result.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-01 15:31:22 +03:00
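A toy model of the listing logic (hypothetical `headObject`/`listForRemoval` names): 'not found' entries are skipped instead of failing the whole operation, so an out-of-sync tree no longer blocks bucket removal.

```go
package main

import (
	"errors"
	"fmt"
)

var errObjectNotFound = errors.New("object not found")

// headObject is a stand-in for the object-service call; some OIDs referenced
// by the tree may already be gone from storage.
func headObject(oid string) error {
	if oid == "broken" {
		return errObjectNotFound
	}
	return nil
}

// listForRemoval returns only the objects that still exist: 'not found'
// entries are skipped (their cache entries would also be dropped), so the
// broken bucket can still be listed and removed.
func listForRemoval(treeOIDs []string) ([]string, error) {
	var existing []string
	for _, oid := range treeOIDs {
		switch err := headObject(oid); {
		case errors.Is(err, errObjectNotFound):
			continue // skip the missing object instead of failing the listing
		case err != nil:
			return nil, err // other errors are still fatal
		}
		existing = append(existing, oid)
	}
	return existing, nil
}

func main() {
	existing, _ := listForRemoval([]string{"ok-1", "broken", "ok-2"})
	fmt.Println(existing) // [ok-1 ok-2]
}
```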
81e860481d [#122] Fix linter warnings
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-05-31 19:43:03 +03:00
e97fea30ea [#122] Update actions for forgejo ci
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-05-31 19:43:00 +03:00
cfc94e39ef [#122] Move files from .github to .forgejo
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-05-31 19:42:58 +03:00
ce9294685c [#101] Update docs for frostfs.set_copies_number
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-05-25 09:11:09 +00:00
a0f0d792b8 [#78] layer: Clean up already removed object from tree
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-05-24 17:42:46 +03:00
43e336e155 [#118] go.mod: Update min go version to 1.19
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-05-23 17:32:05 +03:00
9f186d9aba [#104] app: Reload copies numbers on SIGHUP
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-05-23 13:19:58 +03:00
11f30a037b [#114] tree: Fix retry tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-05-23 11:35:44 +03:00
136a186c14 [#114] tree: Don't ignore unhealthy endpoints
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-05-19 17:22:39 +03:00
24390fdec8 [#110] tree: Add more logs for switching tree endpoints
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-05-19 17:20:38 +03:00
1fdbfb0dab [#110] tree: Update errors to switch endpoint
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-05-19 17:20:34 +03:00
1406f57bba [#1] Update comment lines
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-05-19 07:30:04 +00:00
3fbad97ae4 [#1] Rename files with mentions of previous project
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-05-19 07:30:04 +00:00
fa5889a0f5 [#94] Update prometheus to v1.15.0
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-05-18 17:36:32 +03:00
e24bc3f2ce [#101] app: Refactor the default copies number setting
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-05-17 11:36:28 +03:00
bf47978cfe [#105] Update SDK to fix impersonated token
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-05-05 18:11:33 +03:00
b366e75366 [#81] Use impersonate bearer token
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-05-03 17:22:52 +03:00
e487ee5b7d [#70] Add arrays of copies numbers for location constraints
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-05-03 13:48:26 +03:00
69d8779daf [#74] tree: Simplify retry
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-26 16:39:57 +03:00
9da77667f3 [#74] Add round tree retry
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-26 16:39:57 +03:00
f200dd310e [#74] Update docs
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-26 16:39:57 +03:00
ee8cce662b [#74] service/tree: Add logger
Log error instead of failing when multiple unversioned nodes are found

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-26 16:39:57 +03:00
ca8791a5fd [#74] Support multiple tree endpoints
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-26 16:39:57 +03:00
2ab6f004f1 Makefile: Add syncTree folder clean up to 'make clean'
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-04-21 17:09:53 +03:00
3da2d40fa8 syncTree: Update file filter to 'frostfs'
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-04-21 17:08:37 +03:00
1cbc7f323f [#80] metrics: Use map for constant labels
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-20 11:14:52 +03:00
c154f934e4 [#80] Add type to metrics description
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-20 11:14:52 +03:00
644524e8a5 [#80] metrics: Make global description unexported
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-20 11:14:52 +03:00
ee6118c3d7 [#80] Update CHANGELOG.md
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-20 11:14:52 +03:00
9e72fe1662 [#80] Refactor metrics, support dump descriptions
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-20 11:14:52 +03:00
70ec5a0a5b [#83] Don't create extra delete marker
We shouldn't create a delete marker if:
1. the object doesn't exist at all
2. the last version is already a delete marker

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-19 17:56:11 +03:00
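A minimal sketch of the guard described above; `objectVersion` and `needDeleteMarker` are hypothetical names.

```go
package main

import "fmt"

// objectVersion is a minimal model of the latest version of a key.
type objectVersion struct {
	IsDeleteMarker bool
}

// needDeleteMarker mirrors the rule from the commit message: no marker when
// the object doesn't exist at all, and no second marker on top of an
// existing one.
func needDeleteMarker(latest *objectVersion) bool {
	if latest == nil {
		return false // object doesn't exist at all
	}
	if latest.IsDeleteMarker {
		return false // last version is already a delete marker
	}
	return true
}

func main() {
	fmt.Println(needDeleteMarker(nil))                                  // false
	fmt.Println(needDeleteMarker(&objectVersion{IsDeleteMarker: true})) // false
	fmt.Println(needDeleteMarker(&objectVersion{}))                     // true
}
```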
4a8c382491 [#91] Update values for health metric
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-18 10:25:11 +03:00
299 changed files with 39928 additions and 14192 deletions

View file

@ -1,13 +1,14 @@
FROM golang:1.19 as builder
FROM golang:1.22 AS builder
ARG BUILD=now
ARG REPO=git.frostfs.info/TrueCloudLab/frostfs-s3-gw
ARG VERSION=dev
ARG GOFLAGS=""
WORKDIR /src
COPY . /src
RUN make
RUN make GOFLAGS=${GOFLAGS}
# Executable image
FROM alpine AS frostfs-s3-gw

View file

@ -1,3 +1,3 @@
.git
.cache
.github
.forgejo

View file

(binary image diff: 5.5 KiB before and after)

View file

@ -0,0 +1,27 @@
on:
pull_request:
push:
branches:
- master
jobs:
builds:
name: Builds
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.22', '1.23' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '${{ matrix.go_versions }}'
- name: Build binary
run: make
- name: Check dirty suffix
run: if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi

View file

@ -0,0 +1,20 @@
on: [pull_request]
jobs:
dco:
name: DCO
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
with:
from: 'origin/${{ github.event.pull_request.base.ref }}'

View file

@ -0,0 +1,45 @@
on:
pull_request:
push:
branches:
- master
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
cache: true
- name: Install linters
run: make lint-install
- name: Run linters
run: make lint
tests:
name: Tests
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.22', '1.23' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '${{ matrix.go_versions }}'
- name: Update Go modules
run: make dep
- name: Run tests
run: make test

View file

@ -0,0 +1,25 @@
on:
pull_request:
push:
branches:
- master
jobs:
vulncheck:
name: Vulncheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Run govulncheck
run: govulncheck ./...

1
.github/CODEOWNERS vendored
View file

@ -1 +0,0 @@
* @alexvanin @KirillovDenis

View file

@ -1,73 +0,0 @@
name: Builds
on:
pull_request:
branches:
- master
- 'support/*'
types: [ opened, synchronize ]
paths-ignore:
- '**/*.md'
jobs:
build_cli:
name: Build CLI
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: 1.19
- name: Restore Go modules from cache
uses: actions/cache@v2
with:
path: /home/runner/go/pkg/mod
key: deps-${{ hashFiles('go.sum') }}
- name: Get tree-service client
run: make sync-tree
- name: Update Go modules
run: make dep
- name: Build CLI
run: make
- name: Check version
run: if [[ $(make version) == *"dirty"* ]]; then exit 1; fi
build_image:
needs: build_cli
name: Build Docker image
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
- name: Restore Go modules from cache
uses: actions/cache@v2
with:
path: /home/runner/go/pkg/mod
key: deps-${{ hashFiles('go.sum') }}
- name: Get tree-service client
run: make sync-tree
- name: Update Go modules
run: make dep
- name: Build Docker image
run: make image

View file

@ -1,67 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ master, 'support/*' ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master, 'support/*' ]
schedule:
- cron: '35 8 * * 1'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2

View file

@ -1,22 +0,0 @@
name: DCO check
on:
pull_request:
branches:
- master
- 'support/*'
jobs:
commits_check_job:
runs-on: ubuntu-latest
name: Commits Check
steps:
- name: Get PR Commits
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@master
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: DCO Check
uses: tim-actions/dco@master
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

View file

@ -1,96 +0,0 @@
name: Tests
on:
pull_request:
branches:
- master
- 'support/*'
types: [opened, synchronize]
paths-ignore:
- '**/*.md'
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Get tree-service client
run: make sync-tree
- name: golangci-lint
uses: golangci/golangci-lint-action@v2
with:
version: latest
cover:
name: Coverage
runs-on: ubuntu-20.04
env:
CGO_ENABLED: 1
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
- name: Restore Go modules from cache
uses: actions/cache@v2
with:
path: /home/runner/go/pkg/mod
key: deps-${{ hashFiles('go.sum') }}
- name: Get tree-service client
run: make sync-tree
- name: Update Go modules
run: make dep
- name: Test and write coverage profile
run: make cover
- name: Upload coverage results to Codecov
uses: codecov/codecov-action@v1
with:
fail_ci_if_error: false
path_to_write_report: ./coverage.txt
verbose: true
tests:
name: Tests
runs-on: ubuntu-20.04
strategy:
matrix:
go_versions: [ '1.18.x', '1.19.x' ]
fail-fast: false
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: '${{ matrix.go_versions }}'
- name: Restore Go modules from cache
uses: actions/cache@v2
with:
path: /home/runner/go/pkg/mod
key: deps-${{ hashFiles('go.sum') }}
- name: Get tree-service client
run: make sync-tree
- name: Update Go modules
run: make dep
- name: Run tests
run: make test

3
.gitignore vendored
View file

@ -2,9 +2,6 @@
.idea
.vscode
# Tree service
internal/frostfs/services/tree/
# Vendoring
vendor

View file

@ -4,7 +4,7 @@
# options for analysis running
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 5m
timeout: 15m
# include test files or not, default is true
tests: true
@ -12,7 +12,8 @@ run:
# output configuration options
output:
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
format: tab
formats:
- format: tab
# all available settings of specific linters
linters-settings:
@ -24,6 +25,16 @@ linters-settings:
govet:
# report about shadowed variables
check-shadowing: false
custom:
truecloudlab-linters:
path: bin/external_linters.so
original-url: git.frostfs.info/TrueCloudLab/linters.git
settings:
noliteral:
enable: true
target-methods: ["Fatal"]
disable-packages: ["codes", "tc"]
constants-package: "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
linters:
enable:
@ -45,6 +56,7 @@ linters:
- gofmt
- whitespace
- goimports
- truecloudlab-linters
disable-all: true
fast: false
@ -54,3 +66,6 @@ issues:
- EXC0003 # test/Test ... consider calling this
- EXC0004 # govet
- EXC0005 # C-style breaks
exclude-dirs:
- api/auth/signer/v4asdk2
- api/auth/signer/v4sdk2

View file

@ -30,13 +30,20 @@ repos:
hooks:
- id: shellcheck
- repo: https://github.com/golangci/golangci-lint
rev: v1.51.2
hooks:
- id: golangci-lint
- repo: local
hooks:
- id: make-lint-install
name: install linters
entry: make lint-install
language: system
pass_filenames: false
- id: make-lint
name: run linters
entry: make lint
language: system
pass_filenames: false
- id: go-unit-tests
name: go unit tests
entry: make test

View file

@ -4,433 +4,349 @@ This document outlines major changes between releases.
## [Unreleased]
## [0.31.1] - 2024-11-28
### Fixed
- Clean up List and Name caches when object is missing in Tree service (#57)
- Get empty bucket CORS from frostfs (TrueCloudLab#36)
- Don't count pool error on client abort (#35)
- Ignore precondition headers with invalid date format (#563)
- MD5 calculation of object-part with SSE-C (#543)
## [0.31.0] - Rongbuk - 2024-11-20
### Fixed
- Docker warnings during image build (#421)
- `PartNumberMarker` in ListMultipart response (#451)
- PostObject handling (#456)
- Tag logging errors (#452)
- Removing of duplicated parts in tree service during split brain (#448)
- Container resolving (#482)
- FrostFS to S3 error transformation (#488)
- Default bucket routing (#507)
- encoding-type in ListBucketObjectVersions (#404)
- SIGHUP support for `tracing.enabled` config parameter (#520)
- `trace_id` parameter in logs (#501)
- Listing marker processing (#539)
- Content-MD5 header check (#540)
- Precondition check (#538)
- Bucket name check during all S3 operations (#556)
### Added
- Return `X-Owner-Id` in `head-bucket` response (#79)
- Return container name in `head-bucket` response (TrueCloudLab#18)
- Billing metrics (TrueCloudLab#5)
- Multiple configs support (TrueCloudLab#21)
- Bucket name resolving policy (TrueCloudLab#25)
- Support string `Action` and `Resource` fields in `bucketPolicy.Statement` (TrueCloudLab#32)
- Add new `kludge.use_default_xmlns_for_complete_multipart` config param (TrueCloudLab#40)
- Support for separate container for all CORS settings (#422)
- `X-Amz-Force-Delete-Bucket` header for forced bucket removal (#31)
- `Location` support in CompleteMultipart response (#451)
- Tree pool request duration metric (#447)
- Expiration lifecycle configuration support (#42, #412, #459, #460, #516, #536)
- Add support for virtual hosted style addressing (#446, #449, #493)
- Support `frostfs.graceful_close_on_switch_timeout` (#475)
- Vulnerability report document (#413)
- Support patch object method (#462, #473, #466, #479)
- Enhanced logging and request reproducer (#369)
- Root CA configuration for tracing (#484)
- Log sampling policy configuration (#461)
- `sign` command to `frostfs-s3-authmate` (#467)
- Support custom aws credentials (#509)
- Source IP binding configuration for FrostFS requests (#521)
- Tracing attributes (#549)
### Changed
- Split `FrostFS` interface into separate read methods (#427)
- golangci-lint v1.60 support (#474)
- Updated Go version to 1.22 (#470)
- Container removal after failed bucket creation (#434)
- Explicit check for `.` symbol in bucket name (#506)
- Transaction waiter in contract clients (#522)
- Avoid maintenance mode storage node during object operations (#524)
- Content-Type does not include in Presigned URL of s3-authmate (#505)
- Check owner ID before deleting bucket (#528)
- S3-Authmate now uses APE instead basic-ACL (#553)
### Removed
- Reduce using mutex when update app settings (#329)
## [0.30.8] - 2024-10-18
### Fixed
- Error handling for correct connection switch in SDK Pool (#517)
## [0.30.7] - 2024-10-03
### Fixed
- Correct aws-chunk encoding size handling (#511)
## [0.30.6] - 2024-09-17
### Fixed
- Object size of objects upload with aws-chunked encoding (#450)
- Object size of objects upload with negative Content-Length (#486)
## [0.30.5] - 2024-09-16
### Fixed
- Panic catchers for fuzzing tests (#492)
## [0.30.4] - 2024-09-03
### Added
- Fuzzing tests (#480)
## [0.30.3] - 2024-08-27
### Fixed
- Empty listing when multipart upload contains more than 1000 parts (#471)
## [0.30.2] - 2024-08-20
### Fixed
- Error counting in pool component before connection switch (#468)
### Added
- Log of endpoint address during tree pool errors (#468)
## [0.30.1] - 2024-07-25
### Fixed
- Redundant system node removal in tree service (#437)
### Added
- Log details on SDK Pool health status change (#439)
## [0.30.0] - Kangshung -2024-07-19
### Fixed
- Fix HTTP/2 requests (#341)
- Fix Decoder.CharsetReader is nil (#379)
- Fix flaky ACL encode test (#340)
- Docs grammar (#432)
### Added
- Add new `reconnect_interval` config param for server rebinding (#291)
- Support `GetBucketPolicyStatus` (#301)
- Support request IP filter with policy (#371, #377)
- Support tag checks in policies (#357, #365, #392, #403, #411)
- Support IAM-MFA checks (#367)
- More docs (#334, #353)
- Add `register-user` command to `authmate` (#414)
- `User` field in request log (#396)
- Erasure coding support in placement policy (#400)
- Improved test coverage (#402)
### Changed
- Update dependencies noted by govulncheck (#368)
- Improve test coverage (#380, #387)
- Support updated naming in native policy JSON (#385)
- Improve determining AccessBox latest version (#335)
- Don't set full_control policy for bucket owner (#407)
### Removed
- Remove control api (#406)
- Remove notifications (#401)
- Remove `layer.Client` interface (#410)
- Remove extended ACL related code (#372)
## [0.29.3] - 2024-07-19
### Fixed
- Support tree split environment when multiple nodes
may be part of the same sub path (#430)
- Collision of multipart name and system data in the tree (#430)
- Workaround for removal of multiple null versions in unversioned bucket (#430)
## [0.29.2] - 2024-07-03
### Fixed
- Parsing of put-bucket-setting retry configuration (#398)
## [0.29.1] - 2024-06-20
### Fixed
- OPTIONS request processing for object operations (#399)
### Added
- Retries of put-bucket-setting operation during container creation (#398)
## [0.29.0] - Zemu - 2024-05-27
### Fixed
- Fix marshaling errors in `DeleteObjects` method (#222)
- Fix status code in GET/HEAD delete marker (#226)
- Fix `NextVersionIDMarker` in `list-object-versions` (#248)
- Fix possibility of panic during SIGHUP (#288)
- Fix flaky `TestErrorTimeoutChecking` (`make test` sometimes failed) (#290)
- Fix log-level change on SIGHUP (#313)
- Fix anonymous put request (#311)
- Fix routine leak from nns resolver (#324)
- Fix svace errors (#325, #328)
### Added
- Add new `frostfs.buffer_max_size_for_put` config param and sync TZ hash for PUT operations (#197)
- Add `X-Amz-Version-Id` header after complete multipart upload (#227)
- Add handling of `X-Amz-Copy-Source-Server-Side-Encryption-Customer-*` headers during copy (#217)
- Add new `logger.destination` config param (#236)
- Add `X-Amz-Content-Sha256` header validation (#218)
- Support frostfsid contract. See `frostfsid` config section (#260)
- Support per namespace placement policies configuration (see `namespaces.config` config param) (#266)
- Support control api to manage policies. See `control` config section (#258)
- Add `namespace` label to billing metrics (#271)
- Support policy-engine (#257, #259, #282, #283, #302, #307, #345, #351, #358, #360, #362, #383, #354)
- Support `proxy` contract (#287)
- Authmate: support custom attributes (#292)
- Add FrostfsID cache (#269)
### Changed
- Generalise config param `use_default_xmlns_for_complete_multipart` to `use_default_xmlns` so that use default xmlns for all requests (#221)
- Set server IdleTimeout and ReadHeaderTimeout to `30s` and allow to configure them (#220)
- Return `ETag` value in quotes (#219)
- Use tombstone when delete multipart upload (#275)
- Support new parameter `cache.accessbox.removing_check_interval` (#305)
- Use APE rules instead of eACL in container creation (#306)
- Rework bucket policy with policy-engine (#261)
- Improved object listing speed (#165, #347)
- Logging improvement (#300, #318)
### Removed
- Drop sending whitespace characters during complete multipart upload and related config param `kludge.complete_multipart_keepalive` (#227)
- Unused legacy minio related code (#299)
- Redundant output with journald logging (#298)
## [0.28.2] - 2024-05-27
### Fixed
- `anon` user in billing metrics (#321)
- Parts are not removed when multipart object removed (#370)
### Added
- Put request in duration metrics (#280)
## [0.28.1] - 2024-01-24
### Added
- MD5 hash as ETag and response header (#205)
- Tree pool traversal limit (#262)
### Updating from 0.28.0
See new `features.md5.enabled` and `frostfs.tree_pool_max_attempts` config
parameters.
## [0.28.0] - Academy of Sciences - 2023-12-07
### Fixed
- Handle negative `Content-Length` on put (#125)
- Use `DisableURIPathEscaping` to presign urls (#125)
- Use specific s3 errors instead of `InternalError` where possible (#143)
- `grpc` schemas in tree configuration (#166)
- Return appropriate 404 code when object missed in storage but there is in gate cache (#158)
- Replace part on re-upload when use multipart upload (#176)
- Fix goroutine leak on put object error (#178)
- Fix parsing signed headers in presigned urls (#182)
- Fix url escaping (#188)
- Use correct keys in `list-multipart-uploads` response (#185)
- Fix parsing `key-marker` for object list versions (#237)
- `GetSubTree` failures (#179)
- Unexpected EOF during multipart download (#210)
- Produce clean version in debian build (#245)
### Added
- Add `trace_id` value into log record when tracing is enabled (#142)
- Add basic error types and exit codes to `frostfs-s3-authmate` (#152)
- Add a metric with addresses of nodes of the same and highest priority that are currently healthy (#186)
- Support dump metrics descriptions (#80)
- Add `copies_numbers` section to `placement_policy` in config file and support vectors of copies numbers (#70, #101)
- Support impersonate bearer token (#81, #105)
- Reload default and custom copies numbers on SIGHUP (#104)
- Tracing support (#84, #140)
- Return bearer token in `s3-authmate obtain-secret` result (#132)
- Support multiple version credentials using GSet (#135)
- Implement chunk uploading (#106)
- Add new `kludge.bypass_content_encoding_check_in_chunks` config param (#146)
- Add new `frostfs.client_cut` config param (#192)
- Add selection of the node of the latest version of the object (#231)
- Soft memory limit with `runtime.soft_memory_limit` (#196)
- `server_health` metric for every S3 endpoint status (#199)
### Changed
- Update prometheus to v1.15.0 (#94)
- Update go version to go1.19 (#118)
- Remove object from tree and reset its cache on object deletion when it is already removed from storage (#78)
- Finish rebranding (#2)
- Timeout errors has code 504 now (#103)
- Use request scope logger (#111)
- Add `s3-authmate update-secret` command (#131)
- Use default registerer for app metrics (#155)
- Use chi router instead of archived gorlilla/mux (#149, #174, #188)
- Complete multipart upload doesn't unnecessary copy now. Thus, the total time of multipart upload was reduced by 2 times (#63)
- Use gate key to form object owner (#175)
- Apply placement policies and copies if there is at least one valid value (#168)
- `statistic_tx_bytes_total` and `statistic_rx_bytes_total` metric to `statistic_bytes_total` metric with `direction` label (#153)
- Refactor of context-stored data receivers (#137)
- Refactor fetch/parse config parameters functions (#117)
- Move all log messages to constants (#96)
- Allow zero value of `part-number-marker` (#207)
- Clean tag node in the tree service instead of removal (#233)
### Removed
- Drop `tree.service` param (now endpoints from `peers` section are used) (#133)
## [0.27.0] - Karpinsky - 2023-07-12
This is a first FrostFS S3 Gateway release named after
[Karpinsky glacier](https://en.wikipedia.org/wiki/Karpinsky_Glacier).
### Fixed
- Using multiple servers require only one healthy (#12)
- Renew token before it expires (#20)
- Add generated deb builder files to .gitignore, and fix typo (#28)
- Get empty bucket CORS from frostfs (#36)
- Don't count pool error on client abort (#35)
- Handle request cancelling (#69)
- Clean up List and Name caches when object is missing in Tree service (#57)
- Don't create unnecessary delete-markers (#83)
- `Too many pings` error (#145)
### Added
- Billing metrics (#5, #26, #29)
- Return container name in `head-bucket` response (#18)
- Multiple configs support (#21)
- Bucket name resolving policy (#25)
- Support string `Action` and `Resource` fields in `bucketPolicy.Statement` (#32)
- Add new `kludge.use_default_xmlns_for_complete_multipart` config param (#40)
- Return `X-Owner-Id` in `head-bucket` response (#79)
- Support multiple tree service endpoints (#74, #110, #114)
### Changed
- Repository rebranding (#1)
- Update neo-go to v0.101.0 (#14)
- Update viper to v1.15.0 (#14)
- Using multiple servers require only one healthy (TrueCloudLab#12)
- Update go version to go1.18 (TrueCloudLab#16)
- Return error on invalid LocationConstraint (TrueCloudLab#23)
- Place billing metrics to separate url path (TrueCloudLab#26)
- Add generated deb builder files to .gitignore, and fix typo (TrueCloudLab#28)
- Limit number of objects to delete at one time (TrueCloudLab#37)
- Update go version to go1.18 (#16)
- Return error on invalid LocationConstraint (#23)
- Limit number of objects to delete at one time (#37)
- CompleteMultipartUpload handler now sends whitespace characters to keep alive client's connection (#60)
- Support new system attributes (#64)
## [0.26.0] - 2022-12-28
### Added
- Use client time as `now` in some requests (#726)
- Reload policies on SIGHUP (#747)
- Authmate flags for pool timeouts (#760)
- Multiple server listeners (#742)
### Changed
- Placement policy configuration (#568)
- Improved debug logging of CID and OID values (#754)
### Removed
- Deprecated linters (#755)
### Updating from v0.25.1
New config parameters were added. And old one `defaul_policy` were changed.
```yaml
placement_policy:
default: "REP 3"
region_mapping: /path/to/container/policies.json
```
Make sure you update the config accordingly:
If you configure application using environment variables change:
* `S3_GW_DEFAULT_POLICY` -> `S3_GW_PLACEMENT_POLICY_DEFAULT_POLICY`
* `S3_GW_LISTEN_ADDRESS` -> `S3_GW_SERVER_0_ADDRESS`
* `S3_GW_TLS_CERT_FILE` -> `S3_GW_SERVER_0_TLS_CERT_FILE` (and set `S3_GW_SERVER_0_TLS_ENABLED=true`)
* `S3_GW_TLS_KEY_FILE` -> `S3_GW_SERVER_0_TLS_KEY_FILE` (and set `S3_GW_SERVER_0_TLS_ENABLED=true`)
If you configure application using `.yaml` file change:
* `defaul_policy` -> `placement_policy.default`
* `listen_address` -> `server.0.address`
* `tls.cert_file` -> `server.0.tls.cert_file` (and set `server.0.tls.enabled: true`)
* `tls.key_file` -> `server.0.tls.key_file` (and set `server.0.tls.enabled: true`)
## [0.25.1] - 2022-10-30
### Fixed
- Empty bucket policy (#740)
- Big object removal (#749)
- Checksum panic (#741)
### Added
- Debian packaging (#737)
- Timeout for individual operations in streaming RPC (#750)
## [0.25.0] - 2022-10-31
### Fixed
- Legal hold object lock enabling (#709)
- Errors at object locking (#719)
- Unrestricted access to not owned objects via cache (#713)
- Check tree service health (#699)
- Bucket names in listing (#733)
### Added
- Config reloading on SIGHUP (#702, #715, #716)
- Stop pool dial on SIGINT (#712)
### Changed
- GitHub actions update (#710)
- Makefile help (#725)
- Optimized object tags setting (#669)
- Improved logging (#728)
- Unified unit test names (#617)
- Improved docs (#732)
### Removed
- Unused cache methods (#650)
### Updating from v0.24.0
New config parameters were added. Make sure the default parameters are appropriate for you.
```yaml
cache:
accesscontrol:
lifetime: 1m
size: 100000
```
## [0.24.0] - 2022-09-14
### Added
- Exposure of pool metrics (#615, #680)
- Configuration of `set_copies_number` (#634, #637)
- Configuration of list of allowed `AccessKeyID` prefixes (#674)
- Tagging directive for `CopyObject` (#666, #683)
- Customer encryption (#595)
- `CopiesNumber` configuration (#634, #637)
### Changed
- Improved wallet configuration via `.yaml` config and environment variables (#607)
- Update go version for build to 1.19 (#694, #705)
- Update version calculation (#653, #697)
- Optimized lock creation (#692)
- Update way to configure `listen_domains` (#667)
- Use `FilePath` instead of `FileName` for object keys (#657)
- Optimize listing (#625, #616)
### Removed
- Drop any object search logic (#545)
### Fixed
- Responses to `GetObject` and `HeadObject`: removed redundant `VersionID` (#577, #682)
- Replacement of object tagging in case of overwriting of an object (#645)
- Using tags cache with empty `versionId` (#643)
- Fix panic on go1.19 (#678)
- Fix panic on invalid versioning status (#660)
- Fix panic on missing decrypt reader (#704)
- Using multipart uploads with `/` in name (#671)
- Don't update settings cache when request fails (#661)
- Fix handling `X-Amz-Copy-Source` header (#672)
- ACL related problems (#676, #606)
- Using `ContinuationToken` for "directories" (#684)
- Fix `connection was closed` error (#656)
- Fix listing for nested objects (#624)
- Fix anon requests to tree service (#504, #505)
### Updating from v0.23.0
Make sure your configuration is valid:
If you configure application using environment variables change:
* `S3_GW_WALLET` -> `S3_GW_WALLET_PATH`
* `S3_GW_ADDRESS` -> `S3_GW_WALLET_ADDRESS`
* `S3_GW_LISTEN_DOMAINS_N` -> `S3_GW_LISTEN_DOMAINS` (use it as array variable)
If you configure application using `.yaml` file change:
* `wallet` -> `wallet.path`
* `address` -> `wallet.address`
* `listen_domains.n` -> `listen_domains` (use it as array param)
## [0.23.0] - 2022-08-01
### Fixed
- System metadata are filtered now (#619)
- List objects in corner cases (#612, #627)
- Correct removal of a deleted object (#610)
- Bucket creation could lead to "no healthy client" error (#636)
### Added
- New param to configure pool error threshold (#633)
### Changed
- Pprof and prometheus metrics configuration (#591)
- Don't set sticky bit in authmate container (#540)
- Updated compatibility table (#638)
- Rely on string sanitizing from zap (#498)
### Updating from v0.22.0
1. To enable pprof use `pprof.enabled` instead of `pprof` in config.
To enable prometheus metrics use `prometheus.enabled` instead of `metrics` in config.
If you are using the command line flags you can skip this step.
## [0.22.0] - 2022-07-25
Tree service support
### Fixed
- Error logging (#450)
- Default bucket location constraint (#463)
- Suspended versioning status (#462)
- CodeQL warnings (#489, #522, #539)
- Bearer token behaviour with non-owned buckets (#459)
- ACL issues (#495, #553, #571, #573, #574, #580)
- Authmate policy parsing (#558)
### Added
- Public key output in authmate issue-secret command (#482)
- Support of conditional headers (#484)
- Cache type cast error logging (#465)
- `docker/*` target in Makefile (#471)
- Pre signed requests (#529)
- Tagging and ACL notifications (#361)
- AWSv4 signer package to improve compatibility with S3 clients (#528)
- Extension mimetype detector (#289)
- Default params documentation (#592)
- Health metric (#600)
- Parallel object listing (#525)
- Tree service (see commit links from #609)
### Changed
- Reduce number of network requests (#439, #441)
- Renamed authmate to s3-authmate (#518)
- Version output (#578)
- Improved error messages (#539)
### Removed
- `layer/neofs` package (#438)
## [0.21.1] - 2022-05-16
### Changed
- Update go version to go1.17 (#427)
- Set homomorphic hashing disable attribute in container if required (#435)
## [0.21.0] - 2022-05-13
### Added
- Support of get-object-attributes (#430)
### Fixed
- Reduced time of bucket creation (#426)
- Bucket removal (#428)
- Obtainment of ETag value (#431)
### Changed
- Authmate doesn't parse session context anymore, now it accepts application defined
flexible structure with container ID in human-readable format (#428)
## [0.20.0] - 2022-04-29
### Added
- Support of object locking (#195)
- Support of basic notifications (#357, #358, #359)
### Changed
- Logger behavior: now it writes to stderr instead of stdout, app name and
version are always presented and fixed, all user options except of `level` are
dropped (#380)
- Improved docs, added config examples (#396, #398)
- Updated NeoFS SDK (#365, #409)
### Fixed
- Added check of `SetEACL` tokens before processing of requests (#347)
- Authmate: returned lost session tokens when a parameter `--session-token` is
omitted (#387)
- Error when a bucket hasn't a settings file (#389)
- Response to a request to delete not existing object (#392)
- Replaced gate key in ACL Grantee by key of bearer token issuer (#395)
- Missing attach of bearer token to requests to put system object (#399)
- Deletion of system object while CompleteMultipartUpload (#400)
- Improved English in docs and comments (#405)
- Authmate: reconsidered default bearer token rules (#406)
## [0.19.0] - 2022-03-16
### Added
- Authmate: support placement policy overriding (#343, #364)
- Managing bucket notification configuration (#340)
- Unit tests in go1.17 (#265)
- NATS settings in application config (#341)
- Support `Expires` and `Cache-Control` headers (#312)
- Support `%` as delimiter (#313)
- Support `null` version deletion (#319)
- Bucket name resolving order (#285)
- Authmate: added `timeout` flag (#290)
- MinIO results in s3 compatibility tables (#304)
- Support overriding response headers (#310)
### Changed
- Authmate: check parameters before container creation (#372)
- Unify cache invalidation on deletion (#368)
- Updated NeoFS SDK to v1.0.0-rc.3 (#297, #333, #346, #376)
- Authmate: changed session token rules handling (#329, #336, #338, #352)
- Changed status code for some failed requests (#308)
- GetBucketLocation returns policy name used at bucket creation (#301)
### Fixed
- Waiting for bucket to be deleted (#366)
- Authmate: changed error message for session context building (#348)
- Authmate: fixed access key parsing in `obtain-secret` command (#295)
- Distinguishing `BucketAlreadyExists` errors (#354)
- Incorrect panic if handler not found (#305)
- Authmate: use container friendly name as system name (#299, #324)
- Use UTC `Last-Modified` timestamps (#331)
- Don't return object system metadata (#307)
- Handling empty post policy (#306)
- Use `X-Amz-Verion-Id` in `CompleteMulipartUpload` (#318)
### Removed
- Drop MinIO related errors (#316)
## [0.18.0] - 2021-12-16
### Added
- Support for MultipartUpload (#186, #187)
- CORS support (#217)
- Authmate supports setting of tokens lifetime in a more convenient format (duration) (#258)
- Generation of a random key for `--no-sign-request` (#276)
### Changed
- Bucket name resolving mechanism from listing owner's containers to using DNS (#219)
### Removed
- Deprecated golint, replaced by revive (#272)
## 0.17.0 (24 Sep 2021)
With this release we introduce [ceph-based](https://github.com/ceph/s3-tests) S3 compatibility results.
### Added
* Versioning support (#122, #242, #263)
* Ceph S3 compatibility results (#150, #249, #266)
* Handling `X-Amz-Expected-Bucket-Owner` header (#216)
* `X-Container-Id` header for `HeadBucket` response (#220)
* Basic ACL support (#49, #213)
* Caching (#179, #206, #231, #236, #253)
* Metadata directive when copying (#191)
* Bucket name checking (189)
* Continuation token support (#112, #154, #180)
* Mapping `LocationConstraint` to `PlacementPolicy` (#89)
* Tagging support (#196)
* POST uploading support (#190)
* Delete marker support (#248)
* Expiration for access box (#255)
* AWS CLI credential generating by authmate (#241)
### Changed
* Default placement policy is now configurable (#218)
* README is split into different files (#210)
* Unified error handling (#89, #149, #184)
* Authmate issue-secret response contains container id (#163)
* Removed "github.com/nspcc-dev/neofs-node" dependency (#234)
* Removed GitHub workflow of image publishing (#243)
* Changed license to AGPLv3 (#264)
### Fixed
* ListObjects results are now the same for different users (#230)
* Error response for invalid authentication header is now correct (#199)
* Saving object metadata (#198)
* Range header handling (#194)
* Correct status codes (#118, #262)
* HeadObject for "directories" (#160)
* Fetch-owner parameter support (#159)
## 0.16.0 (16 Jul 2021)
With this release we publish S3 gateway source code. It includes various S3
compatibility improvements, support of bucket management, unified secp256r1
cryptography with NEP-6 wallet support.
### Fixed
* Allowed no-sign request (#65)
* Bearer token attached to all requests (#84)
* Time format in responses (#133)
* Max-keys checked in ListObjects (#135)
* Lost metadat in the objects (#131)
* Unique bucket name check (#125)
### Added
* Bucket management operations (#47, #72)
* Node-specific owner IDs in bearer tokens (#83)
* AWS CLI usage section in README (#77)
* List object paging (#97)
* Lifetime for the tokens in auth-mate (#108)
* Support of range in GetObject request (#96)
* Support of NEP-6 wallets instead of binary encoded keys (#92)
* Support of JSON encoded rules in auth-mate (#71)
* Support of delimiters in ListObjects (#98)
* Support of object ETag (#93)
* Support of time-based conditional CopyObject and GetObject (#94)
### Changed
* Accesskey format: now `0` used as a delimiter between container ID and object
ID instead of `_` (#164)
* Accessbox is encoded in protobuf format (#48)
* Authentication uses secp256r1 instead of ed25519 (#75)
* Improved integration with NeoFS SDK and NeoFS API Go (#78, #88)
* Optimized object put execution (#155)
### Removed
* GRPC keepalive options (#73)
## 0.15.0 (10 Jun 2021)
This release brings S3 gateway to the current state of NeoFS and fixes some
bugs, no new significant features introduced (other than moving here already
existing authmate component).
New features:
* authmate was moved into this repository and is now built along with the
gateway itself (#46)
Behavior changes:
* neofs-s3-gate was renamed to neofs-s3-gw (#50)
Improvements:
* better Makefile (#43, #45, #55)
* stricter linters (#45)
* removed non-standard errors package from dependencies (#54)
* refactoring, reusing new sdk-go component (#60, #62, #63)
* updated neofs-api-go for compatibility with current NeoFS node 0.21.0 (#60, #68)
* extended README (#67, #76)
Bugs fixed:
* wrong (as per AWS specification) access key ID generated (#64)
- Abstract network communication in TreeClient (#59, #75)
- Changed values for `frostfs_s3_gw_state_health` metric (#91)
## Older versions
Please refer to [Github
releases](https://github.com/nspcc-dev/neofs-s3-gw/releases/) for older
releases.
This project is a fork of [NeoFS S3 Gateway](https://github.com/nspcc-dev/neofs-s3-gw) from version v0.26.0.
To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-s3-gw/blob/master/CHANGELOG.md.
[0.18.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.17.0...v0.18.0
[0.19.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.18.0...v0.19.0
[0.20.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.19.0...v0.20.0
[0.21.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.20.0...v0.21.0
[0.21.1]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.21.0...v0.21.1
[0.22.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.21.1...v0.22.0
[0.23.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.22.0...v0.23.0
[0.24.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.23.0...v0.24.0
[0.25.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.24.0...v0.25.0
[Unreleased]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.25.0...master
[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/b2148cc3...v0.27.0
[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.27.0...v0.28.0
[0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.0...v0.28.1
[0.28.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.1...v0.28.2
[0.29.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.2...v0.29.0
[0.29.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.0...v0.29.1
[0.29.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.1...v0.29.2
[0.29.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.2...v0.29.3
[0.30.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.3...v0.30.0
[0.30.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.0...v0.30.1
[0.30.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.1...v0.30.2
[0.30.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.2...v0.30.3
[0.30.4]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.3...v0.30.4
[0.30.5]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.4...v0.30.5
[0.30.6]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.5...v0.30.6
[0.30.7]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.6...v0.30.7
[0.30.8]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.7...v0.30.8
[0.31.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.8...v0.31.0
[0.31.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.0...v0.31.1
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.1...master

3
CODEOWNERS Normal file
View file

@ -0,0 +1,3 @@
.* @TrueCloudLab/storage-services-developers @TrueCloudLab/storage-services-committers
.forgejo/.* @potyarkin
Makefile @potyarkin

View file

@ -3,8 +3,8 @@
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:
- Check the open [issues](https://github.com/TrueCloudLab/frostfs-s3-gw/issues) and
[pull requests](https://github.com/TrueCloudLab/frostfs-s3-gw/pulls) for existing
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/issues) and
[pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pulls) for existing
discussions.
- Open an issue first, to discuss a new feature or enhancement.
@ -27,20 +27,20 @@ Start by forking the `frostfs-s3-gw` repository, make changes in a branch and th
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in details:
### Set up your GitHub Repository
### Set up your git repository
Fork [FrostFS S3 Gateway
upstream](https://github.com/TrueCloudLab/frostfs-s3-gw/fork) source repository
upstream](https://git.frostfs.info/repo/fork/15) source repository
to your own personal repository. Copy the URL of your fork (you will need it for
the `git clone` command below).
```sh
$ git clone https://github.com/TrueCloudLab/frostfs-s3-gw
$ git clone https://git.frostfs.info/<username>/frostfs-s3-gw.git
```
### Set up git remote as ``upstream``
```sh
$ cd frostfs-s3-gw
$ git remote add upstream https://github.com/TrueCloudLab/frostfs-s3-gw
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw.git
$ git fetch upstream
$ git merge upstream/master
...
@ -90,8 +90,8 @@ $ git push origin feature/123-something_awesome
```
### Create a Pull Request
Pull requests can be created via GitHub. Refer to [this
document](https://help.github.com/articles/creating-a-pull-request/) for
Pull requests can be created via Forgejo. Refer to [this
document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.

View file

@ -3,19 +3,34 @@
# Common variables
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.19
LINT_VERSION ?= 1.49.0
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.1
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
BINDIR = bin
METRICS_DUMP_OUT ?= ./metrics-dump.json
# Binaries to build
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
BINS = $(addprefix $(BINDIR)/, $(CMDS))
GOFLAGS ?=
# Variables for docker
REPO_BASENAME = $(shell basename `go list -m`)
HUB_IMAGE ?= "truecloudlab/$(REPO_BASENAME)"
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
OUTPUT_LINT_DIR ?= $(shell pwd)/bin
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache
# Variables for fuzzing
FUZZ_NGFUZZ_DIR ?= ""
FUZZ_TIMEOUT ?= 30
FUZZ_FUNCTIONS ?= "all"
FUZZ_AUX ?= ""
.PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean protoc
# .deb package versioning
@ -28,9 +43,10 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
# Make all binaries
all: $(BINS)
$(BINS): sync-tree $(BINDIR) dep
$(BINS): $(BINDIR) dep
@echo "⇒ Build $@"
CGO_ENABLED=0 \
GOFLAGS=$(GOFLAGS) \
go build -v -trimpath \
-ldflags "-X $(REPO)/internal/version.Version=$(VERSION)" \
-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))
@ -39,10 +55,6 @@ $(BINDIR):
@echo "⇒ Ensure dir: $@"
@mkdir -p $@
# Synchronize tree service
sync-tree:
@./syncTree.sh
# Pull go dependencies
dep:
@printf "⇒ Download requirements: "
@ -61,7 +73,7 @@ docker/%:
-w /src \
-u `stat -c "%u:%g" .` \
--env HOME=/src \
golang:$(GO_VERSION) make $*,\
golang:$(GO_VERSION) make GOFLAGS=$(GOFLAGS) $*,\
@echo "supported docker targets: all $(BINS) lint")
# Run tests
@ -73,6 +85,34 @@ cover:
@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
@go tool cover -html=coverage.txt -o coverage.html
# Run fuzzing
CLANG := $(shell which clang-17 2>/dev/null)
.PHONY: check-clang all
check-clang:
ifeq ($(CLANG),)
@echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh "
@exit 1
endif
.PHONY: check-ngfuzz all
check-ngfuzz:
@if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \
echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \
exit 1; \
fi
.PHONY: install-fuzzing-deps
install-fuzzing-deps: check-clang check-ngfuzz
.PHONY: fuzz
fuzz: install-fuzzing-deps
@START_PATH=$$(pwd); \
ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
cd $(FUZZ_NGFUZZ_DIR) && \
./ngfuzz -clean && \
./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
./ngfuzz -report
# Reformat code
format:
@echo "⇒ Processing gofmt check"
@ -84,6 +124,7 @@ image:
@docker build \
--build-arg REPO=$(REPO) \
--build-arg VERSION=$(VERSION) \
--build-arg GOFLAGS=$(GOFLAGS) \
--rm \
-f .docker/Dockerfile \
-t $(HUB_IMAGE):$(HUB_TAG) .
@ -103,9 +144,23 @@ dirty-image:
-f .docker/Dockerfile.dirty \
-t $(HUB_IMAGE)-dirty:$(HUB_TAG) .
# Install linters
lint-install:
@mkdir -p $(TMP_DIR)
@rm -rf $(TMP_DIR)/linters
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters
lint:
@golangci-lint --timeout=5m run
@if [ ! -d "$(LINT_DIR)" ]; then \
echo "Run make lint-install"; \
exit 1; \
fi
$(LINT_DIR)/golangci-lint --timeout=5m run
# Run linters in Docker
docker/lint:
@ -134,10 +189,16 @@ clean:
# Generate code from .proto files
protoc:
# Install specific version for protobuf lib
@GOBIN=$(abspath $(BINDIR)) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen
@for f in `find . -type f -name '*.proto' -not -path './vendor/*'`; do \
echo "⇒ Processing $$f "; \
protoc \
--go_out=paths=source_relative:. $$f; \
--go_out=paths=source_relative:. \
--plugin=protoc-gen-go-frostfs=$(BINDIR)/protogen \
--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
--go-grpc_opt=require_unimplemented_servers=false \
--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
done
rm -rf vendor
@ -153,4 +214,9 @@ debpackage:
debclean:
dh clean
# Dump metrics (use METRICS_DUMP_OUT variable to override default out file './metrics-dump.json')
.PHONY: dump-metrics
dump-metrics:
@go test ./metrics -run TestDescribeAll --tags=dump_metrics --out=$(abspath $(METRICS_DUMP_OUT))
include help.mk
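With the lint targets above, `golangci-lint` is no longer expected to be on `PATH`: `lint-install` places the pinned version into a versioned directory under `bin`, and `lint` runs it from there. A typical local run, assuming the Makefile defaults shown here, is:
```sh
$ make lint-install   # clone TrueCloudLab linters and install the pinned golangci-lint into ./bin
$ make lint           # run the installed golangci-lint with a 5m timeout
```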

View file

@ -1,5 +1,5 @@
<p align="center">
<img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
<img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
</p>
<p align="center">
<a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
@ -7,6 +7,8 @@
---
[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)
![Release](https://img.shields.io/badge/dynamic/json.svg?label=release&url=https://git.frostfs.info/api/v1/repos/TrueCloudLab/frostfs-s3-gw/releases&query=$[0].tag_name&color=orange)
![License](https://img.shields.io/badge/license-GPL--3.0-orange.svg)
# FrostFS S3 Gateway
@ -91,6 +93,24 @@ HTTP/1.1 200 OK
Also, you can configure domains using `.env` variables or `yaml` file.
## Fuzzing
To run fuzzing tests use the following command:
```shell
$ make fuzz
```
This command will install dependencies for the fuzzing process and run existing fuzzing tests.
You can also use the following arguments:
```
FUZZ_TIMEOUT - time to run each fuzzing test (default 30)
FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all")
FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug")
FUZZ_NGFUZZ_DIR - path to ngfuzz tool
```
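For example, a run against a local `ngfuzz` checkout (the path below is illustrative) might look like:
```shell
$ make fuzz FUZZ_NGFUZZ_DIR=/path/to/ngfuzz FUZZ_TIMEOUT=60 FUZZ_FUNCTIONS="all"
```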
## Documentation
- [Configuration](./docs/configuration.md)

26
SECURITY.md Normal file
View file

@ -0,0 +1,26 @@
# Security Policy
## How To Report a Vulnerability
If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.
**Please do not report security vulnerabilities through public issues, discussions, or change requests.**
Instead, you can report it using one of the following ways:
* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email
Please include as much of the information listed below as you can to help us better understand and resolve the issue:
* The type of issue (e.g., buffer overflow, or cross-site scripting)
* Affected version(s)
* Impact of the issue, including how an attacker might exploit the issue
* Step-by-step instructions to reproduce the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Full paths of source file(s) related to the manifestation of the issue
* Any special configuration required to reproduce the issue
* Any log files that are related to this issue (if possible)
* Proof-of-concept or exploit code (if possible)
This information will help us triage your report more quickly.

View file

@ -1 +1 @@
v0.26.0
v0.31.1

View file

@ -2,162 +2,213 @@ package auth
import (
"context"
"crypto"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/url"
"regexp"
"strings"
"time"
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/credentials"
)
// authorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
var authorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)
var (
// AuthorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
AuthorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)
// postPolicyCredentialRegexp -- is regexp for credentials when uploading file using POST with policy.
var postPolicyCredentialRegexp = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request`)
// authorizationFieldV4aRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
authorizationFieldV4aRegexp = regexp.MustCompile(`AWS4-ECDSA-P256-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)
// postPolicyCredentialRegexp -- is regexp for credentials when uploading file using POST with policy.
postPolicyCredentialRegexp = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request`)
)
type (
// Center is a user authentication interface.
Center interface {
Authenticate(request *http.Request) (*Box, error)
}
// Box contains access box and additional info.
Box struct {
AccessBox *accessbox.Box
ClientTime time.Time
}
center struct {
Center struct {
reg *RegexpSubmatcher
regV4a *RegexpSubmatcher
postReg *RegexpSubmatcher
cli tokens.Credentials
allowedAccessKeyIDPrefixes []string // empty slice means all access key ids are allowed
settings CenterSettings
}
prs int
CenterSettings interface {
AccessBoxContainer() (cid.ID, bool)
}
authHeader struct {
//nolint:revive
AuthHeader struct {
AccessKeyID string
Service string
Region string
SignatureV4 string
Signature string
SignedFields []string
Date string
IsPresigned bool
Expiration time.Duration
Preamble string
PayloadHash string
}
)
const (
accessKeyPartsNum = 2
authHeaderPartsNum = 6
authHeaderV4aPartsNum = 5
maxFormSizeMemory = 50 * 1048576 // 50 MB
AmzAlgorithm = "X-Amz-Algorithm"
AmzCredential = "X-Amz-Credential"
AmzSignature = "X-Amz-Signature"
AmzSignedHeaders = "X-Amz-SignedHeaders"
AmzRegionSet = "X-Amz-Region-Set"
AmzExpires = "X-Amz-Expires"
AmzDate = "X-Amz-Date"
AmzContentSHA256 = "X-Amz-Content-Sha256"
AuthorizationHdr = "Authorization"
ContentTypeHdr = "Content-Type"
UnsignedPayload = "UNSIGNED-PAYLOAD"
StreamingUnsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
StreamingContentSHA256Trailer = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
StreamingContentECDSASHA256 = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"
StreamingContentECDSASHA256Trailer = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"
)
// ErrNoAuthorizationHeader is returned for unauthenticated requests.
var ErrNoAuthorizationHeader = errors.New("no authorization header")
func (p prs) Read(_ []byte) (n int, err error) {
panic("implement me")
var ContentSHA256HeaderStandardValue = map[string]struct{}{
UnsignedPayload: {},
StreamingUnsignedPayloadTrailer: {},
StreamingContentSHA256: {},
StreamingContentSHA256Trailer: {},
StreamingContentECDSASHA256: {},
StreamingContentECDSASHA256Trailer: {},
}
func (p prs) Seek(_ int64, _ int) (int64, error) {
panic("implement me")
}
var _ io.ReadSeeker = prs(0)
// New creates an instance of AuthCenter.
func New(frostFS tokens.FrostFS, key *keys.PrivateKey, prefixes []string, config *cache.Config) Center {
return &center{
cli: tokens.New(frostFS, key, config),
reg: NewRegexpMatcher(authorizationFieldRegexp),
func New(creds tokens.Credentials, prefixes []string, settings CenterSettings) *Center {
return &Center{
cli: creds,
reg: NewRegexpMatcher(AuthorizationFieldRegexp),
regV4a: NewRegexpMatcher(authorizationFieldV4aRegexp),
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
allowedAccessKeyIDPrefixes: prefixes,
settings: settings,
}
}
func (c *center) parseAuthHeader(header string) (*authHeader, error) {
submatches := c.reg.GetSubmatches(header)
const (
signaturePreambleSigV4 = "AWS4-HMAC-SHA256"
signaturePreambleSigV4A = "AWS4-ECDSA-P256-SHA256"
)
func (c *Center) parseAuthHeader(authHeader string, headers http.Header) (*AuthHeader, error) {
preamble, _, _ := strings.Cut(authHeader, " ")
var (
submatches map[string]string
region string
)
switch preamble {
case signaturePreambleSigV4:
submatches = c.reg.GetSubmatches(authHeader)
if len(submatches) != authHeaderPartsNum {
return nil, apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed)
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), authHeader)
}
region = submatches["region"]
case signaturePreambleSigV4A:
submatches = c.regV4a.GetSubmatches(authHeader)
if len(submatches) != authHeaderV4aPartsNum {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), authHeader)
}
region = headers.Get(AmzRegionSet)
default:
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), authHeader)
}
accessKey := strings.Split(submatches["access_key_id"], "0")
if len(accessKey) != accessKeyPartsNum {
return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)
}
signedFields := strings.Split(submatches["signed_header_fields"], ";")
return &authHeader{
return &AuthHeader{
AccessKeyID: submatches["access_key_id"],
Service: submatches["service"],
Region: submatches["region"],
SignatureV4: submatches["v4_signature"],
SignedFields: signedFields,
Region: region,
Signature: submatches["v4_signature"],
SignedFields: strings.Split(submatches["signed_header_fields"], ";"),
Date: submatches["date"],
Preamble: preamble,
PayloadHash: headers.Get(AmzContentSHA256),
}, nil
}
func (a *authHeader) getAddress() (oid.Address, error) {
var addr oid.Address
if err := addr.DecodeString(strings.ReplaceAll(a.AccessKeyID, "0", "/")); err != nil {
return addr, apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)
}
return addr, nil
func IsStandardContentSHA256(key string) bool {
_, ok := ContentSHA256HeaderStandardValue[key]
return ok
}
func (c *center) Authenticate(r *http.Request) (*Box, error) {
func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
var (
err error
authHdr *authHeader
authHdr *AuthHeader
signatureDateTimeStr string
needClientTime bool
)
queryValues := r.URL.Query()
if queryValues.Get(AmzAlgorithm) == "AWS4-HMAC-SHA256" {
if queryValues.Get(AmzAlgorithm) == signaturePreambleSigV4 {
creds := strings.Split(queryValues.Get(AmzCredential), "/")
if len(creds) != 5 || creds[4] != "aws4_request" {
return nil, fmt.Errorf("bad X-Amz-Credential")
}
authHdr = &authHeader{
authHdr = &AuthHeader{
AccessKeyID: creds[0],
Service: creds[3],
Region: creds[2],
SignatureV4: queryValues.Get(AmzSignature),
SignedFields: queryValues[AmzSignedHeaders],
Signature: queryValues.Get(AmzSignature),
SignedFields: strings.Split(queryValues.Get(AmzSignedHeaders), ";"),
Date: creds[1],
IsPresigned: true,
Preamble: signaturePreambleSigV4,
PayloadHash: r.Header.Get(AmzContentSHA256),
}
authHdr.Expiration, err = time.ParseDuration(queryValues.Get(AmzExpires) + "s")
if err != nil {
return nil, fmt.Errorf("couldn't parse X-Amz-Expires: %w", err)
return nil, fmt.Errorf("%w: couldn't parse X-Amz-Expires %v", apierr.GetAPIError(apierr.ErrMalformedExpires), err)
}
signatureDateTimeStr = queryValues.Get(AmzDate)
} else if queryValues.Get(AmzAlgorithm) == signaturePreambleSigV4A {
creds := strings.Split(queryValues.Get(AmzCredential), "/")
if len(creds) != 4 || creds[3] != "aws4_request" {
return nil, fmt.Errorf("bad X-Amz-Credential")
}
authHdr = &AuthHeader{
AccessKeyID: creds[0],
Service: creds[2],
Region: queryValues.Get(AmzRegionSet),
Signature: queryValues.Get(AmzSignature),
SignedFields: strings.Split(queryValues.Get(AmzSignedHeaders), ";"),
Date: creds[1],
IsPresigned: true,
Preamble: signaturePreambleSigV4A,
PayloadHash: r.Header.Get(AmzContentSHA256),
}
authHdr.Expiration, err = time.ParseDuration(queryValues.Get(AmzExpires) + "s")
if err != nil {
return nil, fmt.Errorf("%w: couldn't parse X-Amz-Expires %v", apierr.GetAPIError(apierr.ErrMalformedExpires), err)
}
signatureDateTimeStr = queryValues.Get(AmzDate)
} else {
@ -166,9 +217,9 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
if strings.HasPrefix(r.Header.Get(ContentTypeHdr), "multipart/form-data") {
return c.checkFormData(r)
}
return nil, ErrNoAuthorizationHeader
return nil, fmt.Errorf("%w: %v", middleware.ErrNoAuthorizationHeader, authHeaderField)
}
authHdr, err = c.parseAuthHeader(authHeaderField[0])
authHdr, err = c.parseAuthHeader(authHeaderField[0], r.Header)
if err != nil {
return nil, err
}
@ -181,26 +232,38 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
return nil, fmt.Errorf("failed to parse x-amz-date header field: %w", err)
}
if err := c.checkAccessKeyID(authHdr.AccessKeyID); err != nil {
if err = c.checkAccessKeyID(authHdr.AccessKeyID); err != nil {
return nil, err
}
addr, err := authHdr.getAddress()
cnrID, err := c.getAccessBoxContainer(authHdr.AccessKeyID)
if err != nil {
return nil, err
}
box, err := c.cli.GetBox(r.Context(), addr)
box, attrs, err := c.cli.GetBox(r.Context(), cnrID, authHdr.AccessKeyID)
if err != nil {
return nil, fmt.Errorf("get box: %w", err)
return nil, fmt.Errorf("get box by access key '%s': %w", authHdr.AccessKeyID, err)
}
if err = checkFormatHashContentSHA256(r.Header.Get(AmzContentSHA256)); err != nil {
return nil, err
}
clonedRequest := cloneRequest(r, authHdr)
if err = c.checkSign(authHdr, box, clonedRequest, signatureDateTime); err != nil {
if err = c.checkSign(r.Context(), authHdr, box, clonedRequest, signatureDateTime); err != nil {
return nil, err
}
result := &Box{AccessBox: box}
result := &middleware.Box{
AccessBox: box,
AuthHeaders: &middleware.AuthHeader{
AccessKeyID: authHdr.AccessKeyID,
Region: authHdr.Region,
SignatureV4: authHdr.Signature,
},
Attributes: attrs,
}
if needClientTime {
result.ClientTime = signatureDateTime
}
@ -208,7 +271,36 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
return result, nil
}
func (c center) checkAccessKeyID(accessKeyID string) error {
func (c *Center) getAccessBoxContainer(accessKeyID string) (cid.ID, error) {
var addr oid.Address
if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err == nil {
return addr.Container(), nil
}
cnrID, ok := c.settings.AccessBoxContainer()
if ok {
return cnrID, nil
}
return cid.ID{}, fmt.Errorf("%w: unknown container for creds '%s'", apierr.GetAPIError(apierr.ErrInvalidAccessKeyID), accessKeyID)
}
func checkFormatHashContentSHA256(hash string) error {
if !IsStandardContentSHA256(hash) {
hashBinary, err := hex.DecodeString(hash)
if err != nil {
return fmt.Errorf("%w: decode hash: %s: %s", apierr.GetAPIError(apierr.ErrContentSHA256Mismatch),
hash, err.Error())
}
if len(hashBinary) != sha256.Size && len(hash) != 0 {
return fmt.Errorf("%w: invalid hash size %d", apierr.GetAPIError(apierr.ErrContentSHA256Mismatch), len(hashBinary))
}
}
return nil
}
func (c Center) checkAccessKeyID(accessKeyID string) error {
if len(c.allowedAccessKeyIDPrefixes) == 0 {
return nil
}
@ -219,12 +311,12 @@ func (c center) checkAccessKeyID(accessKeyID string) error {
}
}
return apiErrors.GetAPIError(apiErrors.ErrAccessDenied)
return fmt.Errorf("%w: accesskeyID prefix isn't allowed", apierr.GetAPIError(apierr.ErrAccessDenied))
}
func (c *center) checkFormData(r *http.Request) (*Box, error) {
func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
if err := r.ParseMultipartForm(maxFormSizeMemory); err != nil {
return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidArgument)
return nil, fmt.Errorf("%w: parse multipart form with max size %d", apierr.GetAPIError(apierr.ErrInvalidArgument), maxFormSizeMemory)
}
if err := prepareForm(r.MultipartForm); err != nil {
@ -233,12 +325,13 @@ func (c *center) checkFormData(r *http.Request) (*Box, error) {
policy := MultipartFormValue(r, "policy")
if policy == "" {
return nil, ErrNoAuthorizationHeader
return nil, fmt.Errorf("%w: missing policy", middleware.ErrNoAuthorizationHeader)
}
submatches := c.postReg.GetSubmatches(MultipartFormValue(r, "x-amz-credential"))
creds := MultipartFormValue(r, "x-amz-credential")
submatches := c.postReg.GetSubmatches(creds)
if len(submatches) != 4 {
return nil, apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed)
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), creds)
}
signatureDateTime, err := time.Parse("20060102T150405Z", MultipartFormValue(r, "x-amz-date"))
@ -246,28 +339,40 @@ func (c *center) checkFormData(r *http.Request) (*Box, error) {
return nil, fmt.Errorf("failed to parse x-amz-date field: %w", err)
}
var addr oid.Address
if err = addr.DecodeString(strings.ReplaceAll(submatches["access_key_id"], "0", "/")); err != nil {
return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)
}
accessKeyID := submatches["access_key_id"]
box, err := c.cli.GetBox(r.Context(), addr)
cnrID, err := c.getAccessBoxContainer(accessKeyID)
if err != nil {
return nil, fmt.Errorf("get box: %w", err)
return nil, err
}
secret := box.Gate.AccessKey
box, attrs, err := c.cli.GetBox(r.Context(), cnrID, accessKeyID)
if err != nil {
return nil, fmt.Errorf("get box by accessKeyID '%s': %w", accessKeyID, err)
}
secret := box.Gate.SecretKey
service, region := submatches["service"], submatches["region"]
signature := signStr(secret, service, region, signatureDateTime, policy)
if signature != MultipartFormValue(r, "x-amz-signature") {
return nil, apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch)
signature := SignStr(secret, service, region, signatureDateTime, policy)
reqSignature := MultipartFormValue(r, "x-amz-signature")
if signature != reqSignature {
return nil, fmt.Errorf("%w: %s != %s", apierr.GetAPIError(apierr.ErrSignatureDoesNotMatch),
reqSignature, signature)
}
return &Box{AccessBox: box}, nil
return &middleware.Box{
AccessBox: box,
AuthHeaders: &middleware.AuthHeader{
AccessKeyID: accessKeyID,
Region: region,
SignatureV4: signature,
},
Attributes: attrs,
}, nil
}
func cloneRequest(r *http.Request, authHeader *authHeader) *http.Request {
func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
otherRequest := r.Clone(context.TODO())
otherRequest.Header = make(http.Header)
@ -288,44 +393,117 @@ func cloneRequest(r *http.Request, authHeader *authHeader) *http.Request {
return otherRequest
}
func (c *center) checkSign(authHeader *authHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
awsCreds := credentials.NewStaticCredentials(authHeader.AccessKeyID, box.Gate.AccessKey, "")
signer := v4.NewSigner(awsCreds)
func (c *Center) checkSign(ctx context.Context, authHeader *AuthHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
var signature string
switch authHeader.Preamble {
case signaturePreambleSigV4:
creds := aws.Credentials{
AccessKeyID: authHeader.AccessKeyID,
SecretAccessKey: box.Gate.SecretKey,
}
signer := v4.NewSigner(func(options *v4.SignerOptions) {
options.DisableURIPathEscaping = true
})
if authHeader.IsPresigned {
now := time.Now()
if signatureDateTime.Add(authHeader.Expiration).Before(now) {
return apiErrors.GetAPIError(apiErrors.ErrExpiredPresignRequest)
if err := checkPresignedDate(authHeader, signatureDateTime); err != nil {
return err
}
if now.Before(signatureDateTime) {
return apiErrors.GetAPIError(apiErrors.ErrBadRequest)
}
if _, err := signer.Presign(request, nil, authHeader.Service, authHeader.Region, authHeader.Expiration, signatureDateTime); err != nil {
signedURI, _, err := signer.PresignHTTP(ctx, creds, request, authHeader.PayloadHash, authHeader.Service, authHeader.Region, signatureDateTime)
if err != nil {
return fmt.Errorf("failed to pre-sign temporary HTTP request: %w", err)
}
signature = request.URL.Query().Get(AmzSignature)
u, err := url.ParseRequestURI(signedURI)
if err != nil {
return err
}
signature = u.Query().Get(AmzSignature)
} else {
signer.DisableURIPathEscaping = true
if _, err := signer.Sign(request, nil, authHeader.Service, authHeader.Region, signatureDateTime); err != nil {
if err := signer.SignHTTP(ctx, creds, request, authHeader.PayloadHash, authHeader.Service, authHeader.Region, signatureDateTime); err != nil {
return fmt.Errorf("failed to sign temporary HTTP request: %w", err)
}
signature = c.reg.GetSubmatches(request.Header.Get(AuthorizationHdr))["v4_signature"]
}
if authHeader.Signature != signature {
return fmt.Errorf("%w: %s != %s: headers %v", apierr.GetAPIError(apierr.ErrSignatureDoesNotMatch),
authHeader.Signature, signature, authHeader.SignedFields)
}
if authHeader.SignatureV4 != signature {
return apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch)
case signaturePreambleSigV4A:
signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
options.DisableURIPathEscaping = true
})
credAdapter := v4a.SymmetricCredentialAdaptor{
SymmetricProvider: credentials.NewStaticCredentialsProvider(authHeader.AccessKeyID, box.Gate.SecretKey, ""),
}
creds, err := credAdapter.RetrievePrivateKey(request.Context())
if err != nil {
return fmt.Errorf("failed to derive assymetric key from credentials: %w", err)
}
if !authHeader.IsPresigned {
return signer.VerifySignature(creds, request, authHeader.PayloadHash, authHeader.Service,
strings.Split(authHeader.Region, ","), signatureDateTime, authHeader.Signature)
}
if err = checkPresignedDate(authHeader, signatureDateTime); err != nil {
return err
}
return signer.VerifyPresigned(creds, request, authHeader.PayloadHash, authHeader.Service,
strings.Split(authHeader.Region, ","), signatureDateTime, authHeader.Signature)
default:
return fmt.Errorf("invalid preamble: %s", authHeader.Preamble)
}
return nil
}
func signStr(secret, service, region string, t time.Time, strToSign string) string {
func checkPresignedDate(authHeader *AuthHeader, signatureDateTime time.Time) error {
now := time.Now()
if signatureDateTime.Add(authHeader.Expiration).Before(now) {
return fmt.Errorf("%w: expired: now %s, signature %s", apierr.GetAPIError(apierr.ErrExpiredPresignRequest),
now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
}
if now.Before(signatureDateTime) {
return fmt.Errorf("%w: signature time from the future: now %s, signature %s", apierr.GetAPIError(apierr.ErrBadRequest),
now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
}
return nil
}
func SignStr(secret, service, region string, t time.Time, strToSign string) string {
creds := deriveKey(secret, service, region, t)
signature := hmacSHA256(creds, []byte(strToSign))
return hex.EncodeToString(signature)
}
func SignStrV4A(ctx context.Context, cred aws.Credentials, strToSign string) (string, error) {
credAdapter := v4a.SymmetricCredentialAdaptor{
SymmetricProvider: credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, ""),
}
creds, err := credAdapter.RetrievePrivateKey(ctx)
if err != nil {
return "", err
}
hash := sha256.New()
hash.Write([]byte(strToSign))
sig, err := creds.PrivateKey.Sign(rand.Reader, hash.Sum(nil), crypto.SHA256)
if err != nil {
return "", err
}
return hex.EncodeToString(sig), nil
}
func deriveKey(secret, service, region string, t time.Time) []byte {
hmacDate := hmacSHA256([]byte("AWS4"+secret), []byte(t.UTC().Format("20060102")))
hmacRegion := hmacSHA256(hmacDate, []byte(region))

View file

@ -0,0 +1,93 @@
//go:build gofuzz
// +build gofuzz
package auth
import (
"context"
"strings"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/aws/aws-sdk-go-v2/aws"
utils "github.com/trailofbits/go-fuzz-utils"
"go.uber.org/zap"
)
const (
fuzzSuccessExitCode = 0
fuzzFailExitCode = -1
)
func InitFuzzAuthenticate() {
}
func DoFuzzAuthenticate(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
var accessKeyAddr oid.Address
err = tp.Fill(accessKeyAddr)
if err != nil {
return fuzzFailExitCode
}
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
secretKey, err := tp.GetString()
if err != nil {
return fuzzFailExitCode
}
awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secretKey}
reqData := RequestData{
Method: "GET",
Endpoint: "http://localhost:8084",
Bucket: "my-bucket",
Object: "@obj/name",
}
presignData := PresignData{
Service: "s3",
Region: "spb",
Lifetime: 10 * time.Minute,
SignTime: time.Now().UTC(),
}
req, err := PresignRequest(context.Background(), awsCreds, reqData, presignData, zap.NewNop())
if req == nil {
return fuzzFailExitCode
}
expBox := &accessbox.Box{
Gate: &accessbox.GateData{
SecretKey: secretKey,
},
}
mock := newTokensFrostfsMock()
mock.addBox(accessKeyAddr, expBox)
c := &Center{
cli: mock,
reg: NewRegexpMatcher(AuthorizationFieldRegexp),
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
}
_, _ = c.Authenticate(req)
return fuzzSuccessExitCode
}
func FuzzAuthenticate(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzAuthenticate(data)
})
}

View file

@ -1,36 +1,71 @@
package auth
import (
"bytes"
"context"
"fmt"
"mime/multipart"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
type centerSettingsMock struct {
accessBoxContainer *cid.ID
}
func (c *centerSettingsMock) AccessBoxContainer() (cid.ID, bool) {
if c.accessBoxContainer == nil {
return cid.ID{}, false
}
return *c.accessBoxContainer, true
}
func TestAuthHeaderParse(t *testing.T) {
defaultHeader := "AWS4-HMAC-SHA256 Credential=oid0cid/20210809/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=2811ccb9e242f41426738fb1f"
center := &center{
reg: NewRegexpMatcher(authorizationFieldRegexp),
center := &Center{
reg: NewRegexpMatcher(AuthorizationFieldRegexp),
settings: &centerSettingsMock{},
}
for _, tc := range []struct {
header string
err error
expected *authHeader
expected *AuthHeader
}{
{
header: defaultHeader,
err: nil,
expected: &authHeader{
expected: &AuthHeader{
AccessKeyID: "oid0cid",
Service: "s3",
Region: "us-east-1",
SignatureV4: "2811ccb9e242f41426738fb1f",
Signature: "2811ccb9e242f41426738fb1f",
SignedFields: []string{"host", "x-amz-content-sha256", "x-amz-date"},
Date: "20210809",
Preamble: signaturePreambleSigV4,
},
},
{
@ -38,55 +73,13 @@ func TestAuthHeaderParse(t *testing.T) {
err: errors.GetAPIError(errors.ErrAuthorizationHeaderMalformed),
expected: nil,
},
{
header: strings.ReplaceAll(defaultHeader, "oid0cid", "oidcid"),
err: errors.GetAPIError(errors.ErrInvalidAccessKeyID),
expected: nil,
},
} {
authHeader, err := center.parseAuthHeader(tc.header)
require.Equal(t, tc.err, err, tc.header)
authHeader, err := center.parseAuthHeader(tc.header, nil)
require.ErrorIs(t, err, tc.err, tc.header)
require.Equal(t, tc.expected, authHeader, tc.header)
}
}
func TestAuthHeaderGetAddress(t *testing.T) {
defaulErr := errors.GetAPIError(errors.ErrInvalidAccessKeyID)
for _, tc := range []struct {
authHeader *authHeader
err error
}{
{
authHeader: &authHeader{
AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJM0HrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
},
err: nil,
},
{
authHeader: &authHeader{
AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJMHrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
},
err: defaulErr,
},
{
authHeader: &authHeader{
AccessKeyID: "oid0cid",
},
err: defaulErr,
},
{
authHeader: &authHeader{
AccessKeyID: "oidcid",
},
err: defaulErr,
},
} {
_, err := tc.authHeader.getAddress()
require.Equal(t, tc.err, err, tc.authHeader.AccessKeyID)
}
}
func TestSignature(t *testing.T) {
secret := "66be461c3cd429941c55daf42fad2b8153e5a2016ba89c9494d97677cc9d3872"
strToSign := "eyAiZXhwaXJhdGlvbiI6ICIyMDE1LTEyLTMwVDEyOjAwOjAwLjAwMFoiLAogICJjb25kaXRpb25zIjogWwogICAgeyJidWNrZXQiOiAiYWNsIn0sCiAgICBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS8iXSwKICAgIHsic3VjY2Vzc19hY3Rpb25fcmVkaXJlY3QiOiAiaHR0cDovL2xvY2FsaG9zdDo4MDg0L2FjbCJ9LAogICAgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLAogICAgeyJ4LWFtei1tZXRhLXV1aWQiOiAiMTQzNjUxMjM2NTEyNzQifSwKICAgIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLAoKICAgIHsiWC1BbXotQ3JlZGVudGlhbCI6ICI4Vmk0MVBIbjVGMXNzY2J4OUhqMXdmMUU2aERUYURpNndxOGhxTU05NllKdTA1QzVDeUVkVlFoV1E2aVZGekFpTkxXaTlFc3BiUTE5ZDRuR3pTYnZVZm10TS8yMDE1MTIyOS91cy1lYXN0LTEvczMvYXdzNF9yZXF1ZXN0In0sCiAgICB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sCiAgICB7IlgtQW16LURhdGUiOiAiMjAxNTEyMjlUMDAwMDAwWiIgfSwKICAgIHsieC1pZ25vcmUtdG1wIjogInNvbWV0aGluZyIgfQogIF0KfQ=="
@ -96,6 +89,622 @@ func TestSignature(t *testing.T) {
panic(err)
}
signature := signStr(secret, "s3", "us-east-1", signTime, strToSign)
signature := SignStr(secret, "s3", "us-east-1", signTime, strToSign)
require.Equal(t, "dfbe886241d9e369cf4b329ca0f15eb27306c97aa1022cc0bb5a914c4ef87634", signature)
}
func TestSignatureV4A(t *testing.T) {
accessKeyID := "2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1"
secretKey := "00637f53f842573aaa06c2164c598973cd986880987111416cf71f1619def537"
signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
options.DisableURIPathEscaping = true
options.Logger = zaptest.NewLogger(t)
options.LogSigning = true
})
credAdapter := v4a.SymmetricCredentialAdaptor{
SymmetricProvider: credentials.NewStaticCredentialsProvider(accessKeyID, secretKey, ""),
}
bodyStr := `
1b;chunk-signature=3045022100b63692a1b20759bdabd342011823427a8952df75c93174d98ad043abca8052e002201695228a91ba986171b8d0ad20856d3d94ca3614d0a90a50a531ba8e52447b9b**
Testing with the {sdk-java}
0;chunk-signature=30440220455885a2d4e9f705256ca6b0a5a22f7f784780ccbd1c0a371e5db3059c91745b022073259dd44746cbd63261d628a04d25be5a32a974c077c5c2d83c8157fb323b9f****
`
body := bytes.NewBufferString(bodyStr)
req, err := http.NewRequest("PUT", "http://localhost:8084/test/tmp", body)
require.NoError(t, err)
req.Header.Set("Amz-Sdk-Invocation-Id", "ca3a3cde-7d26-fce6-ed9c-82f7a0573824")
req.Header.Set("Amz-Sdk-Request", "attempt=2; max=2")
req.Header.Set("Authorization", "AWS4-ECDSA-P256-SHA256 Credential=2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1/20240904/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-region-set, Signature=30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7")
req.Header.Set("Content-Length", "360")
req.Header.Set("Content-Type", "text/plain; charset=UTF-8")
req.Header.Set("X-Amz-Content-Sha256", "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD")
req.Header.Set("X-Amz-Date", "20240904T133253Z")
req.Header.Set("X-Amz-Decoded-Content-Length", "27")
req.Header.Set("X-Amz-Region-Set", "us-east-1")
service := "s3"
regionSet := []string{"us-east-1"}
signature := "30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7"
signingTime, err := time.Parse("20060102T150405Z", "20240904T133253Z")
require.NoError(t, err)
creds, err := credAdapter.RetrievePrivateKey(req.Context())
require.NoError(t, err)
err = signer.VerifySignature(creds, req, "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD", service, regionSet, signingTime, signature)
require.NoError(t, err)
}
func TestSignatureV4(t *testing.T) {
signer := v4.NewSigner(func(options *v4.SignerOptions) {
options.DisableURIPathEscaping = true
options.Logger = zaptest.NewLogger(t)
options.LogSigning = true
})
creds := aws.Credentials{
AccessKeyID: "9CBEGH8T9XfLin2pg7LG8ZxBH1PnZc1yoioViKngrUnu0CbC2mcjpcw9t4Y7AS6zsF5cJGkDhXAx5hxFDKwfZzgj7",
SecretAccessKey: "8742218da7f905de24f633f44efe02f82c6d2a317ed6f99592627215d17816e3",
}
bodyStr := `tmp2
`
body := bytes.NewBufferString(bodyStr)
req, err := http.NewRequest("PUT", "http://localhost:8084/main/tmp2", body)
require.NoError(t, err)
req.Header.Set("Authorization", "AWS4-HMAC-SHA256 Credential=9CBEGH8T9XfLin2pg7LG8ZxBH1PnZc1yoioViKngrUnu0CbC2mcjpcw9t4Y7AS6zsF5cJGkDhXAx5hxFDKwfZzgj7/20241210/ru/s3/aws4_request, SignedHeaders=content-md5;host;x-amz-content-sha256;x-amz-date, Signature=945664a5bccfd37a1167ca5e718e2b883f68a7ccf7f1044768e7fe58b737b7ed")
req.Header.Set("Content-Length", "5")
req.Header.Set("User-Agent", "aws-cli/2.13.2 Python/3.11.4 Linux/6.4.5-x64v1-xanmod1 exe/x86_64.debian.11 prompt/off command/s3api.put-object")
req.Header.Set("Content-MD5", "DstU4KxdzBj5jTGltfyqgA==")
req.Header.Set("Expect", "101-continue")
req.Header.Set("X-Amz-Content-Sha256", "1f9b7417ee5445c41dbe904c3651eb0ba1c12fecff16c1bccd8df3db6e390b5f")
req.Header.Set("X-Amz-Date", "20241210T114611Z")
service := "s3"
region := "ru"
signature := "945664a5bccfd37a1167ca5e718e2b883f68a7ccf7f1044768e7fe58b737b7ed"
signingTime, err := time.Parse("20060102T150405Z", "20241210T114611Z")
require.NoError(t, err)
cloned := cloneRequest(req, &AuthHeader{SignedFields: []string{"content-md5", "host", "x-amz-content-sha256", "x-amz-date"}})
err = signer.SignHTTP(cloned.Context(), creds, cloned, "1f9b7417ee5445c41dbe904c3651eb0ba1c12fecff16c1bccd8df3db6e390b5f", service, region, signingTime)
require.NoError(t, err)
signatureComputed := NewRegexpMatcher(AuthorizationFieldRegexp).GetSubmatches(cloned.Header.Get(AuthorizationHdr))["v4_signature"]
require.Equal(t, signature, signatureComputed, "signature mismatched")
}
func TestCheckFormatContentSHA256(t *testing.T) {
defaultErr := errors.GetAPIError(errors.ErrContentSHA256Mismatch)
for _, tc := range []struct {
name string
hash string
error error
}{
{
name: "invalid hash format: length and character",
hash: "invalid-hash",
error: defaultErr,
},
{
name: "invalid hash format: length (63 characters)",
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f7",
error: defaultErr,
},
{
name: "invalid hash format: character",
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f7s",
error: defaultErr,
},
{
name: "invalid hash format: hash size",
hash: "5aadb45520dcd8726b2822a7a78bb53d794f557199d5d4abdedd2c55a4bd6ca73607605c558de3db80c8e86c3196484566163ed1327e82e8b6757d1932113cb8",
error: defaultErr,
},
{
name: "unsigned payload",
hash: "UNSIGNED-PAYLOAD",
error: nil,
},
{
name: "no hash",
hash: "",
error: nil,
},
{
name: "correct hash format",
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
error: nil,
},
} {
t.Run(tc.name, func(t *testing.T) {
err := checkFormatHashContentSHA256(tc.hash)
require.ErrorIs(t, err, tc.error)
})
}
}
type frostFSMock struct {
objects map[string]*object.Object
}
func newFrostFSMock() *frostFSMock {
return &frostFSMock{
objects: map[string]*object.Object{},
}
}
func (f *frostFSMock) GetCredsObject(_ context.Context, prm tokens.PrmGetCredsObject) (*object.Object, error) {
obj, ok := f.objects[prm.AccessKeyID]
if !ok {
return nil, fmt.Errorf("not found")
}
return obj, nil
}
func (f *frostFSMock) CreateObject(context.Context, tokens.PrmObjectCreate) (oid.ID, error) {
return oid.ID{}, fmt.Errorf("the mock method is not implemented")
}
func TestAuthenticate(t *testing.T) {
ctx := context.Background()
key, err := keys.NewPrivateKey()
require.NoError(t, err)
cfg := &cache.Config{
Size: 10,
Lifetime: 24 * time.Hour,
Logger: zaptest.NewLogger(t),
}
gateData := []*accessbox.GateData{{
BearerToken: &bearer.Token{},
GateKey: key.PublicKey(),
}}
accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"), false)
require.NoError(t, err)
data, err := accessBox.Marshal()
require.NoError(t, err)
var obj object.Object
obj.SetPayload(data)
addr := oidtest.Address()
obj.SetContainerID(addr.Container())
obj.SetID(addr.Object())
accessKeyID := getAccessKeyID(addr)
frostfs := newFrostFSMock()
frostfs.objects[accessKeyID] = &obj
awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secret.SecretKey}
defaultSigner := v4.NewSigner()
service, region := "s3", "default"
invalidValue := "invalid-value"
bigConfig := tokens.Config{
FrostFS: frostfs,
Key: key,
CacheConfig: cfg,
}
for _, tc := range []struct {
name string
prefixes []string
request *http.Request
err bool
errCode errors.ErrorCode
}{
{
name: "valid sign",
prefixes: []string{addr.Container().String()},
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
require.NoError(t, err)
return r
}(),
},
{
name: "no authorization header",
request: func() *http.Request {
return httptest.NewRequest(http.MethodPost, "/", nil)
}(),
err: true,
},
{
name: "invalid authorization header",
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
r.Header.Set(AuthorizationHdr, invalidValue)
return r
}(),
err: true,
errCode: errors.ErrAuthorizationHeaderMalformed,
},
{
name: "invalid access key id format",
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
cred := aws.Credentials{AccessKeyID: addr.Object().String(), SecretAccessKey: secret.SecretKey}
err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
require.NoError(t, err)
return r
}(),
err: true,
errCode: errors.ErrInvalidAccessKeyID,
},
{
name: "not allowed access key id",
prefixes: []string{addr.Object().String()},
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
require.NoError(t, err)
return r
}(),
err: true,
errCode: errors.ErrAccessDenied,
},
{
name: "invalid access key id value",
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
cred := aws.Credentials{AccessKeyID: accessKeyID[:len(accessKeyID)-4], SecretAccessKey: secret.SecretKey}
err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
require.NoError(t, err)
return r
}(),
err: true,
errCode: errors.ErrInvalidAccessKeyID,
},
{
name: "unknown access key id",
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
cred := aws.Credentials{AccessKeyID: addr.Object().String() + "0" + addr.Container().String(), SecretAccessKey: secret.SecretKey}
err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
require.NoError(t, err)
return r
}(),
err: true,
},
{
name: "invalid signature",
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
cred := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: "secret"}
err = v4.NewSigner().SignHTTP(ctx, cred, r, "", service, region, time.Now())
require.NoError(t, err)
return r
}(),
err: true,
errCode: errors.ErrSignatureDoesNotMatch,
},
{
name: "invalid signature - AmzDate",
prefixes: []string{addr.Container().String()},
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
r.Header.Set(AmzDate, invalidValue)
require.NoError(t, err)
return r
}(),
err: true,
},
{
name: "invalid AmzContentSHA256",
prefixes: []string{addr.Container().String()},
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
err = defaultSigner.SignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
r.Header.Set(AmzContentSHA256, invalidValue)
require.NoError(t, err)
return r
}(),
err: true,
},
{
name: "valid presign",
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
r.Header.Set(AmzExpires, "60")
signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, "", service, region, time.Now())
require.NoError(t, err)
r.URL, err = url.ParseRequestURI(signedURI)
require.NoError(t, err)
return r
}(),
},
{
name: "presign, bad X-Amz-Credential",
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
query := url.Values{
AmzAlgorithm: []string{"AWS4-HMAC-SHA256"},
AmzCredential: []string{invalidValue},
}
r.URL.RawQuery = query.Encode()
return r
}(),
err: true,
},
{
name: "presign, bad X-Amz-Expires",
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
r.Header.Set(AmzExpires, invalidValue)
signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now())
require.NoError(t, err)
r.URL, err = url.ParseRequestURI(signedURI)
require.NoError(t, err)
return r
}(),
err: true,
errCode: errors.ErrMalformedExpires,
},
{
name: "presign, expired",
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
r.Header.Set(AmzExpires, "60")
signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now().Add(-time.Minute))
require.NoError(t, err)
r.URL, err = url.ParseRequestURI(signedURI)
require.NoError(t, err)
return r
}(),
err: true,
errCode: errors.ErrExpiredPresignRequest,
},
{
name: "presign, signature from future",
request: func() *http.Request {
r := httptest.NewRequest(http.MethodPost, "/", nil)
r.Header.Set(AmzExpires, "60")
signedURI, _, err := defaultSigner.PresignHTTP(ctx, awsCreds, r, UnsignedPayload, service, region, time.Now().Add(time.Minute))
require.NoError(t, err)
r.URL, err = url.ParseRequestURI(signedURI)
require.NoError(t, err)
return r
}(),
err: true,
errCode: errors.ErrBadRequest,
},
} {
t.Run(tc.name, func(t *testing.T) {
creds := tokens.New(bigConfig)
cntr := New(creds, tc.prefixes, &centerSettingsMock{})
box, err := cntr.Authenticate(tc.request)
if tc.err {
require.Error(t, err)
if tc.errCode > 0 {
err = frosterr.UnwrapErr(err)
require.Equal(t, errors.GetAPIError(tc.errCode), err)
}
} else {
require.NoError(t, err)
require.Equal(t, accessKeyID, box.AuthHeaders.AccessKeyID)
require.Equal(t, region, box.AuthHeaders.Region)
require.Equal(t, secret.SecretKey, box.AccessBox.Gate.SecretKey)
}
})
}
}
func TestHTTPPostAuthenticate(t *testing.T) {
const (
policyBase64 = "eyJleHBpcmF0aW9uIjogIjIwMjUtMTItMDFUMTI6MDA6MDAuMDAwWiIsImNvbmRpdGlvbnMiOiBbCiBbInN0YXJ0cy13aXRoIiwgIiR4LWFtei1jcmVkZW50aWFsIiwgIiJdLAogWyJzdGFydHMtd2l0aCIsICIkeC1hbXotZGF0ZSIsICIiXQpdfQ=="
invalidValue = "invalid-value"
defaultFieldName = "file"
service = "s3"
region = "default"
)
key, err := keys.NewPrivateKey()
require.NoError(t, err)
cfg := &cache.Config{
Size: 10,
Lifetime: 24 * time.Hour,
Logger: zaptest.NewLogger(t),
}
gateData := []*accessbox.GateData{{
BearerToken: &bearer.Token{},
GateKey: key.PublicKey(),
}}
accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"), false)
require.NoError(t, err)
data, err := accessBox.Marshal()
require.NoError(t, err)
var obj object.Object
obj.SetPayload(data)
addr := oidtest.Address()
obj.SetContainerID(addr.Container())
obj.SetID(addr.Object())
accessKeyID := getAccessKeyID(addr)
frostfs := newFrostFSMock()
frostfs.objects[accessKeyID] = &obj
invalidAccessKeyID := oidtest.Address().String() + "0" + oidtest.Address().Object().String()
timeToSign := time.Now()
timeToSignStr := timeToSign.Format("20060102T150405Z")
bigConfig := tokens.Config{
FrostFS: frostfs,
Key: key,
CacheConfig: cfg,
}
for _, tc := range []struct {
name string
prefixes []string
request *http.Request
err bool
errCode errors.ErrorCode
}{
{
name: "HTTP POST valid",
request: func() *http.Request {
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
}(),
},
{
name: "HTTP POST valid with custom field name",
request: func() *http.Request {
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, "files")
}(),
},
{
name: "HTTP POST valid with field name with a capital letter",
request: func() *http.Request {
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, "File")
}(),
},
{
name: "HTTP POST invalid multipart form",
request: func() *http.Request {
req := httptest.NewRequest(http.MethodPost, "/", nil)
req.Header.Set(ContentTypeHdr, "multipart/form-data")
return req
}(),
err: true,
errCode: errors.ErrInvalidArgument,
},
{
name: "HTTP POST invalid signature date time",
request: func() *http.Request {
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
return getRequestWithMultipartForm(t, policyBase64, creds, invalidValue, sign, defaultFieldName)
}(),
err: true,
},
{
name: "HTTP POST invalid creds",
request: func() *http.Request {
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
return getRequestWithMultipartForm(t, policyBase64, invalidValue, timeToSignStr, sign, defaultFieldName)
}(),
err: true,
errCode: errors.ErrAuthorizationHeaderMalformed,
},
{
name: "HTTP POST missing policy",
request: func() *http.Request {
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
return getRequestWithMultipartForm(t, "", creds, timeToSignStr, sign, defaultFieldName)
}(),
err: true,
},
{
name: "HTTP POST invalid accessKeyId",
request: func() *http.Request {
creds := getCredsStr(invalidValue, timeToSignStr, region, service)
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
}(),
err: true,
},
{
name: "HTTP POST invalid accessKeyId - a non-existent box",
request: func() *http.Request {
creds := getCredsStr(invalidAccessKeyID, timeToSignStr, region, service)
sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
}(),
err: true,
},
{
name: "HTTP POST invalid signature",
request: func() *http.Request {
creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
sign := SignStr(secret.SecretKey, service, region, timeToSign, invalidValue)
return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
}(),
err: true,
errCode: errors.ErrSignatureDoesNotMatch,
},
} {
t.Run(tc.name, func(t *testing.T) {
creds := tokens.New(bigConfig)
cntr := New(creds, tc.prefixes, &centerSettingsMock{})
box, err := cntr.Authenticate(tc.request)
if tc.err {
require.Error(t, err)
if tc.errCode > 0 {
err = frosterr.UnwrapErr(err)
require.Equal(t, errors.GetAPIError(tc.errCode), err)
}
} else {
require.NoError(t, err)
require.Equal(t, secret.SecretKey, box.AccessBox.Gate.SecretKey)
require.Equal(t, accessKeyID, box.AuthHeaders.AccessKeyID)
}
})
}
}
func getCredsStr(accessKeyID, timeToSign, region, service string) string {
return accessKeyID + "/" + timeToSign + "/" + region + "/" + service + "/aws4_request"
}
func getRequestWithMultipartForm(t *testing.T, policy, creds, date, sign, fieldName string) *http.Request {
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
defer writer.Close()
err := writer.WriteField("policy", policy)
require.NoError(t, err)
err = writer.WriteField(AmzCredential, creds)
require.NoError(t, err)
err = writer.WriteField(AmzDate, date)
require.NoError(t, err)
err = writer.WriteField(AmzSignature, sign)
require.NoError(t, err)
_, err = writer.CreateFormFile(fieldName, "test.txt")
require.NoError(t, err)
req := httptest.NewRequest(http.MethodPost, "/", body)
req.Header.Set(ContentTypeHdr, writer.FormDataContentType())
return req
}
func getAccessKeyID(addr oid.Address) string {
return strings.ReplaceAll(addr.EncodeToString(), "/", "0")
}

102
api/auth/presign.go Normal file
View file

@ -0,0 +1,102 @@
package auth
import (
"context"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/smithy/encoding/httpbinding"
v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/credentials"
"go.uber.org/zap"
)
type RequestData struct {
Method string
Endpoint string
Bucket string
Object string
}
type PresignData struct {
Service string
Region string
Lifetime time.Duration
SignTime time.Time
Headers map[string]string
}
// PresignRequest forms pre-signed request to access objects without aws credentials.
func PresignRequest(ctx context.Context, creds aws.Credentials, reqData RequestData, presignData PresignData, log *zap.Logger) (*http.Request, error) {
urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, httpbinding.EscapePath(reqData.Bucket, false), httpbinding.EscapePath(reqData.Object, false))
req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil)
if err != nil {
return nil, fmt.Errorf("failed to create new request: %w", err)
}
for k, v := range presignData.Headers {
req.Header.Set(k, v) // maybe we should filter system header (or keep responsibility on caller)
}
req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))
req.Header.Set(AmzExpires, strconv.FormatFloat(presignData.Lifetime.Round(time.Second).Seconds(), 'f', 0, 64))
signer := v4.NewSigner(func(options *v4.SignerOptions) {
options.DisableURIPathEscaping = true
options.LogSigning = true
options.Logger = log
})
signedURI, _, err := signer.PresignHTTP(ctx, creds, req, presignData.Headers[AmzContentSHA256], presignData.Service, presignData.Region, presignData.SignTime)
if err != nil {
return nil, fmt.Errorf("presign: %w", err)
}
if req.URL, err = url.ParseRequestURI(signedURI); err != nil {
return nil, fmt.Errorf("parse signed URI: %w", err)
}
return req, nil
}
// PresignRequestV4a forms pre-signed request to access objects without aws credentials.
func PresignRequestV4a(cred aws.Credentials, reqData RequestData, presignData PresignData, log *zap.Logger) (*http.Request, error) {
urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, httpbinding.EscapePath(reqData.Bucket, false), httpbinding.EscapePath(reqData.Object, false))
req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil)
if err != nil {
return nil, fmt.Errorf("failed to create new request: %w", err)
}
for k, v := range presignData.Headers {
req.Header.Set(k, v) // maybe we should filter system header (or keep responsibility on caller)
}
req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))
req.Header.Set(AmzExpires, strconv.FormatFloat(presignData.Lifetime.Round(time.Second).Seconds(), 'f', 0, 64))
signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
options.DisableURIPathEscaping = true
options.LogSigning = true
options.Logger = log
})
credAdapter := v4a.SymmetricCredentialAdaptor{
SymmetricProvider: credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, ""),
}
creds, err := credAdapter.RetrievePrivateKey(req.Context())
if err != nil {
return nil, fmt.Errorf("failed to derive assymetric key from credentials: %w", err)
}
presignedURL, _, err := signer.PresignHTTP(req.Context(), creds, req, presignData.Headers[AmzContentSHA256], presignData.Service, []string{presignData.Region}, presignData.SignTime)
if err != nil {
return nil, fmt.Errorf("presign: %w", err)
}
return http.NewRequest(reqData.Method, presignedURL, nil)
}

202
api/auth/presign_test.go Normal file
View file

@ -0,0 +1,202 @@
package auth
import (
"context"
"fmt"
"net/http"
"strings"
"testing"
"time"
v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/aws/aws-sdk-go-v2/aws"
credentialsv2 "github.com/aws/aws-sdk-go-v2/credentials"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
var _ tokens.Credentials = (*credentialsMock)(nil)
type credentialsMock struct {
boxes map[string]*accessbox.Box
}
func newTokensFrostfsMock() *credentialsMock {
return &credentialsMock{
boxes: make(map[string]*accessbox.Box),
}
}
func (m credentialsMock) addBox(addr oid.Address, box *accessbox.Box) {
m.boxes[getAccessKeyID(addr)] = box
}
func (m credentialsMock) GetBox(_ context.Context, _ cid.ID, accessKeyID string) (*accessbox.Box, []object.Attribute, error) {
box, ok := m.boxes[accessKeyID]
if !ok {
return nil, nil, &apistatus.ObjectNotFound{}
}
return box, nil, nil
}
func (m credentialsMock) Put(context.Context, tokens.CredentialsParam) (oid.Address, error) {
return oid.Address{}, nil
}
func (m credentialsMock) Update(context.Context, tokens.CredentialsParam) (oid.Address, error) {
return oid.Address{}, nil
}
func TestCheckSign(t *testing.T) {
ctx := context.Background()
var accessKeyAddr oid.Address
err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
require.NoError(t, err)
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secretKey}
reqData := RequestData{
Method: "GET",
Endpoint: "http://localhost:8084",
Bucket: "my-bucket",
Object: "@obj/name",
}
presignData := PresignData{
Service: "s3",
Region: "spb",
Lifetime: 10 * time.Minute,
SignTime: time.Now().UTC(),
Headers: map[string]string{
ContentTypeHdr: "text/plain",
AmzContentSHA256: UnsignedPayload,
},
}
req, err := PresignRequest(ctx, awsCreds, reqData, presignData, zaptest.NewLogger(t))
require.NoError(t, err)
expBox := &accessbox.Box{
Gate: &accessbox.GateData{
SecretKey: secretKey,
},
}
mock := newTokensFrostfsMock()
mock.addBox(accessKeyAddr, expBox)
c := &Center{
cli: mock,
reg: NewRegexpMatcher(AuthorizationFieldRegexp),
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
settings: &centerSettingsMock{},
}
box, err := c.Authenticate(req)
require.NoError(t, err)
require.EqualValues(t, expBox, box.AccessBox)
}
func TestCheckSignV4a(t *testing.T) {
var accessKeyAddr oid.Address
err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
require.NoError(t, err)
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
awsCreds := aws.Credentials{AccessKeyID: accessKeyID, SecretAccessKey: secretKey}
reqData := RequestData{
Method: "GET",
Endpoint: "http://localhost:8084",
Bucket: "my-bucket",
Object: "@obj/name",
}
presignData := PresignData{
Service: "s3",
Region: "spb",
Lifetime: 10 * time.Minute,
SignTime: time.Now().UTC(),
Headers: map[string]string{
ContentTypeHdr: "text/plain",
},
}
req, err := PresignRequestV4a(awsCreds, reqData, presignData, zaptest.NewLogger(t))
require.NoError(t, err)
req.Header.Set(ContentTypeHdr, "text/plain")
expBox := &accessbox.Box{
Gate: &accessbox.GateData{
SecretKey: secretKey,
},
}
mock := newTokensFrostfsMock()
mock.addBox(accessKeyAddr, expBox)
c := &Center{
cli: mock,
regV4a: NewRegexpMatcher(authorizationFieldV4aRegexp),
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
}
box, err := c.Authenticate(req)
require.NoError(t, err)
require.EqualValues(t, expBox, box.AccessBox)
}
func TestPresignRequestV4a(t *testing.T) {
var accessKeyAddr oid.Address
err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
require.NoError(t, err)
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
signer := v4a.NewSigner(func(options *v4a.SignerOptions) {
options.DisableURIPathEscaping = true
options.LogSigning = true
options.Logger = zaptest.NewLogger(t)
})
credAdapter := v4a.SymmetricCredentialAdaptor{
SymmetricProvider: credentialsv2.NewStaticCredentialsProvider(accessKeyID, secretKey, ""),
}
creds, err := credAdapter.RetrievePrivateKey(context.TODO())
require.NoError(t, err)
signingTime := time.Now()
service := "s3"
regionSet := []string{"spb"}
req, err := http.NewRequest("GET", "http://localhost:8084/bucket/object", nil)
require.NoError(t, err)
req.Header.Set(AmzExpires, "600")
presignedURL, hdr, err := signer.PresignHTTP(req.Context(), creds, req, "", service, regionSet, signingTime)
require.NoError(t, err)
fmt.Println(presignedURL)
fmt.Println(hdr)
signature := req.URL.Query().Get(AmzSignature)
r, err := http.NewRequest("GET", presignedURL, nil)
require.NoError(t, err)
query := r.URL.Query()
query.Del(AmzSignature)
r.URL.RawQuery = query.Encode()
err = signer.VerifyPresigned(creds, r, "", service, regionSet, signingTime, signature)
require.NoError(t, err)
}


@ -0,0 +1,37 @@
// This file is part of https://github.com/aws/smithy-go/blob/f0c6adfdec6e40bb8bb2920a40d016943b4ad762/encoding/httpbinding/path_replace.go
package httpbinding
import (
"bytes"
"fmt"
)
// EscapePath escapes part of a URL path in Amazon style.
func EscapePath(path string, encodeSep bool) string {
var buf bytes.Buffer
for i := 0; i < len(path); i++ {
c := path[i]
if noEscape[c] || (c == '/' && !encodeSep) {
buf.WriteByte(c)
} else {
fmt.Fprintf(&buf, "%%%02X", c)
}
}
return buf.String()
}
var noEscape [256]bool
func init() {
for i := 0; i < len(noEscape); i++ {
// AWS expects every character except these to be escaped
noEscape[i] = (i >= 'A' && i <= 'Z') ||
(i >= 'a' && i <= 'z') ||
(i >= '0' && i <= '9') ||
i == '-' ||
i == '.' ||
i == '_' ||
i == '~'
}
}
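As a quick illustration of the escaping rule above (the input value is just an example): '@' falls outside the unreserved set and is percent-encoded in both modes, while '/' is kept only while encodeSep is false.

    fmt.Println(EscapePath("@obj/name", false)) // %40obj/name
    fmt.Println(EscapePath("@obj/name", true))  // %40obj%2Fname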


@ -1,87 +0,0 @@
package v4
import (
"strings"
)
// rules houses a set of rules needed for validation of a
// string value.
type rules []rule
// rule interface allows for more flexible rules and just simply
// checks whether or not a value adheres to that rule.
type rule interface {
IsValid(value string) bool
}
// IsValid will iterate through all rules and see if any rules
// apply to the value and supports nested rules.
func (r rules) IsValid(value string) bool {
for _, rule := range r {
if rule.IsValid(value) {
return true
}
}
return false
}
// mapRule generic rule for maps.
type mapRule map[string]struct{}
// IsValid for the map rule satisfies whether it exists in the map.
func (m mapRule) IsValid(value string) bool {
_, ok := m[value]
return ok
}
// whitelist is a generic rule for whitelisting.
type whitelist struct {
rule
}
// IsValid for whitelist checks if the value is within the whitelist.
func (w whitelist) IsValid(value string) bool {
return w.rule.IsValid(value)
}
// blacklist is a generic rule for blacklisting.
type blacklist struct {
rule
}
// IsValid for blacklist checks if the value is not within the blacklist.
func (b blacklist) IsValid(value string) bool {
return !b.rule.IsValid(value)
}
type patterns []string
// IsValid for patterns checks each pattern and returns if a match has
// been found.
func (p patterns) IsValid(value string) bool {
for _, pattern := range p {
if HasPrefixFold(value, pattern) {
return true
}
}
return false
}
// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
// under Unicode case-folding.
func HasPrefixFold(s, prefix string) bool {
return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
}
// inclusiveRules rules allow for rules to depend on one another.
type inclusiveRules []rule
// IsValid will return true if all rules are true.
func (r inclusiveRules) IsValid(value string) bool {
for _, rule := range r {
if !rule.IsValid(value) {
return false
}
}
return true
}


@ -1,7 +0,0 @@
package v4
// WithUnsignedPayload will enable and set the UnsignedPayload field to
// true of the signer.
func WithUnsignedPayload(v4 *Signer) {
v4.UnsignedPayload = true
}


@ -1,14 +0,0 @@
//go:build go1.7
// +build go1.7
package v4
import (
"net/http"
"github.com/aws/aws-sdk-go/aws"
)
func requestContext(r *http.Request) aws.Context {
return r.Context()
}


@ -1,63 +0,0 @@
package v4
import (
"encoding/hex"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/credentials"
)
type credentialValueProvider interface {
Get() (credentials.Value, error)
}
// StreamSigner implements signing of event stream encoded payloads.
type StreamSigner struct {
region string
service string
credentials credentialValueProvider
prevSig []byte
}
// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages.
func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner {
return &StreamSigner{
region: region,
service: service,
credentials: credentials,
prevSig: seedSignature,
}
}
// GetSignature takes an event stream encoded headers and payload and returns a signature.
func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) {
credValue, err := s.credentials.Get()
if err != nil {
return nil, err
}
sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date)
keyPath := buildSigningScope(s.region, s.service, date)
stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date)
signature := hmacSHA256(sigKey, []byte(stringToSign))
s.prevSig = signature
return signature, nil
}
func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string {
return strings.Join([]string{
"AWS4-HMAC-SHA256-PAYLOAD",
formatTime(date),
scope,
hex.EncodeToString(prevSig),
hex.EncodeToString(hashSHA256(headers)),
hex.EncodeToString(hashSHA256(payload)),
}, "\n")
}


@ -1,25 +0,0 @@
//go:build go1.5
// +build go1.5
package v4
import (
"net/url"
"strings"
)
func getURIPath(u *url.URL) string {
var uri string
if len(u.Opaque) > 0 {
uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
} else {
uri = u.EscapedPath()
}
if len(uri) == 0 {
uri = "/"
}
return uri
}


@ -1,856 +0,0 @@
// Package v4 implements signing for AWS V4 signer
//
// Provides request signing for request that need to be signed with
// AWS V4 Signatures.
//
// # Standalone Signer
//
// Generally using the signer outside of the SDK should not require any additional
// logic when using Go v1.5 or higher. The signer does this by taking advantage
// of the URL.EscapedPath method. If your request URI requires additional escaping
// you may need to use the URL.Opaque to define what the raw URI should be sent
// to the service as.
//
// The signer will first check the URL.Opaque field, and use its value if set.
// The signer does require the URL.Opaque field to be set in the form of:
//
// "//<hostname>/<path>"
//
// // e.g.
// "//example.com/some/path"
//
// The leading "//" and hostname are required or the URL.Opaque escaping will
// not work correctly.
//
// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
// method and using the returned value. If you're using Go v1.4 you must set
// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
// Go v1.5 the signer will fallback to URL.Path.
//
// AWS v4 signature validation requires that the canonical string's URI path
// element must be the URI escaped form of the HTTP request's path.
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
//
// The Go HTTP client will perform escaping automatically on the request. Some
// of these escaping may cause signature validation errors because the HTTP
// request differs from the URI path or query that the signature was generated.
// https://golang.org/pkg/net/url/#URL.EscapedPath
//
// Because of this, it is recommended that when using the signer outside of the
// SDK that explicitly escaping the request prior to being signed is preferable,
// and will help prevent signature validation errors. This can be done by setting
// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
// call URL.EscapedPath() if Opaque is not set.
//
// If signing a request intended for HTTP2 server, and you're using Go 1.6.2
// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
// request URL. https://github.com/golang/go/issues/16847 points to a bug in
// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
// message. URL.Opaque generally will force Go to make requests with absolute URL.
// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
// or url.EscapedPath will ignore the RawPath escaping.
//
// Test `TestStandaloneSign` provides a complete example of using the signer
// outside of the SDK and pre-escaping the URI path.
package v4
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/rest"
)
const (
authorizationHeader = "Authorization"
authHeaderSignatureElem = "Signature="
signatureQueryKey = "X-Amz-Signature"
authHeaderPrefix = "AWS4-HMAC-SHA256"
timeFormat = "20060102T150405Z"
shortTimeFormat = "20060102"
awsV4Request = "aws4_request"
// emptyStringSHA256 is a SHA256 of an empty string.
emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
)
var ignoredPresignHeaders = rules{
blacklist{
mapRule{
authorizationHeader: struct{}{},
"User-Agent": struct{}{},
"X-Amzn-Trace-Id": struct{}{},
},
},
}
// drop User-Agent header to be compatible with aws sdk java v1.
var ignoredHeaders = rules{
blacklist{
mapRule{
authorizationHeader: struct{}{},
"X-Amzn-Trace-Id": struct{}{},
},
},
}
// requiredSignedHeaders is a whitelist for building canonical headers.
var requiredSignedHeaders = rules{
whitelist{
mapRule{
"Cache-Control": struct{}{},
"Content-Disposition": struct{}{},
"Content-Encoding": struct{}{},
"Content-Language": struct{}{},
"Content-Md5": struct{}{},
"Content-Type": struct{}{},
"Expires": struct{}{},
"If-Match": struct{}{},
"If-Modified-Since": struct{}{},
"If-None-Match": struct{}{},
"If-Unmodified-Since": struct{}{},
"Range": struct{}{},
"X-Amz-Acl": struct{}{},
"X-Amz-Copy-Source": struct{}{},
"X-Amz-Copy-Source-If-Match": struct{}{},
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
"X-Amz-Copy-Source-If-None-Match": struct{}{},
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
"X-Amz-Copy-Source-Range": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Grant-Full-control": struct{}{},
"X-Amz-Grant-Read": struct{}{},
"X-Amz-Grant-Read-Acp": struct{}{},
"X-Amz-Grant-Write": struct{}{},
"X-Amz-Grant-Write-Acp": struct{}{},
"X-Amz-Metadata-Directive": struct{}{},
"X-Amz-Mfa": struct{}{},
"X-Amz-Request-Payer": struct{}{},
"X-Amz-Server-Side-Encryption": struct{}{},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Storage-Class": struct{}{},
"X-Amz-Tagging": struct{}{},
"X-Amz-Website-Redirect-Location": struct{}{},
"X-Amz-Content-Sha256": struct{}{},
},
},
patterns{"X-Amz-Meta-"},
}
// allowedQueryHoisting is a whitelist for building query headers. The boolean value
// represents whether or not it is a pattern.
var allowedQueryHoisting = inclusiveRules{
blacklist{requiredSignedHeaders},
patterns{"X-Amz-"},
}
// Signer applies AWS v4 signing to given request. Use this to sign requests
// that need to be signed with AWS V4 Signatures.
type Signer struct {
// The authentication credentials the request will be signed against.
// This value must be set to sign requests.
Credentials *credentials.Credentials
// Sets the log level the signer should use when reporting information to
// the logger. If the logger is nil nothing will be logged. See
// aws.LogLevelType for more information on available logging levels
//
// By default nothing will be logged.
Debug aws.LogLevelType
// The logger loging information will be written to. If there the logger
// is nil, nothing will be logged.
Logger aws.Logger
// Disables the Signer's moving HTTP header key/value pairs from the HTTP
// request header to the request's query string. This is most commonly used
// with pre-signed requests preventing headers from being added to the
// request's query string.
DisableHeaderHoisting bool
// Disables the automatic escaping of the URI path of the request for the
// signature's canonical string's path. For services that do not need additional
// escaping then use this to disable the signer escaping the path.
//
// S3 is an example of a service that does not need additional escaping.
//
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
DisableURIPathEscaping bool
// Disables the automatic setting of the HTTP request's Body field with the
// io.ReadSeeker passed in to the signer. This is useful if you're using a
// custom wrapper around the body for the io.ReadSeeker and want to preserve
// the Body value on the Request.Body.
//
// This does run the risk of signing a request with a body that will not be
// sent in the request. Need to ensure that the underlying data of the Body
// values are the same.
DisableRequestBodyOverwrite bool
// currentTimeFn returns the time value which represents the current time.
// This value should only be used for testing. If it is nil the default
// time.Now will be used.
currentTimeFn func() time.Time
// UnsignedPayload will prevent signing of the payload. This will only
// work for services that have support for this.
UnsignedPayload bool
}
// NewSigner returns a Signer pointer configured with the credentials and optional
// option values provided. If no options are provided the Signer will use its
// default configuration.
func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
v4 := &Signer{
Credentials: credentials,
}
for _, option := range options {
option(v4)
}
return v4
}
type signingCtx struct {
ServiceName string
Region string
Request *http.Request
Body io.ReadSeeker
Query url.Values
Time time.Time
ExpireTime time.Duration
SignedHeaderVals http.Header
DisableURIPathEscaping bool
credValues credentials.Value
isPresign bool
unsignedPayload bool
bodyDigest string
signedHeaders string
canonicalHeaders string
canonicalString string
credentialString string
stringToSign string
signature string
}
// Sign signs AWS v4 requests with the provided body, service name, region the
// request is made to, and time the request is signed at. The signTime allows
// you to specify that a request is signed for the future, and cannot be
// used until then.
//
// Returns a list of HTTP headers that were included in the signature or an
// error if signing the request failed. Generally for signed requests this value
// is not needed as the full request context will be captured by the http.Request
// value. It is included for reference though.
//
// Sign will set the request's Body to be the `body` parameter passed in. If
// the body is not already an io.ReadCloser, it will be wrapped within one. If
// a `nil` body parameter passed to Sign, the request's Body field will be
// also set to nil. It's important to note that this functionality will not
// change the request's ContentLength of the request.
//
// Sign differs from Presign in that it will sign the request using HTTP
// header values. This type of signing is intended for http.Request values that
// will not be shared, or are shared in a way the header values on the request
// will not be lost.
//
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
// generated. To bypass the signer computing the hash you can set the
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
// only compute the hash if the request header value is empty.
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
return v4.signWithBody(r, body, service, region, 0, false, signTime)
}
// Presign signs AWS v4 requests with the provided body, service name, region
// the request is made to, and time the request is signed at. The signTime
// allows you to specify that a request is signed for the future, and cannot
// be used until then.
//
// Returns a list of HTTP headers that were included in the signature or an
// error if signing the request failed. For presigned requests these headers
// and their values must be included on the HTTP request when it is made. This
// is helpful to know what header values need to be shared with the party the
// presigned request will be distributed to.
//
// Presign differs from Sign in that it will sign the request using query string
// instead of header values. This allows you to share the Presigned Request's
// URL with third parties, or distribute it throughout your system with minimal
// dependencies.
//
// Presign also takes an exp value which is the duration the
// signed request will be valid after the signing time. This allows you to
// set when the request will expire.
//
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
// generated. To bypass the signer computing the hash you can set the
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
// only compute the hash if the request header value is empty.
//
// Presigning a S3 request will not compute the body's SHA256 hash by default.
// This is done due to the general use case for S3 presigned URLs is to share
// PUT/GET capabilities. If you would like to include the body's SHA256 in the
// presigned request's signature you can set the "X-Amz-Content-Sha256"
// HTTP header and that will be included in the request's signature.
func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
return v4.signWithBody(r, body, service, region, exp, true, signTime)
}
func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
currentTimeFn := v4.currentTimeFn
if currentTimeFn == nil {
currentTimeFn = time.Now
}
ctx := &signingCtx{
Request: r,
Body: body,
Query: r.URL.Query(),
Time: signTime,
ExpireTime: exp,
isPresign: isPresign,
ServiceName: service,
Region: region,
DisableURIPathEscaping: v4.DisableURIPathEscaping,
unsignedPayload: v4.UnsignedPayload,
}
for key := range ctx.Query {
sort.Strings(ctx.Query[key])
}
if ctx.isRequestSigned() {
ctx.Time = currentTimeFn()
ctx.handlePresignRemoval()
}
var err error
ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r))
if err != nil {
return http.Header{}, err
}
ctx.sanitizeHostForHeader()
ctx.assignAmzQueryValues()
if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
return nil, err
}
// If the request is not presigned the body should be attached to it. This
// prevents the confusion of wanting to send a signed request without
// the body the request was signed for attached.
if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
var reader io.ReadCloser
if body != nil {
var ok bool
if reader, ok = body.(io.ReadCloser); !ok {
reader = io.NopCloser(body)
}
}
r.Body = reader
}
if v4.Debug.Matches(aws.LogDebugWithSigning) {
v4.logSigningInfo(ctx)
}
return ctx.SignedHeaderVals, nil
}
func (ctx *signingCtx) sanitizeHostForHeader() {
request.SanitizeHostForHeader(ctx.Request)
}
func (ctx *signingCtx) handlePresignRemoval() {
if !ctx.isPresign {
return
}
// The credentials have expired for this request. The current signing
// is invalid, and the request needs to be re-signed because it will otherwise fail.
ctx.removePresign()
// Update the request's query string to ensure the values stay in
// sync in the case retrieving the new credentials fails.
ctx.Request.URL.RawQuery = ctx.Query.Encode()
}
func (ctx *signingCtx) assignAmzQueryValues() {
if ctx.isPresign {
ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
if ctx.credValues.SessionToken != "" {
ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
} else {
ctx.Query.Del("X-Amz-Security-Token")
}
return
}
if ctx.credValues.SessionToken != "" {
ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
}
}
// SignRequestHandler is a named request handler the SDK will use to sign
// service client request with using the V4 signature.
var SignRequestHandler = request.NamedHandler{
Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
}
// SignSDKRequest signs an AWS request with the V4 signature. This
// request handler should only be used with the SDK's built in service client's
// API operation requests.
//
// This function should not be used on its own, but in conjunction with
// an AWS service client's API operation call. To sign a standalone request
// not created by a service client's API operation method use the "Sign" or
// "Presign" functions of the "Signer" type.
//
// If the credentials of the request's config are set to
// credentials.AnonymousCredentials the request will not be signed.
func SignSDKRequest(req *request.Request) {
SignSDKRequestWithCurrentTime(req, time.Now)
}
// BuildNamedHandler will build a generic handler for signing.
func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
return request.NamedHandler{
Name: name,
Fn: func(req *request.Request) {
SignSDKRequestWithCurrentTime(req, time.Now, opts...)
},
}
}
// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
// function passed in. Behaves the same as SignSDKRequest with the exception
// the request is signed with the value returned by the current time function.
func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
// If the request does not need to be signed ignore the signing of the
// request if the AnonymousCredentials object is used.
if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
region := req.ClientInfo.SigningRegion
if region == "" {
region = aws.StringValue(req.Config.Region)
}
name := req.ClientInfo.SigningName
if name == "" {
name = req.ClientInfo.ServiceName
}
v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
v4.Debug = req.Config.LogLevel.Value()
v4.Logger = req.Config.Logger
v4.DisableHeaderHoisting = req.NotHoist
v4.currentTimeFn = curTimeFn
if name == "s3" {
// S3 service should not have any escaping applied
v4.DisableURIPathEscaping = true
}
// Prevents setting the HTTPRequest's Body. Since the Body could be
// wrapped in a custom io.Closer that we do not want to be stomped
// on top of by the signer.
v4.DisableRequestBodyOverwrite = true
})
for _, opt := range opts {
opt(v4)
}
curTime := curTimeFn()
signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
)
if err != nil {
req.Error = err
req.SignedHeaderVals = nil
return
}
req.SignedHeaderVals = signedHeaders
req.LastSignedAt = curTime
}
const logSignInfoMsg = `DEBUG: Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
signedURLMsg := ""
if ctx.isPresign {
signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
}
msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
v4.Logger.Log(msg)
}
func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
ctx.buildTime() // no depends
ctx.buildCredentialString() // no depends
if err := ctx.buildBodyDigest(); err != nil {
return err
}
unsignedHeaders := ctx.Request.Header
if ctx.isPresign {
if !disableHeaderHoisting {
var urlValues url.Values
urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
for k := range urlValues {
ctx.Query[k] = urlValues[k]
}
}
}
if ctx.isPresign {
ctx.buildCanonicalHeaders(ignoredPresignHeaders, unsignedHeaders)
} else {
ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
}
ctx.buildCanonicalString() // depends on canon headers / signed headers
ctx.buildStringToSign() // depends on canon string
ctx.buildSignature() // depends on string to sign
if ctx.isPresign {
ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature
} else {
parts := []string{
authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
"SignedHeaders=" + ctx.signedHeaders,
authHeaderSignatureElem + ctx.signature,
}
ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", "))
}
return nil
}
// GetSignedRequestSignature attempts to extract the signature of the request.
// Returning an error if the request is unsigned, or unable to extract the
// signature.
func GetSignedRequestSignature(r *http.Request) ([]byte, error) {
if auth := r.Header.Get(authorizationHeader); len(auth) != 0 {
ps := strings.Split(auth, ", ")
for _, p := range ps {
if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 {
sig := p[len(authHeaderSignatureElem):]
if len(sig) == 0 {
return nil, fmt.Errorf("invalid request signature authorization header")
}
return hex.DecodeString(sig)
}
}
}
if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 {
return hex.DecodeString(sig)
}
return nil, fmt.Errorf("request not signed")
}
func (ctx *signingCtx) buildTime() {
if ctx.isPresign {
duration := int64(ctx.ExpireTime / time.Second)
ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time))
ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
} else {
ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time))
}
}
func (ctx *signingCtx) buildCredentialString() {
ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time)
if ctx.isPresign {
ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
}
}
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
query := url.Values{}
unsignedHeaders := http.Header{}
for k, h := range header {
if r.IsValid(k) {
query[k] = h
} else {
unsignedHeaders[k] = h
}
}
return query, unsignedHeaders
}
func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
var headers []string
headers = append(headers, "host")
for k, v := range header {
if !r.IsValid(k) {
continue // ignored header
}
if ctx.SignedHeaderVals == nil {
ctx.SignedHeaderVals = make(http.Header)
}
lowerCaseKey := strings.ToLower(k)
if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
// include additional values
ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
continue
}
headers = append(headers, lowerCaseKey)
ctx.SignedHeaderVals[lowerCaseKey] = v
}
sort.Strings(headers)
ctx.signedHeaders = strings.Join(headers, ";")
if ctx.isPresign {
ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
}
headerValues := make([]string, len(headers))
for i, k := range headers {
if k == "host" {
if ctx.Request.Host != "" {
headerValues[i] = "host:" + ctx.Request.Host
} else {
headerValues[i] = "host:" + ctx.Request.URL.Host
}
} else {
headerValues[i] = k + ":" +
strings.Join(ctx.SignedHeaderVals[k], ",")
}
}
stripExcessSpaces(headerValues)
ctx.canonicalHeaders = strings.Join(headerValues, "\n")
}
func (ctx *signingCtx) buildCanonicalString() {
ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
uri := getURIPath(ctx.Request.URL)
if !ctx.DisableURIPathEscaping {
uri = rest.EscapePath(uri, false)
}
ctx.canonicalString = strings.Join([]string{
ctx.Request.Method,
uri,
ctx.Request.URL.RawQuery,
ctx.canonicalHeaders + "\n",
ctx.signedHeaders,
ctx.bodyDigest,
}, "\n")
}
func (ctx *signingCtx) buildStringToSign() {
ctx.stringToSign = strings.Join([]string{
authHeaderPrefix,
formatTime(ctx.Time),
ctx.credentialString,
hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))),
}, "\n")
}
func (ctx *signingCtx) buildSignature() {
creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time)
signature := hmacSHA256(creds, []byte(ctx.stringToSign))
ctx.signature = hex.EncodeToString(signature)
}
func (ctx *signingCtx) buildBodyDigest() error {
hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
if hash == "" {
includeSHA256Header := ctx.unsignedPayload ||
ctx.ServiceName == "s3" ||
ctx.ServiceName == "glacier"
s3Presign := ctx.isPresign && ctx.ServiceName == "s3"
if ctx.unsignedPayload || s3Presign {
hash = "UNSIGNED-PAYLOAD"
includeSHA256Header = !s3Presign
} else if ctx.Body == nil {
hash = emptyStringSHA256
} else {
if !aws.IsReaderSeekable(ctx.Body) {
return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
}
hashBytes, err := makeSha256Reader(ctx.Body)
if err != nil {
return err
}
hash = hex.EncodeToString(hashBytes)
}
if includeSHA256Header {
ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
}
}
ctx.bodyDigest = hash
return nil
}
// isRequestSigned returns if the request is currently signed or presigned.
func (ctx *signingCtx) isRequestSigned() bool {
if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
return true
}
if ctx.Request.Header.Get("Authorization") != "" {
return true
}
return false
}
// unsign removes signing flags for both signed and presigned requests.
func (ctx *signingCtx) removePresign() {
ctx.Query.Del("X-Amz-Algorithm")
ctx.Query.Del("X-Amz-Signature")
ctx.Query.Del("X-Amz-Security-Token")
ctx.Query.Del("X-Amz-Date")
ctx.Query.Del("X-Amz-Expires")
ctx.Query.Del("X-Amz-Credential")
ctx.Query.Del("X-Amz-SignedHeaders")
}
func hmacSHA256(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
func hashSHA256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) {
hash := sha256.New()
start, err := reader.Seek(0, io.SeekCurrent)
if err != nil {
return nil, err
}
defer func() {
// ensure error is returned if unable to seek back to start of payload.
_, err = reader.Seek(start, io.SeekStart)
}()
// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
size, err := aws.SeekerLen(reader)
if err != nil {
_, _ = io.Copy(hash, reader)
} else {
_, _ = io.CopyN(hash, reader, size)
}
return hash.Sum(nil), nil
}
const doubleSpace = " "
// stripExcessSpaces will rewrite the passed in slice's string values to not
// contain multiple side-by-side spaces.
func stripExcessSpaces(vals []string) {
var j, k, l, m, spaces int
for i, str := range vals {
// Trim trailing spaces
for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
}
// Trim leading spaces
for k = 0; k < j && str[k] == ' '; k++ {
}
str = str[k : j+1]
// Strip multiple spaces.
j = strings.Index(str, doubleSpace)
if j < 0 {
vals[i] = str
continue
}
buf := []byte(str)
for k, m, l = j, j, len(buf); k < l; k++ {
if buf[k] == ' ' {
if spaces == 0 {
// First space.
buf[m] = buf[k]
m++
}
spaces++
} else {
// End of multiple spaces.
spaces = 0
buf[m] = buf[k]
m++
}
}
vals[i] = string(buf[:m])
}
}
func buildSigningScope(region, service string, dt time.Time) string {
return strings.Join([]string{
formatShortTime(dt),
region,
service,
awsV4Request,
}, "/")
}
func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte {
hmacDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt)))
hmacRegion := hmacSHA256(hmacDate, []byte(region))
hmacService := hmacSHA256(hmacRegion, []byte(service))
signingKey := hmacSHA256(hmacService, []byte(awsV4Request))
return signingKey
}
func formatShortTime(dt time.Time) string {
return dt.UTC().Format(shortTimeFormat)
}
func formatTime(dt time.Time) string {
return dt.UTC().Format(timeFormat)
}


@ -0,0 +1,144 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/credentials.go
// with changes:
// * use `time.Now()` instead of `sdk.NowTime()`
package v4a
import (
"context"
"crypto/ecdsa"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
)
// Credentials is Context, ECDSA, and Optional Session Token that can be used
// to sign requests using SigV4a
type Credentials struct {
Context string
PrivateKey *ecdsa.PrivateKey
SessionToken string
// Time the credentials will expire.
CanExpire bool
Expires time.Time
}
// Expired returns if the credentials have expired.
func (v Credentials) Expired() bool {
if v.CanExpire {
return !v.Expires.After(time.Now())
}
return false
}
// HasKeys returns if the credentials keys are set.
func (v Credentials) HasKeys() bool {
return len(v.Context) > 0 && v.PrivateKey != nil
}
// SymmetricCredentialAdaptor wraps a SigV4 AccessKey/SecretKey provider and adapts the credentials
// to an ECDSA PrivateKey for signing with SigV4a
type SymmetricCredentialAdaptor struct {
SymmetricProvider aws.CredentialsProvider
asymmetric atomic.Value
m sync.Mutex
}
// Retrieve retrieves symmetric credentials from the underlying provider.
func (s *SymmetricCredentialAdaptor) Retrieve(ctx context.Context) (aws.Credentials, error) {
symCreds, err := s.retrieveFromSymmetricProvider(ctx)
if err != nil {
return aws.Credentials{}, err
}
if asymCreds := s.getCreds(); asymCreds == nil {
return symCreds, nil
}
s.m.Lock()
defer s.m.Unlock()
asymCreds := s.getCreds()
if asymCreds == nil {
return symCreds, nil
}
// if the context does not match the access key id clear it
if asymCreds.Context != symCreds.AccessKeyID {
s.asymmetric.Store((*Credentials)(nil))
}
return symCreds, nil
}
// RetrievePrivateKey returns credentials suitable for SigV4a signing
func (s *SymmetricCredentialAdaptor) RetrievePrivateKey(ctx context.Context) (Credentials, error) {
if asymCreds := s.getCreds(); asymCreds != nil {
return *asymCreds, nil
}
s.m.Lock()
defer s.m.Unlock()
if asymCreds := s.getCreds(); asymCreds != nil {
return *asymCreds, nil
}
symmetricCreds, err := s.retrieveFromSymmetricProvider(ctx)
if err != nil {
return Credentials{}, fmt.Errorf("failed to retrieve symmetric credentials: %v", err)
}
privateKey, err := deriveKeyFromAccessKeyPair(symmetricCreds.AccessKeyID, symmetricCreds.SecretAccessKey)
if err != nil {
return Credentials{}, fmt.Errorf("failed to derive assymetric key from credentials")
}
creds := Credentials{
Context: symmetricCreds.AccessKeyID,
PrivateKey: privateKey,
SessionToken: symmetricCreds.SessionToken,
CanExpire: symmetricCreds.CanExpire,
Expires: symmetricCreds.Expires,
}
s.asymmetric.Store(&creds)
return creds, nil
}
func (s *SymmetricCredentialAdaptor) getCreds() *Credentials {
v := s.asymmetric.Load()
if v == nil {
return nil
}
c := v.(*Credentials)
if c != nil && c.HasKeys() && !c.Expired() {
return c
}
return nil
}
func (s *SymmetricCredentialAdaptor) retrieveFromSymmetricProvider(ctx context.Context) (aws.Credentials, error) {
credentials, err := s.SymmetricProvider.Retrieve(ctx)
if err != nil {
return aws.Credentials{}, err
}
return credentials, nil
}
// CredentialsProvider is the interface for a provider to retrieve credentials
// to sign requests with.
type CredentialsProvider interface {
RetrievePrivateKey(context.Context) (Credentials, error)
}
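A minimal usage sketch for the adaptor above (the static access key pair is a placeholder; NewStaticCredentialsProvider comes from github.com/aws/aws-sdk-go-v2/credentials, already used by the gateway code earlier in this diff):

    adaptor := &SymmetricCredentialAdaptor{
        SymmetricProvider: credentials.NewStaticCredentialsProvider("EXAMPLE-KEY-ID", "EXAMPLE-SECRET", ""),
    }
    // The first call derives an ECDSA key from the access key pair; later calls hit the cache.
    creds, err := adaptor.RetrievePrivateKey(context.Background())
    if err != nil {
        // handle error
    }
    _ = creds.PrivateKey // consumed by the SigV4a signer's PresignHTTP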


@ -0,0 +1,79 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/credentials_test.go
package v4a
import (
"context"
"fmt"
"testing"
"github.com/aws/aws-sdk-go-v2/aws"
)
type rotatingCredsProvider struct {
count int
fail chan struct{}
}
func (r *rotatingCredsProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
select {
case <-r.fail:
return aws.Credentials{}, fmt.Errorf("rotatingCredsProvider error")
default:
}
credentials := aws.Credentials{
AccessKeyID: fmt.Sprintf("ACCESS_KEY_ID_%d", r.count),
SecretAccessKey: fmt.Sprintf("SECRET_ACCESS_KEY_%d", r.count),
SessionToken: fmt.Sprintf("SESSION_TOKEN_%d", r.count),
}
return credentials, nil
}
func TestSymmetricCredentialAdaptor(t *testing.T) {
provider := &rotatingCredsProvider{
count: 0,
fail: make(chan struct{}),
}
adaptor := &SymmetricCredentialAdaptor{SymmetricProvider: provider}
if symCreds, err := adaptor.Retrieve(context.Background()); err != nil {
t.Fatalf("expect no error, got %v", err)
} else if !symCreds.HasKeys() {
t.Fatalf("expect symmetric credentials to have keys")
}
if load := adaptor.asymmetric.Load(); load != nil {
t.Errorf("expect asymmetric credentials to be nil")
}
if asymCreds, err := adaptor.RetrievePrivateKey(context.Background()); err != nil {
t.Fatalf("expect no error, got %v", err)
} else if !asymCreds.HasKeys() {
t.Fatalf("expect asymmetric credentials to have keys")
}
if _, err := adaptor.Retrieve(context.Background()); err != nil {
t.Fatalf("expect no error, got %v", err)
}
if load := adaptor.asymmetric.Load(); load.(*Credentials) == nil {
t.Errorf("expect asymmetric credentials to be not nil")
}
provider.count++
if _, err := adaptor.Retrieve(context.Background()); err != nil {
t.Fatalf("expect no error, got %v", err)
}
if load := adaptor.asymmetric.Load(); load.(*Credentials) != nil {
t.Errorf("expect asymmetric credentials to be nil")
}
close(provider.fail) // All requests to the original provider will now fail from this point-on.
_, err := adaptor.Retrieve(context.Background())
if err == nil {
t.Error("expect error, got nil")
}
}


@ -0,0 +1,32 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/compare.go
package crypto
import "fmt"
// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison
// of the two byte slices, assuming they represent a big-endian number.
//
// error if len(x) != len(y)
// -1 if x < y
// 0 if x == y
// +1 if x > y
func ConstantTimeByteCompare(x, y []byte) (int, error) {
if len(x) != len(y) {
return 0, fmt.Errorf("slice lengths do not match")
}
xLarger, yLarger := 0, 0
for i := 0; i < len(x); i++ {
xByte, yByte := int(x[i]), int(y[i])
x := ((yByte - xByte) >> 8) & 1
y := ((xByte - yByte) >> 8) & 1
xLarger |= x &^ yLarger
yLarger |= y &^ xLarger
}
return xLarger - yLarger, nil
}
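A tiny example of the comparison contract described above (inputs chosen for illustration):

    r, _ := ConstantTimeByteCompare([]byte{0x01, 0x02}, []byte{0x01, 0x03})
    // r == -1: equal lengths, and x < y when read as big-endian numbers
    _, err := ConstantTimeByteCompare([]byte{0x01}, []byte{0x01, 0x02})
    // err != nil: mismatched lengths are rejected before any comparison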


@ -0,0 +1,62 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/compare_test.go
package crypto
import (
"bytes"
"math/big"
"testing"
)
func TestConstantTimeByteCompare(t *testing.T) {
cases := []struct {
x, y []byte
r int
expectErr bool
}{
{x: []byte{}, y: []byte{}, r: 0},
{x: []byte{40}, y: []byte{30}, r: 1},
{x: []byte{30}, y: []byte{40}, r: -1},
{x: []byte{60, 40, 30, 10, 20}, y: []byte{50, 30, 20, 0, 10}, r: 1},
{x: []byte{50, 30, 20, 0, 10}, y: []byte{60, 40, 30, 10, 20}, r: -1},
{x: nil, y: []byte{}, r: 0},
{x: []byte{}, y: nil, r: 0},
{x: []byte{}, y: []byte{10}, expectErr: true},
{x: []byte{10}, y: []byte{}, expectErr: true},
{x: []byte{10, 20}, y: []byte{10}, expectErr: true},
}
for _, tt := range cases {
compare, err := ConstantTimeByteCompare(tt.x, tt.y)
if (err != nil) != tt.expectErr {
t.Fatalf("expectErr=%v, got %v", tt.expectErr, err)
}
if e, a := tt.r, compare; e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
}
func BenchmarkConstantTimeCompare(b *testing.B) {
x, y := big.NewInt(1023), big.NewInt(1024)
b.ResetTimer()
for i := 0; i < b.N; i++ {
ConstantTimeByteCompare(x.Bytes(), y.Bytes())
}
}
func BenchmarkCompare(b *testing.B) {
x, y := big.NewInt(1023).Bytes(), big.NewInt(1024).Bytes()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bytes.Compare(x, y)
}
}
func mustBigInt(s string) *big.Int {
b, ok := (&big.Int{}).SetString(s, 16)
if !ok {
panic("can't parse as big.Int")
}
return b
}


@ -0,0 +1,115 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/ecc.go
package crypto
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/hmac"
"encoding/asn1"
"encoding/binary"
"fmt"
"hash"
"math"
"math/big"
)
type ecdsaSignature struct {
R, S *big.Int
}
// ECDSAKey takes the given elliptic curve, and private key (d) byte slice
// and returns the private ECDSA key.
func ECDSAKey(curve elliptic.Curve, d []byte) *ecdsa.PrivateKey {
return ECDSAKeyFromPoint(curve, (&big.Int{}).SetBytes(d))
}
// ECDSAKeyFromPoint takes the given elliptic curve and point and returns the
// private and public keypair
func ECDSAKeyFromPoint(curve elliptic.Curve, d *big.Int) *ecdsa.PrivateKey {
pX, pY := curve.ScalarBaseMult(d.Bytes())
privKey := &ecdsa.PrivateKey{
PublicKey: ecdsa.PublicKey{
Curve: curve,
X: pX,
Y: pY,
},
D: d,
}
return privKey
}
// ECDSAPublicKey takes the provide curve and (x, y) coordinates and returns
// *ecdsa.PublicKey. Returns an error if the given points are not on the curve.
func ECDSAPublicKey(curve elliptic.Curve, x, y []byte) (*ecdsa.PublicKey, error) {
xPoint := (&big.Int{}).SetBytes(x)
yPoint := (&big.Int{}).SetBytes(y)
if !curve.IsOnCurve(xPoint, yPoint) {
return nil, fmt.Errorf("point(%v, %v) is not on the given curve", xPoint.String(), yPoint.String())
}
return &ecdsa.PublicKey{
Curve: curve,
X: xPoint,
Y: yPoint,
}, nil
}
// VerifySignature takes the provided public key, hash, and asn1 encoded signature and returns
// whether the given signature is valid.
func VerifySignature(key *ecdsa.PublicKey, hash []byte, signature []byte) (bool, error) {
var ecdsaSignature ecdsaSignature
_, err := asn1.Unmarshal(signature, &ecdsaSignature)
if err != nil {
return false, err
}
return ecdsa.Verify(key, hash, ecdsaSignature.R, ecdsaSignature.S), nil
}
// HMACKeyDerivation provides an implementation of a NIST-800-108 KDF (Key Derivation Function) in Counter Mode.
// For the purposes of this implementation HMAC is used as the PRF (Pseudorandom function), where the value of
// `r` is defined as a 4 byte counter.
func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, context []byte) ([]byte, error) {
// verify that we won't overflow the counter
n := int64(math.Ceil((float64(bitLen) / 8) / float64(hash().Size())))
if n > 0x7FFFFFFF {
return nil, fmt.Errorf("unable to derive key of size %d using 32-bit counter", bitLen)
}
// verify the requested bit length is not larger than the length encoding size
if int64(bitLen) > 0x7FFFFFFF {
return nil, fmt.Errorf("bitLen is greater than 32-bits")
}
fixedInput := bytes.NewBuffer(nil)
fixedInput.Write(label)
fixedInput.WriteByte(0x00)
fixedInput.Write(context)
if err := binary.Write(fixedInput, binary.BigEndian, int32(bitLen)); err != nil {
return nil, fmt.Errorf("failed to write bit length to fixed input string: %v", err)
}
var output []byte
h := hmac.New(hash, key)
for i := int64(1); i <= n; i++ {
h.Reset()
if err := binary.Write(h, binary.BigEndian, int32(i)); err != nil {
return nil, err
}
_, err := h.Write(fixedInput.Bytes())
if err != nil {
return nil, err
}
output = append(output, h.Sum(nil)...)
}
return output[:bitLen/8], nil
}
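For instance, deriving a 256-bit key with SHA-256 as the PRF might look like the sketch below; the secret, label and context strings are placeholders, not the constants the SDK actually uses.

    key, err := HMACKeyDerivation(sha256.New, 256, []byte("EXAMPLE-SECRET"), []byte("EXAMPLE-LABEL"), []byte("EXAMPLE-CONTEXT"))
    if err != nil {
        // handle error
    }
    // len(key) == 32; the SigV4a code feeds a derivation like this into ECDSAKey to obtain the signing key.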


@ -0,0 +1,279 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/crypto/ecc_test.go
package crypto
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"io"
"testing"
)
func TestECDSAPublicKeyDerivation_P256(t *testing.T) {
d := []byte{
0xc9, 0x80, 0x68, 0x98, 0xa0, 0x33, 0x49, 0x16, 0xc8, 0x60, 0x74, 0x88, 0x80, 0xa5, 0x41, 0xf0,
0x93, 0xb5, 0x79, 0xa9, 0xb1, 0xf3, 0x29, 0x34, 0xd8, 0x6c, 0x36, 0x3c, 0x39, 0x80, 0x03, 0x57,
}
x := []byte{
0xd0, 0x72, 0x0d, 0xc6, 0x91, 0xaa, 0x80, 0x09, 0x6b, 0xa3, 0x2f, 0xed, 0x1c, 0xb9, 0x7c, 0x2b,
0x62, 0x06, 0x90, 0xd0, 0x6d, 0xe0, 0x31, 0x7b, 0x86, 0x18, 0xd5, 0xce, 0x65, 0xeb, 0x72, 0x8f,
}
y := []byte{
0x96, 0x81, 0xb5, 0x17, 0xb1, 0xcd, 0xa1, 0x7d, 0x0d, 0x83, 0xd3, 0x35, 0xd9, 0xc4, 0xa8, 0xa9,
0xa9, 0xb0, 0xb1, 0xb3, 0xc7, 0x10, 0x6d, 0x8f, 0x3c, 0x72, 0xbc, 0x50, 0x93, 0xdc, 0x27, 0x5f,
}
testKeyDerivation(t, elliptic.P256(), d, x, y)
}
func TestECDSAPublicKeyDerivation_P384(t *testing.T) {
d := []byte{
0x53, 0x94, 0xf7, 0x97, 0x3e, 0xa8, 0x68, 0xc5, 0x2b, 0xf3, 0xff, 0x8d, 0x8c, 0xee, 0xb4, 0xdb,
0x90, 0xa6, 0x83, 0x65, 0x3b, 0x12, 0x48, 0x5d, 0x5f, 0x62, 0x7c, 0x3c, 0xe5, 0xab, 0xd8, 0x97,
0x8f, 0xc9, 0x67, 0x3d, 0x14, 0xa7, 0x1d, 0x92, 0x57, 0x47, 0x93, 0x16, 0x62, 0x49, 0x3c, 0x37,
}
x := []byte{
0xfd, 0x3c, 0x84, 0xe5, 0x68, 0x9b, 0xed, 0x27, 0x0e, 0x60, 0x1b, 0x3d, 0x80, 0xf9, 0x0d, 0x67,
0xa9, 0xae, 0x45, 0x1c, 0xce, 0x89, 0x0f, 0x53, 0xe5, 0x83, 0x22, 0x9a, 0xd0, 0xe2, 0xee, 0x64,
0x56, 0x11, 0xfa, 0x99, 0x36, 0xdf, 0xa4, 0x53, 0x06, 0xec, 0x18, 0x06, 0x67, 0x74, 0xaa, 0x24,
}
y := []byte{
0xb8, 0x3c, 0xa4, 0x12, 0x6c, 0xfc, 0x4c, 0x4d, 0x1d, 0x18, 0xa4, 0xb6, 0xc2, 0x1c, 0x7f, 0x69,
0x9d, 0x51, 0x23, 0xdd, 0x9c, 0x24, 0xf6, 0x6f, 0x83, 0x38, 0x46, 0xee, 0xb5, 0x82, 0x96, 0x19,
0x6b, 0x42, 0xec, 0x06, 0x42, 0x5d, 0xb5, 0xb7, 0x0a, 0x4b, 0x81, 0xb7, 0xfc, 0xf7, 0x05, 0xa0,
}
testKeyDerivation(t, elliptic.P384(), d, x, y)
}
func TestECDSAKnownSigningValue_P256(t *testing.T) {
d := []byte{
0x51, 0x9b, 0x42, 0x3d, 0x71, 0x5f, 0x8b, 0x58, 0x1f, 0x4f, 0xa8, 0xee, 0x59, 0xf4, 0x77, 0x1a,
0x5b, 0x44, 0xc8, 0x13, 0x0b, 0x4e, 0x3e, 0xac, 0xca, 0x54, 0xa5, 0x6d, 0xda, 0x72, 0xb4, 0x64,
}
testKnownSigningValue(t, elliptic.P256(), d)
}
func TestECDSAKnownSigningValue_P384(t *testing.T) {
d := []byte{
0x53, 0x94, 0xf7, 0x97, 0x3e, 0xa8, 0x68, 0xc5, 0x2b, 0xf3, 0xff, 0x8d, 0x8c, 0xee, 0xb4, 0xdb,
0x90, 0xa6, 0x83, 0x65, 0x3b, 0x12, 0x48, 0x5d, 0x5f, 0x62, 0x7c, 0x3c, 0xe5, 0xab, 0xd8, 0x97,
0x8f, 0xc9, 0x67, 0x3d, 0x14, 0xa7, 0x1d, 0x92, 0x57, 0x47, 0x93, 0x16, 0x62, 0x49, 0x3c, 0x37,
}
testKnownSigningValue(t, elliptic.P384(), d)
}
func testKeyDerivation(t *testing.T, curve elliptic.Curve, d, expectedX, expectedY []byte) {
privKey := ECDSAKey(curve, d)
if e, a := d, privKey.D.Bytes(); bytes.Compare(e, a) != 0 {
t.Errorf("expected % x, got % x", e, a)
}
if e, a := expectedX, privKey.X.Bytes(); bytes.Compare(e, a) != 0 {
t.Errorf("expected % x, got % x", e, a)
}
if e, a := expectedY, privKey.Y.Bytes(); bytes.Compare(e, a) != 0 {
t.Errorf("expected % x, got % x", e, a)
}
}
func testKnownSigningValue(t *testing.T, curve elliptic.Curve, d []byte) {
signingKey := ECDSAKey(curve, d)
message := []byte{
0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
}
sha256Hash := sha256.New()
_, err := io.Copy(sha256Hash, bytes.NewReader(message))
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
msgHash := sha256Hash.Sum(nil)
msgSignature, err := signingKey.Sign(rand.Reader, msgHash, crypto.SHA256)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
verified, err := VerifySignature(&signingKey.PublicKey, msgHash, msgSignature)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
if !verified {
t.Fatalf("failed to verify message msgSignature")
}
}
func TestECDSAInvalidSignature_P256(t *testing.T) {
testInvalidSignature(t, elliptic.P256())
}
func TestECDSAInvalidSignature_P384(t *testing.T) {
testInvalidSignature(t, elliptic.P384())
}
func TestECDSAGenKeySignature_P256(t *testing.T) {
testGenKeySignature(t, elliptic.P256())
}
func TestECDSAGenKeySignature_P384(t *testing.T) {
testGenKeySignature(t, elliptic.P384())
}
func testInvalidSignature(t *testing.T, curve elliptic.Curve) {
privateKey, err := ecdsa.GenerateKey(curve, rand.Reader)
if err != nil {
t.Fatalf("failed to generate key: %v", err)
}
message := []byte{
0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
}
sha256Hash := sha256.New()
_, err = io.Copy(sha256Hash, bytes.NewReader(message))
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
msgHash := sha256Hash.Sum(nil)
msgSignature, err := privateKey.Sign(rand.Reader, msgHash, crypto.SHA256)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
byteToFlip := 15
switch msgSignature[byteToFlip] {
case 0:
msgSignature[byteToFlip] = 0x0a
default:
msgSignature[byteToFlip] &^= msgSignature[byteToFlip]
}
verified, err := VerifySignature(&privateKey.PublicKey, msgHash, msgSignature)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
if verified {
t.Fatalf("expected message verification to fail")
}
}
func testGenKeySignature(t *testing.T, curve elliptic.Curve) {
privateKey, err := ecdsa.GenerateKey(curve, rand.Reader)
if err != nil {
t.Fatalf("failed to generate key: %v", err)
}
message := []byte{
0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
}
sha256Hash := sha256.New()
_, err = io.Copy(sha256Hash, bytes.NewReader(message))
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
msgHash := sha256Hash.Sum(nil)
msgSignature, err := privateKey.Sign(rand.Reader, msgHash, crypto.SHA256)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
verified, err := VerifySignature(&privateKey.PublicKey, msgHash, msgSignature)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
if !verified {
t.Fatalf("expected message verification to fail")
}
}
func TestECDSASignatureFormat(t *testing.T) {
asn1Signature := []byte{
0x30, 0x45, 0x02, 0x21, 0x00, 0xd7, 0xc5, 0xb9, 0x9e, 0x0b, 0xb1, 0x1a, 0x1f, 0x32, 0xda, 0x66, 0xe0, 0xff,
0x59, 0xb7, 0x8a, 0x5e, 0xb3, 0x94, 0x9c, 0x23, 0xb3, 0xfc, 0x1f, 0x18, 0xcc, 0xf6, 0x61, 0x67, 0x8b, 0xf1,
0xc1, 0x02, 0x20, 0x26, 0x4d, 0x8b, 0x7c, 0xaa, 0x52, 0x4c, 0xc0, 0x2e, 0x5f, 0xf6, 0x7e, 0x24, 0x82, 0xe5,
0xfb, 0xcb, 0xc7, 0x9b, 0x83, 0x0d, 0x19, 0x7e, 0x7a, 0x40, 0x37, 0x87, 0xdd, 0x1c, 0x93, 0x13, 0xc4,
}
x := []byte{
0x1c, 0xcb, 0xe9, 0x1c, 0x07, 0x5f, 0xc7, 0xf4, 0xf0, 0x33, 0xbf, 0xa2, 0x48, 0xdb, 0x8f, 0xcc,
0xd3, 0x56, 0x5d, 0xe9, 0x4b, 0xbf, 0xb1, 0x2f, 0x3c, 0x59, 0xff, 0x46, 0xc2, 0x71, 0xbf, 0x83,
}
y := []byte{
0xce, 0x40, 0x14, 0xc6, 0x88, 0x11, 0xf9, 0xa2, 0x1a, 0x1f, 0xdb, 0x2c, 0x0e, 0x61, 0x13, 0xe0,
0x6d, 0xb7, 0xca, 0x93, 0xb7, 0x40, 0x4e, 0x78, 0xdc, 0x7c, 0xcd, 0x5c, 0xa8, 0x9a, 0x4c, 0xa9,
}
publicKey, err := ECDSAPublicKey(elliptic.P256(), x, y)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
message := []byte{
0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9,
0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf,
0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd,
0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5,
0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a,
0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c,
0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f,
0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8,
}
hash := sha256.New()
_, err = io.Copy(hash, bytes.NewReader(message))
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
msgHash := hash.Sum(nil)
verifySignature, err := VerifySignature(publicKey, msgHash, asn1Signature)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
if !verifySignature {
t.Fatalf("failed to verify signature")
}
}


@ -0,0 +1,38 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/const.go
package v4
const (
// EmptyStringSHA256 is the hex encoded sha256 value of an empty string
EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
// UnsignedPayload indicates that the request payload body is unsigned
UnsignedPayload = "UNSIGNED-PAYLOAD"
// AmzAlgorithmKey indicates the signing algorithm
AmzAlgorithmKey = "X-Amz-Algorithm"
// AmzSecurityTokenKey indicates the security token to be used with temporary credentials
AmzSecurityTokenKey = "X-Amz-Security-Token"
// AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'
AmzDateKey = "X-Amz-Date"
// AmzCredentialKey is the access key ID and credential scope
AmzCredentialKey = "X-Amz-Credential"
// AmzSignedHeadersKey is the set of headers signed for the request
AmzSignedHeadersKey = "X-Amz-SignedHeaders"
// AmzSignatureKey is the query parameter to store the SigV4 signature
AmzSignatureKey = "X-Amz-Signature"
// TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
TimeFormat = "20060102T150405Z"
// ShortTimeFormat is the shortened time format used in the credential scope
ShortTimeFormat = "20060102"
// ContentSHAKey is the SHA256 of request body
ContentSHAKey = "X-Amz-Content-Sha256"
)


@ -0,0 +1,90 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/header_rules.go
package v4
import (
"strings"
)
// Rules houses a set of Rule needed for validation of a
// string value
type Rules []Rule
// Rule interface allows for more flexible rules and just simply
// checks whether or not a value adheres to that Rule
type Rule interface {
IsValid(value string) bool
}
// IsValid will iterate through all rules and see if any rules
// apply to the value and supports nested rules
func (r Rules) IsValid(value string) bool {
for _, rule := range r {
if rule.IsValid(value) {
return true
}
}
return false
}
// MapRule generic Rule for maps
type MapRule map[string]struct{}
// IsValid for the map Rule satisfies whether it exists in the map
func (m MapRule) IsValid(value string) bool {
_, ok := m[value]
return ok
}
// AllowList is a generic Rule for whitelisting
type AllowList struct {
Rule
}
// IsValid for AllowList checks if the value is within the AllowList
func (w AllowList) IsValid(value string) bool {
return w.Rule.IsValid(value)
}
// DenyList is a generic Rule for blacklisting
type DenyList struct {
Rule
}
// IsValid for DenyList checks that the value is not within the DenyList
func (b DenyList) IsValid(value string) bool {
return !b.Rule.IsValid(value)
}
// Patterns is a list of strings to match against
type Patterns []string
// IsValid for Patterns checks each pattern and returns if a match has
// been found
func (p Patterns) IsValid(value string) bool {
for _, pattern := range p {
if HasPrefixFold(value, pattern) {
return true
}
}
return false
}
// InclusiveRules allows rules to depend on one another
type InclusiveRules []Rule
// IsValid will return true if all rules are true
func (r InclusiveRules) IsValid(value string) bool {
for _, rule := range r {
if !rule.IsValid(value) {
return false
}
}
return true
}
// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
// under Unicode case-folding.
func HasPrefixFold(s, prefix string) bool {
return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
}
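A minimal illustration of how these rules compose (an editorial sketch, not part of the upstream file; it assumes a _test.go file in this package with the testing package imported). A DenyList wrapping a MapRule reports IsValid == true only for headers that are not in the map, which is how the signer decides whether a header is eligible for signing.
func TestDenyListRuleSketch(t *testing.T) {
	ignored := Rules{DenyList{MapRule{"Authorization": struct{}{}}}}
	if ignored.IsValid("Authorization") {
		t.Error("Authorization is in the deny list and must not be eligible for signing")
	}
	if !ignored.IsValid("Content-Type") {
		t.Error("Content-Type is not in the deny list and should be eligible for signing")
	}
}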


@ -0,0 +1,83 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/header.go
// with changes:
// * drop User-Agent header from ignored
package v4
// IgnoredPresignedHeaders is a list of headers that are ignored during signing
var IgnoredPresignedHeaders = Rules{
DenyList{
MapRule{
"Authorization": struct{}{},
"User-Agent": struct{}{},
"X-Amzn-Trace-Id": struct{}{},
},
},
}
// IgnoredHeaders is a list of headers that are ignored during signing
// drop User-Agent header to be compatible with aws sdk java v1.
var IgnoredHeaders = Rules{
DenyList{
MapRule{
"Authorization": struct{}{},
//"User-Agent": struct{}{},
"X-Amzn-Trace-Id": struct{}{},
},
},
}
// RequiredSignedHeaders is a whitelist for Build canonical headers.
var RequiredSignedHeaders = Rules{
AllowList{
MapRule{
"Cache-Control": struct{}{},
"Content-Disposition": struct{}{},
"Content-Encoding": struct{}{},
"Content-Language": struct{}{},
"Content-Md5": struct{}{},
"Content-Type": struct{}{},
"Expires": struct{}{},
"If-Match": struct{}{},
"If-Modified-Since": struct{}{},
"If-None-Match": struct{}{},
"If-Unmodified-Since": struct{}{},
"Range": struct{}{},
"X-Amz-Acl": struct{}{},
"X-Amz-Copy-Source": struct{}{},
"X-Amz-Copy-Source-If-Match": struct{}{},
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
"X-Amz-Copy-Source-If-None-Match": struct{}{},
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
"X-Amz-Copy-Source-Range": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Grant-Full-control": struct{}{},
"X-Amz-Grant-Read": struct{}{},
"X-Amz-Grant-Read-Acp": struct{}{},
"X-Amz-Grant-Write": struct{}{},
"X-Amz-Grant-Write-Acp": struct{}{},
"X-Amz-Metadata-Directive": struct{}{},
"X-Amz-Mfa": struct{}{},
"X-Amz-Request-Payer": struct{}{},
"X-Amz-Server-Side-Encryption": struct{}{},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Storage-Class": struct{}{},
"X-Amz-Website-Redirect-Location": struct{}{},
"X-Amz-Content-Sha256": struct{}{},
"X-Amz-Tagging": struct{}{},
},
},
Patterns{"X-Amz-Meta-"},
}
// AllowedQueryHoisting is a whitelist of headers that may be hoisted into the
// query string when presigning.
var AllowedQueryHoisting = InclusiveRules{
DenyList{RequiredSignedHeaders},
Patterns{"X-Amz-"},
}


@ -0,0 +1,15 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/hmac.go
package v4
import (
"crypto/hmac"
"crypto/sha256"
)
// HMACSHA256 computes a HMAC-SHA256 of data given the provided key.
func HMACSHA256(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}


@ -0,0 +1,77 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/host.go
package v4
import (
"net/http"
"strings"
)
// SanitizeHostForHeader removes default port from host and updates request.Host
func SanitizeHostForHeader(r *http.Request) {
host := getHost(r)
port := portOnly(host)
if port != "" && isDefaultPort(r.URL.Scheme, port) {
r.Host = stripPort(host)
}
}
// Returns host from request
func getHost(r *http.Request) string {
if r.Host != "" {
return r.Host
}
return r.URL.Host
}
// Hostname returns u.Host, without any port number.
//
// If Host is an IPv6 literal with a port number, Hostname returns the
// IPv6 literal without the square brackets. IPv6 literals may include
// a zone identifier.
//
// Copied from the Go 1.8 standard library (net/url)
func stripPort(hostport string) string {
colon := strings.IndexByte(hostport, ':')
if colon == -1 {
return hostport
}
if i := strings.IndexByte(hostport, ']'); i != -1 {
return strings.TrimPrefix(hostport[:i], "[")
}
return hostport[:colon]
}
// Port returns the port part of u.Host, without the leading colon.
// If u.Host doesn't contain a port, Port returns an empty string.
//
// Copied from the Go 1.8 standard library (net/url)
func portOnly(hostport string) string {
colon := strings.IndexByte(hostport, ':')
if colon == -1 {
return ""
}
if i := strings.Index(hostport, "]:"); i != -1 {
return hostport[i+len("]:"):]
}
if strings.Contains(hostport, "]") {
return ""
}
return hostport[colon+len(":"):]
}
// Returns true if the specified URI is using the standard port
// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
func isDefaultPort(scheme, port string) bool {
if port == "" {
return true
}
lowerCaseScheme := strings.ToLower(scheme)
if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
return true
}
return false
}
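An editorial sketch (not part of the upstream file; assumes a _test.go file in this package importing net/http and testing): SanitizeHostForHeader rewrites Host only when the port is the scheme's default.
func TestSanitizeHostForHeaderSketch(t *testing.T) {
	req, _ := http.NewRequest("GET", "https://example.com:443/bucket", nil)
	SanitizeHostForHeader(req)
	if req.Host != "example.com" {
		t.Errorf("expected the default HTTPS port to be stripped, got %q", req.Host)
	}
	req, _ = http.NewRequest("GET", "https://example.com:8443/bucket", nil)
	SanitizeHostForHeader(req)
	if req.Host != "" {
		t.Errorf("expected Host to stay untouched for a non-default port, got %q", req.Host)
	}
}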


@ -0,0 +1,38 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/time.go
package v4
import "time"
// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing.
type SigningTime struct {
time.Time
timeFormat string
shortTimeFormat string
}
// NewSigningTime creates a new SigningTime given a time.Time
func NewSigningTime(t time.Time) SigningTime {
return SigningTime{
Time: t,
}
}
// TimeFormat provides a time formatted in the X-Amz-Date format.
func (m *SigningTime) TimeFormat() string {
return m.format(&m.timeFormat, TimeFormat)
}
// ShortTimeFormat provides the time formatted as 20060102.
func (m *SigningTime) ShortTimeFormat() string {
return m.format(&m.shortTimeFormat, ShortTimeFormat)
}
func (m *SigningTime) format(target *string, format string) string {
if len(*target) > 0 {
return *target
}
v := m.Time.Format(format)
*target = v
return v
}
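A small editorial sketch (not part of the upstream file; assumes a _test.go file in this package importing testing and time) showing the two cached formats for a fixed timestamp:
func TestSigningTimeFormatsSketch(t *testing.T) {
	st := NewSigningTime(time.Date(2024, time.January, 2, 3, 4, 5, 0, time.UTC))
	if got := st.TimeFormat(); got != "20240102T030405Z" {
		t.Errorf("unexpected X-Amz-Date value: %s", got)
	}
	if got := st.ShortTimeFormat(); got != "20240102" {
		t.Errorf("unexpected credential scope date: %s", got)
	}
}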


@ -0,0 +1,66 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/util.go
package v4
import (
"net/url"
"strings"
)
const doubleSpace = " "
// StripExcessSpaces trims leading and trailing spaces and collapses multiple
// side-by-side spaces in the passed in string into single spaces.
func StripExcessSpaces(str string) string {
var j, k, l, m, spaces int
// Trim trailing spaces
for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
}
// Trim leading spaces
for k = 0; k < j && str[k] == ' '; k++ {
}
str = str[k : j+1]
// Strip multiple spaces.
j = strings.Index(str, doubleSpace)
if j < 0 {
return str
}
buf := []byte(str)
for k, m, l = j, j, len(buf); k < l; k++ {
if buf[k] == ' ' {
if spaces == 0 {
// First space.
buf[m] = buf[k]
m++
}
spaces++
} else {
// End of multiple spaces.
spaces = 0
buf[m] = buf[k]
m++
}
}
return string(buf[:m])
}
// GetURIPath returns the escaped URI component from the provided URL
func GetURIPath(u *url.URL) string {
var uri string
if len(u.Opaque) > 0 {
uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
} else {
uri = u.EscapedPath()
}
if len(uri) == 0 {
uri = "/"
}
return uri
}
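An editorial sketch (not part of the upstream file; assumes a _test.go file in this package importing net/url and testing) of the opaque-URL branch, mirroring how the signer tests below set req.URL.Opaque:
func TestGetURIPathOpaqueSketch(t *testing.T) {
	u, _ := url.Parse("https://service.region.amazonaws.com")
	u.Opaque = "//example.org/bucket/key"
	if got := GetURIPath(u); got != "/bucket/key" {
		t.Errorf("expected /bucket/key, got %s", got)
	}
}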


@ -0,0 +1,77 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/internal/v4/util_test.go
package v4
import (
"testing"
)
func TestStripExcessHeaders(t *testing.T) {
vals := []string{
"",
"123",
"1 2 3",
"1 2 3 ",
" 1 2 3",
"1 2 3",
"1 23",
"1 2 3",
"1 2 ",
" 1 2 ",
"12 3",
"12 3 1",
"12 3 1",
"12 3 1abc123",
}
expected := []string{
"",
"123",
"1 2 3",
"1 2 3",
"1 2 3",
"1 2 3",
"1 23",
"1 2 3",
"1 2",
"1 2",
"12 3",
"12 3 1",
"12 3 1",
"12 3 1abc123",
}
for i := 0; i < len(vals); i++ {
r := StripExcessSpaces(vals[i])
if e, a := expected[i], r; e != a {
t.Errorf("%d, expect %v, got %v", i, e, a)
}
}
}
var stripExcessSpaceCases = []string{
`AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`,
`123 321 123 321`,
` 123 321 123 321 `,
` 123 321 123 321 `,
"123",
"1 2 3",
" 1 2 3",
"1 2 3",
"1 23",
"1 2 3",
"1 2 ",
" 1 2 ",
"12 3",
"12 3 1",
"12 3 1",
"12 3 1abc123",
}
func BenchmarkStripExcessSpaces(b *testing.B) {
for i := 0; i < b.N; i++ {
for _, v := range stripExcessSpaceCases {
StripExcessSpaces(v)
}
}
}


@ -0,0 +1,98 @@
// This file is adopting https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go for sigv4a.
package v4a
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"strings"
"time"
signerCrypto "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/crypto"
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/v4"
)
// EventStreamSigner is an AWS EventStream protocol signer.
type EventStreamSigner interface {
GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error)
}
// StreamSignerOptions is the configuration options for StreamSigner.
type StreamSignerOptions struct{}
// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads.
type StreamSigner struct {
options StreamSignerOptions
credentials Credentials
service string
prevSignature []byte
}
// NewStreamSigner returns a new AWS EventStream protocol signer.
func NewStreamSigner(credentials Credentials, service string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner {
o := StreamSignerOptions{}
for _, fn := range optFns {
fn(&o)
}
return &StreamSigner{
options: o,
credentials: credentials,
service: service,
prevSignature: seedSignature,
}
}
func (s *StreamSigner) VerifySignature(headers, payload []byte, signingTime time.Time, signature []byte, optFns ...func(*StreamSignerOptions)) error {
options := s.options
for _, fn := range optFns {
fn(&options)
}
prevSignature := s.prevSignature
st := v4Internal.NewSigningTime(signingTime)
scope := buildCredentialScope(st, s.service)
stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st)
ok, err := signerCrypto.VerifySignature(&s.credentials.PrivateKey.PublicKey, makeHash(sha256.New(), []byte(stringToSign)), signature)
if err != nil {
return err
}
if !ok {
return fmt.Errorf("v4a: invalid signature")
}
s.prevSignature = signature
return nil
}
func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
hash := sha256.New()
return strings.Join([]string{
"AWS4-ECDSA-P256-SHA256-PAYLOAD",
signingTime.TimeFormat(),
credentialScope,
hex.EncodeToString(previousSignature),
hex.EncodeToString(makeHash(hash, headers)),
hex.EncodeToString(makeHash(hash, payload)),
}, "\n")
}
func buildCredentialScope(st v4Internal.SigningTime, service string) string {
return strings.Join([]string{
st.Format(shortTimeFormat),
service,
"aws4_request",
}, "/")
}
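An editorial sketch (not part of the upstream file) of how the StreamSigner above is meant to be driven: each chunk is checked against the previous signature, and prevSignature advances only when verification succeeds.
func verifyChunksSketch(s *StreamSigner, headers, payloads, signatures [][]byte, signingTime time.Time) error {
	for i := range signatures {
		// VerifySignature recomputes the event-stream string to sign from the previous chunk's signature.
		if err := s.VerifySignature(headers[i], payloads[i], signingTime, signatures[i]); err != nil {
			return err // the chain is broken; later chunks cannot be trusted
		}
	}
	return nil
}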


@ -0,0 +1,591 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/v4a.go
// with changes:
// * adding exported VerifySignature methods
// * using different ignore headers for sing/presign requests
// * don't duplicate content-length as signed header
// * use copy of smithy-go encoding/httpbinding package
// * use zap.Logger instead of smithy-go/logging
package v4a
import (
"bytes"
"context"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"fmt"
"hash"
"math/big"
"net/http"
"net/textproto"
"net/url"
"sort"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/smithy/encoding/httpbinding"
signerCrypto "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/crypto"
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/v4"
"go.uber.org/zap"
)
const (
// AmzRegionSetKey represents the region set header used for sigv4a
AmzRegionSetKey = "X-Amz-Region-Set"
amzAlgorithmKey = v4Internal.AmzAlgorithmKey
amzSecurityTokenKey = v4Internal.AmzSecurityTokenKey
amzDateKey = v4Internal.AmzDateKey
amzCredentialKey = v4Internal.AmzCredentialKey
amzSignedHeadersKey = v4Internal.AmzSignedHeadersKey
authorizationHeader = "Authorization"
signingAlgorithm = "AWS4-ECDSA-P256-SHA256"
timeFormat = "20060102T150405Z"
shortTimeFormat = "20060102"
// EmptyStringSHA256 is a hex encoded SHA-256 hash of an empty string
EmptyStringSHA256 = v4Internal.EmptyStringSHA256
// Version of signing v4a
Version = "SigV4A"
)
var (
p256 elliptic.Curve
nMinusTwoP256 *big.Int
one = new(big.Int).SetInt64(1)
)
func init() {
// Ensure the elliptic curve parameters are initialized on package import rather than on first usage
p256 = elliptic.P256()
nMinusTwoP256 = new(big.Int).SetBytes(p256.Params().N.Bytes())
nMinusTwoP256 = nMinusTwoP256.Sub(nMinusTwoP256, new(big.Int).SetInt64(2))
}
// SignerOptions is the SigV4a signing options for constructing a Signer.
type SignerOptions struct {
Logger *zap.Logger
LogSigning bool
// Disables the Signer's moving HTTP header key/value pairs from the HTTP
// request header to the request's query string. This is most commonly used
// with pre-signed requests preventing headers from being added to the
// request's query string.
DisableHeaderHoisting bool
// Disables the automatic escaping of the URI path of the request for the
// signature's canonical string's path. For services that do not need additional
// escaping, use this to disable the signer escaping the path.
//
// S3 is an example of a service that does not need additional escaping.
//
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
DisableURIPathEscaping bool
}
// Signer is a SigV4a HTTP signing implementation
type Signer struct {
options SignerOptions
}
// NewSigner constructs a SigV4a Signer.
func NewSigner(optFns ...func(*SignerOptions)) *Signer {
options := SignerOptions{}
for _, fn := range optFns {
fn(&options)
}
return &Signer{options: options}
}
// deriveKeyFromAccessKeyPair derives a NIST P-256 PrivateKey from the given
// IAM AccessKey and SecretKey pair.
//
// Based on FIPS.186-4 Appendix B.4.2
func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, error) {
params := p256.Params()
bitLen := params.BitSize // Testing random candidates does not require an additional 64 bits
counter := 0x01
buffer := make([]byte, 1+len(accessKey)) // 1 byte counter + len(accessKey)
kdfContext := bytes.NewBuffer(buffer)
inputKey := append([]byte("AWS4A"), []byte(secretKey)...)
d := new(big.Int)
for {
kdfContext.Reset()
kdfContext.WriteString(accessKey)
kdfContext.WriteByte(byte(counter))
key, err := signerCrypto.HMACKeyDerivation(sha256.New, bitLen, inputKey, []byte(signingAlgorithm), kdfContext.Bytes())
if err != nil {
return nil, err
}
// Check the key before calling SetBytes to confirm it is in fact a valid candidate.
// This ensures the byte slice is the correct length (32-bytes) to compare in constant-time
cmp, err := signerCrypto.ConstantTimeByteCompare(key, nMinusTwoP256.Bytes())
if err != nil {
return nil, err
}
if cmp == -1 {
d.SetBytes(key)
break
}
counter++
if counter > 0xFF {
return nil, fmt.Errorf("exhausted single byte external counter")
}
}
d = d.Add(d, one)
priv := new(ecdsa.PrivateKey)
priv.PublicKey.Curve = p256
priv.D = d
priv.PublicKey.X, priv.PublicKey.Y = p256.ScalarBaseMult(d.Bytes())
return priv, nil
}
type httpSigner struct {
Request *http.Request
ServiceName string
RegionSet []string
Time time.Time
Credentials Credentials
IsPreSign bool
Logger *zap.Logger
Debug bool
// PayloadHash is the hex encoded SHA-256 hash of the request payload
// If len(PayloadHash) == 0 the signer will attempt to send the request
// as an unsigned payload. Note: Unsigned payloads only work for a subset of services.
PayloadHash string
DisableHeaderHoisting bool
DisableURIPathEscaping bool
}
// SignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and signs using SigV4a.
// The passed in request will be modified in place.
func (s *Signer) SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) error {
options := s.options
for _, fn := range optFns {
fn(&options)
}
signer := &httpSigner{
Request: r,
PayloadHash: payloadHash,
ServiceName: service,
RegionSet: regionSet,
Credentials: credentials,
Time: signingTime.UTC(),
DisableHeaderHoisting: options.DisableHeaderHoisting,
DisableURIPathEscaping: options.DisableURIPathEscaping,
Logger: options.Logger,
}
signedRequest, err := signer.Build()
if err != nil {
return err
}
logHTTPSigningInfo(ctx, options, signedRequest)
return nil
}
// VerifySignature checks sigv4a.
func (s *Signer) VerifySignature(credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, signature string, optFns ...func(*SignerOptions)) error {
return s.verifySignature(credentials, r, payloadHash, service, regionSet, signingTime, signature, false, optFns...)
}
// VerifyPresigned checks sigv4a.
func (s *Signer) VerifyPresigned(credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, signature string, optFns ...func(*SignerOptions)) error {
return s.verifySignature(credentials, r, payloadHash, service, regionSet, signingTime, signature, true, optFns...)
}
func (s *Signer) verifySignature(credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, signature string, isPresigned bool, optFns ...func(*SignerOptions)) error {
options := s.options
for _, fn := range optFns {
fn(&options)
}
signer := &httpSigner{
Request: r,
PayloadHash: payloadHash,
ServiceName: service,
RegionSet: regionSet,
Credentials: credentials,
Time: signingTime.UTC(),
IsPreSign: isPresigned,
DisableHeaderHoisting: options.DisableHeaderHoisting,
DisableURIPathEscaping: options.DisableURIPathEscaping,
}
signedReq, err := signer.Build()
if err != nil {
return err
}
logHTTPSigningInfo(context.TODO(), options, signedReq)
signatureRaw, err := hex.DecodeString(signature)
if err != nil {
return fmt.Errorf("decode hex signature: %w", err)
}
ok, err := signerCrypto.VerifySignature(&credentials.PrivateKey.PublicKey, makeHash(sha256.New(), []byte(signedReq.StringToSign)), signatureRaw)
if err != nil {
return err
}
if !ok {
return fmt.Errorf("v4a: invalid signature")
}
return nil
}
// PresignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and presigns using SigV4a
// Returns the presigned URL along with the headers that were signed with the request.
//
// PresignHTTP will not set the expires time of the presigned request
// automatically. To specify the expire duration for a request add the
// "X-Amz-Expires" query parameter on the request with the value as the
// duration in seconds the presigned URL should be considered valid for. This
// parameter is not used by all AWS services, and is most notably used by
// Amazon S3 APIs.
func (s *Signer) PresignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) (signedURI string, signedHeaders http.Header, err error) {
options := s.options
for _, fn := range optFns {
fn(&options)
}
signer := &httpSigner{
Request: r,
PayloadHash: payloadHash,
ServiceName: service,
RegionSet: regionSet,
Credentials: credentials,
Time: signingTime.UTC(),
IsPreSign: true,
DisableHeaderHoisting: options.DisableHeaderHoisting,
DisableURIPathEscaping: options.DisableURIPathEscaping,
}
signedRequest, err := signer.Build()
if err != nil {
return "", nil, err
}
logHTTPSigningInfo(ctx, options, signedRequest)
signedHeaders = make(http.Header)
// For the signed headers we canonicalize the header keys in the returned map.
// This avoids situations where the standard library can double headers such as the Host header. For example
// the standard library will set the Host header, even if it is present in lower-case form.
for k, v := range signedRequest.SignedHeaders {
key := textproto.CanonicalMIMEHeaderKey(k)
signedHeaders[key] = append(signedHeaders[key], v...)
}
return signedRequest.Request.URL.String(), signedHeaders, nil
}
func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) {
amzDate := s.Time.Format(timeFormat)
if s.IsPreSign {
query.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ","))
query.Set(amzDateKey, amzDate)
query.Set(amzAlgorithmKey, signingAlgorithm)
if len(s.Credentials.SessionToken) > 0 {
query.Set(amzSecurityTokenKey, s.Credentials.SessionToken)
}
return
}
headers.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ","))
headers.Set(amzDateKey, amzDate)
if len(s.Credentials.SessionToken) > 0 {
headers.Set(amzSecurityTokenKey, s.Credentials.SessionToken)
}
}
func (s *httpSigner) Build() (signedRequest, error) {
req := s.Request
query := req.URL.Query()
headers := req.Header
s.setRequiredSigningFields(headers, query)
// Sort Each Query Key's Values
for key := range query {
sort.Strings(query[key])
}
v4Internal.SanitizeHostForHeader(req)
credentialScope := s.buildCredentialScope()
credentialStr := s.Credentials.Context + "/" + credentialScope
if s.IsPreSign {
query.Set(amzCredentialKey, credentialStr)
}
unsignedHeaders := headers
if s.IsPreSign && !s.DisableHeaderHoisting {
urlValues := url.Values{}
urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, unsignedHeaders)
for k := range urlValues {
query[k] = urlValues[k]
}
}
host := req.URL.Host
if len(req.Host) > 0 {
host = req.Host
}
var (
signedHeaders http.Header
signedHeadersStr string
canonicalHeaderStr string
)
if s.IsPreSign {
signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredPresignedHeaders, unsignedHeaders, s.Request.ContentLength)
} else {
signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
}
if s.IsPreSign {
query.Set(amzSignedHeadersKey, signedHeadersStr)
}
rawQuery := strings.Replace(query.Encode(), "+", "%20", -1)
canonicalURI := v4Internal.GetURIPath(req.URL)
if !s.DisableURIPathEscaping {
canonicalURI = httpbinding.EscapePath(canonicalURI, false)
}
canonicalString := s.buildCanonicalString(
req.Method,
canonicalURI,
rawQuery,
signedHeadersStr,
canonicalHeaderStr,
)
strToSign := s.buildStringToSign(credentialScope, canonicalString)
signingSignature, err := s.buildSignature(strToSign)
if err != nil {
return signedRequest{}, err
}
if s.IsPreSign {
rawQuery += "&X-Amz-Signature=" + signingSignature
} else {
headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature))
}
req.URL.RawQuery = rawQuery
return signedRequest{
Request: req,
SignedHeaders: signedHeaders,
CanonicalString: canonicalString,
StringToSign: strToSign,
PreSigned: s.IsPreSign,
}, nil
}
func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string {
const credential = "Credential="
const signedHeaders = "SignedHeaders="
const signature = "Signature="
const commaSpace = ", "
var parts strings.Builder
parts.Grow(len(signingAlgorithm) + 1 +
len(credential) + len(credentialStr) + len(commaSpace) +
len(signedHeaders) + len(signedHeadersStr) + len(commaSpace) +
len(signature) + len(signingSignature),
)
parts.WriteString(signingAlgorithm)
parts.WriteRune(' ')
parts.WriteString(credential)
parts.WriteString(credentialStr)
parts.WriteString(commaSpace)
parts.WriteString(signedHeaders)
parts.WriteString(signedHeadersStr)
parts.WriteString(commaSpace)
parts.WriteString(signature)
parts.WriteString(signingSignature)
return parts.String()
}
func (s *httpSigner) buildCredentialScope() string {
return strings.Join([]string{
s.Time.Format(shortTimeFormat),
s.ServiceName,
"aws4_request",
}, "/")
}
func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
query := url.Values{}
unsignedHeaders := http.Header{}
for k, h := range header {
if r.IsValid(k) {
query[k] = h
} else {
unsignedHeaders[k] = h
}
}
return query, unsignedHeaders
}
func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
signed = make(http.Header)
var headers []string
const hostHeader = "host"
headers = append(headers, hostHeader)
signed[hostHeader] = append(signed[hostHeader], host)
//const contentLengthHeader = "content-length"
//if length > 0 {
// headers = append(headers, contentLengthHeader)
// signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
//}
for k, v := range header {
if !rule.IsValid(k) {
continue // ignored header
}
lowerCaseKey := strings.ToLower(k)
if _, ok := signed[lowerCaseKey]; ok {
// include additional values
signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
continue
}
headers = append(headers, lowerCaseKey)
signed[lowerCaseKey] = v
}
sort.Strings(headers)
signedHeaders = strings.Join(headers, ";")
var canonicalHeaders strings.Builder
n := len(headers)
const colon = ':'
for i := 0; i < n; i++ {
if headers[i] == hostHeader {
canonicalHeaders.WriteString(hostHeader)
canonicalHeaders.WriteRune(colon)
canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host))
} else {
canonicalHeaders.WriteString(headers[i])
canonicalHeaders.WriteRune(colon)
// Trim out leading, trailing, and dedup inner spaces from signed header values.
values := signed[headers[i]]
for j, v := range values {
cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
canonicalHeaders.WriteString(cleanedValue)
if j < len(values)-1 {
canonicalHeaders.WriteRune(',')
}
}
}
canonicalHeaders.WriteRune('\n')
}
canonicalHeadersStr = canonicalHeaders.String()
return signed, signedHeaders, canonicalHeadersStr
}
func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string {
return strings.Join([]string{
method,
uri,
query,
canonicalHeaders,
signedHeaders,
s.PayloadHash,
}, "\n")
}
func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string {
return strings.Join([]string{
signingAlgorithm,
s.Time.Format(timeFormat),
credentialScope,
hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))),
}, "\n")
}
func makeHash(hash hash.Hash, b []byte) []byte {
hash.Reset()
hash.Write(b)
return hash.Sum(nil)
}
func (s *httpSigner) buildSignature(strToSign string) (string, error) {
sig, err := s.Credentials.PrivateKey.Sign(rand.Reader, makeHash(sha256.New(), []byte(strToSign)), crypto.SHA256)
if err != nil {
return "", err
}
return hex.EncodeToString(sig), nil
}
const logSignInfoMsg = `Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
func logHTTPSigningInfo(_ context.Context, options SignerOptions, r signedRequest) {
if !options.LogSigning {
return
}
signedURLMsg := ""
if r.PreSigned {
signedURLMsg = fmt.Sprintf(logSignedURLMsg, r.Request.URL.String())
}
if options.Logger != nil {
options.Logger.Debug(fmt.Sprintf(logSignInfoMsg, r.CanonicalString, r.StringToSign, signedURLMsg))
}
}
type signedRequest struct {
Request *http.Request
SignedHeaders http.Header
CanonicalString string
StringToSign string
PreSigned bool
}


@ -0,0 +1,425 @@
// This file is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/internal/v4a/v4a_test.go
// with changes:
// * use zap.Logger instead of smithy-go/logging
package v4a
import (
"context"
"encoding/hex"
"fmt"
"math/big"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2/internal/crypto"
"github.com/aws/aws-sdk-go-v2/aws"
"go.uber.org/zap/zaptest"
)
const (
accessKey = "AKISORANDOMAASORANDOM"
secretKey = "q+jcrXGc+0zWN6uzclKVhvMmUsIfRPa4rlRandom"
)
func TestDeriveECDSAKeyPairFromSecret(t *testing.T) {
privateKey, err := deriveKeyFromAccessKeyPair(accessKey, secretKey)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
expectedX := func() *big.Int {
t.Helper()
b, ok := new(big.Int).SetString("15D242CEEBF8D8169FD6A8B5A746C41140414C3B07579038DA06AF89190FFFCB", 16)
if !ok {
t.Fatalf("failed to parse big integer")
}
return b
}()
expectedY := func() *big.Int {
t.Helper()
b, ok := new(big.Int).SetString("515242CEDD82E94799482E4C0514B505AFCCF2C0C98D6A553BF539F424C5EC0", 16)
if !ok {
t.Fatalf("failed to parse big integer")
}
return b
}()
if privateKey.X.Cmp(expectedX) != 0 {
t.Errorf("expected % X, got % X", expectedX, privateKey.X)
}
if privateKey.Y.Cmp(expectedY) != 0 {
t.Errorf("expected % X, got % X", expectedY, privateKey.Y)
}
}
func TestSignHTTP(t *testing.T) {
req := buildRequest("dynamodb", "us-east-1")
signer, credProvider := buildSigner(t, true)
key, err := credProvider.RetrievePrivateKey(context.Background())
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
err = signer.SignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
expectedDate := "19700101T000000Z"
expectedAlg := "AWS4-ECDSA-P256-SHA256"
expectedCredential := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
expectedSignedHeaders := "content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-region-set;x-amz-security-token;x-amz-target"
expectedStrToSignHash := "4ba7d0482cf4d5450cefdc067a00de1a4a715e444856fa3e1d85c35fb34d9730"
q := req.Header
validateAuthorization(t, q.Get("Authorization"), expectedAlg, expectedCredential, expectedSignedHeaders, expectedStrToSignHash)
if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestSignHTTP_NoSessionToken(t *testing.T) {
req := buildRequest("dynamodb", "us-east-1")
signer, credProvider := buildSigner(t, false)
key, err := credProvider.RetrievePrivateKey(context.Background())
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
err = signer.SignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
expectedAlg := "AWS4-ECDSA-P256-SHA256"
expectedCredential := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
expectedSignedHeaders := "content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-region-set;x-amz-target"
expectedStrToSignHash := "1aeefb422ae6aa0de7aec829da813e55cff35553cac212dffd5f9474c71e47ee"
q := req.Header
validateAuthorization(t, q.Get("Authorization"), expectedAlg, expectedCredential, expectedSignedHeaders, expectedStrToSignHash)
}
func TestPresignHTTP(t *testing.T) {
req := buildRequest("dynamodb", "us-east-1")
signer, credProvider := buildSigner(t, false)
key, err := credProvider.RetrievePrivateKey(context.Background())
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
query := req.URL.Query()
query.Set("X-Amz-Expires", "18000")
req.URL.RawQuery = query.Encode()
signedURL, _, err := signer.PresignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
expectedDate := "19700101T000000Z"
expectedAlg := "AWS4-ECDSA-P256-SHA256"
expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
expectedCredential := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
expectedStrToSignHash := "d7ffbd2fab644384c056957e6ac38de4ae68246764b5f5df171b3824153b6397"
expectedTarget := "prefix.Operation"
signedReq, err := url.Parse(signedURL)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
q := signedReq.Query()
validateSignature(t, expectedStrToSignHash, q.Get("X-Amz-Signature"))
if e, a := expectedAlg, q.Get("X-Amz-Algorithm"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := expectedCredential, q.Get("X-Amz-Credential"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
t.Errorf("expect %v to be empty", a)
}
if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "us-east-1", q.Get("X-Amz-Region-Set"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestPresignHTTP_BodyWithArrayRequest(t *testing.T) {
req := buildRequest("dynamodb", "us-east-1")
req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
signer, credProvider := buildSigner(t, true)
key, err := credProvider.RetrievePrivateKey(context.Background())
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
query := req.URL.Query()
query.Set("X-Amz-Expires", "300")
req.URL.RawQuery = query.Encode()
signedURI, _, err := signer.PresignHTTP(context.Background(), key, req, EmptyStringSHA256, "dynamodb", []string{"us-east-1"}, time.Unix(0, 0))
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
signedReq, err := url.Parse(signedURI)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
expectedAlg := "AWS4-ECDSA-P256-SHA256"
expectedDate := "19700101T000000Z"
expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
expectedStrToSignHash := "acff64fd3689be96259d4112c3742ff79f4da0d813bc58a285dc1c4449760bec"
expectedCred := "AKISORANDOMAASORANDOM/19700101/dynamodb/aws4_request"
expectedTarget := "prefix.Operation"
q := signedReq.Query()
validateSignature(t, expectedStrToSignHash, q.Get("X-Amz-Signature"))
if e, a := expectedAlg, q.Get("X-Amz-Algorithm"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := expectedCred, q.Get("X-Amz-Credential"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
t.Errorf("expect %v to be empty, was not", a)
}
if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "us-east-1", q.Get("X-Amz-Region-Set"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestSign_buildCanonicalHeaders(t *testing.T) {
serviceName := "mockAPI"
region := "mock-region"
endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
req, err := http.NewRequest("POST", endpoint, nil)
if err != nil {
t.Fatalf("failed to create request, %v", err)
}
req.Header.Set("FooInnerSpace", " inner space ")
req.Header.Set("FooLeadingSpace", " leading-space")
req.Header.Add("FooMultipleSpace", "no-space")
req.Header.Add("FooMultipleSpace", "\ttab-space")
req.Header.Add("FooMultipleSpace", "trailing-space ")
req.Header.Set("FooNoSpace", "no-space")
req.Header.Set("FooTabSpace", "\ttab-space\t")
req.Header.Set("FooTrailingSpace", "trailing-space ")
req.Header.Set("FooWrappedSpace", " wrapped-space ")
credProvider := &SymmetricCredentialAdaptor{
SymmetricProvider: staticCredentialsProvider{
Value: aws.Credentials{
AccessKeyID: accessKey,
SecretAccessKey: secretKey,
},
},
}
key, err := credProvider.RetrievePrivateKey(context.Background())
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
ctx := &httpSigner{
Request: req,
ServiceName: serviceName,
RegionSet: []string{region},
Credentials: key,
Time: time.Date(2021, 10, 20, 12, 42, 0, 0, time.UTC),
}
build, err := ctx.Build()
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
expectCanonicalString := strings.Join([]string{
`POST`,
`/`,
``,
`fooinnerspace:inner space`,
`fooleadingspace:leading-space`,
`foomultiplespace:no-space,tab-space,trailing-space`,
`foonospace:no-space`,
`footabspace:tab-space`,
`footrailingspace:trailing-space`,
`foowrappedspace:wrapped-space`,
`host:mockAPI.mock-region.amazonaws.com`,
`x-amz-date:20211020T124200Z`,
`x-amz-region-set:mock-region`,
``,
`fooinnerspace;fooleadingspace;foomultiplespace;foonospace;footabspace;footrailingspace;foowrappedspace;host;x-amz-date;x-amz-region-set`,
``,
}, "\n")
if diff := cmpDiff(expectCanonicalString, build.CanonicalString); diff != "" {
t.Errorf("expect match, got\n%s", diff)
}
}
func validateAuthorization(t *testing.T, authorization, expectedAlg, expectedCredential, expectedSignedHeaders, expectedStrToSignHash string) {
t.Helper()
split := strings.SplitN(authorization, " ", 2)
if len(split) != 2 {
t.Fatal("unexpected authorization header format")
}
if e, a := split[0], expectedAlg; e != a {
t.Errorf("expected %v, got %v", e, a)
}
keyValues := strings.Split(split[1], ", ")
seen := make(map[string]string)
for _, kv := range keyValues {
idx := strings.Index(kv, "=")
if idx == -1 {
continue
}
key, value := kv[:idx], kv[idx+1:]
seen[key] = value
}
if a, ok := seen["Credential"]; ok {
if expectedCredential != a {
t.Errorf("expected credential %v, got %v", expectedCredential, a)
}
} else {
t.Errorf("Credential not found in authorization string")
}
if a, ok := seen["SignedHeaders"]; ok {
if expectedSignedHeaders != a {
t.Errorf("expected signed headers %v, got %v", expectedSignedHeaders, a)
}
} else {
t.Errorf("SignedHeaders not found in authorization string")
}
if a, ok := seen["Signature"]; ok {
validateSignature(t, expectedStrToSignHash, a)
} else {
t.Errorf("signature not found in authorization string")
}
}
func validateSignature(t *testing.T, expectedHash, signature string) {
t.Helper()
pair, err := deriveKeyFromAccessKeyPair(accessKey, secretKey)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
hash, _ := hex.DecodeString(expectedHash)
sig, _ := hex.DecodeString(signature)
ok, err := crypto.VerifySignature(&pair.PublicKey, hash, sig)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
if !ok {
t.Errorf("failed to verify signing singature")
}
}
func buildRequest(serviceName, region string) *http.Request {
endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
req, _ := http.NewRequest("POST", endpoint, nil)
req.URL.Opaque = "//example.org/bucket/key-._~,!@%23$%25^&*()"
req.Header.Set("X-Amz-Target", "prefix.Operation")
req.Header.Set("Content-Type", "application/x-amz-json-1.0")
req.Header.Set("Content-Length", strconv.Itoa(1024))
req.Header.Set("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
req.Header.Add("X-Amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
req.Header.Add("X-amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
return req
}
func buildSigner(t *testing.T, withToken bool) (*Signer, CredentialsProvider) {
creds := aws.Credentials{
AccessKeyID: accessKey,
SecretAccessKey: secretKey,
}
if withToken {
creds.SessionToken = "TOKEN"
}
return NewSigner(func(options *SignerOptions) {
options.Logger = zaptest.NewLogger(t)
}), &SymmetricCredentialAdaptor{
SymmetricProvider: staticCredentialsProvider{
Value: creds,
},
}
}
type staticCredentialsProvider struct {
Value aws.Credentials
}
func (s staticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) {
v := s.Value
if v.AccessKeyID == "" || v.SecretAccessKey == "" {
return aws.Credentials{
Source: "Source Name",
}, fmt.Errorf("static credentials are empty")
}
if len(v.Source) == 0 {
v.Source = "Source Name"
}
return v, nil
}
func cmpDiff(e, a interface{}) string {
if !reflect.DeepEqual(e, a) {
return fmt.Sprintf("%v != %v", e, a)
}
return ""
}


@ -0,0 +1,117 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/cache.go
package v4
import (
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
)
func lookupKey(service, region string) string {
var s strings.Builder
s.Grow(len(region) + len(service) + 3)
s.WriteString(region)
s.WriteRune('/')
s.WriteString(service)
return s.String()
}
type derivedKey struct {
AccessKey string
Date time.Time
Credential []byte
}
type derivedKeyCache struct {
values map[string]derivedKey
mutex sync.RWMutex
}
func newDerivedKeyCache() derivedKeyCache {
return derivedKeyCache{
values: make(map[string]derivedKey),
}
}
func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte {
key := lookupKey(service, region)
s.mutex.RLock()
if cred, ok := s.get(key, credentials, signingTime.Time); ok {
s.mutex.RUnlock()
return cred
}
s.mutex.RUnlock()
s.mutex.Lock()
if cred, ok := s.get(key, credentials, signingTime.Time); ok {
s.mutex.Unlock()
return cred
}
cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime)
entry := derivedKey{
AccessKey: credentials.AccessKeyID,
Date: signingTime.Time,
Credential: cred,
}
s.values[key] = entry
s.mutex.Unlock()
return cred
}
func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) {
cacheEntry, ok := s.retrieveFromCache(key)
if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) {
return cacheEntry.Credential, true
}
return nil, false
}
func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) {
if v, ok := s.values[key]; ok {
return v, true
}
return derivedKey{}, false
}
// SigningKeyDeriver derives a signing key from a set of credentials
type SigningKeyDeriver struct {
cache derivedKeyCache
}
// NewSigningKeyDeriver returns a new SigningKeyDeriver
func NewSigningKeyDeriver() *SigningKeyDeriver {
return &SigningKeyDeriver{
cache: newDerivedKeyCache(),
}
}
// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing.
func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte {
return k.cache.Get(credential, service, region, signingTime)
}
func deriveKey(secret, service, region string, t SigningTime) []byte {
hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat()))
hmacRegion := HMACSHA256(hmacDate, []byte(region))
hmacService := HMACSHA256(hmacRegion, []byte(service))
return HMACSHA256(hmacService, []byte("aws4_request"))
}
func isSameDay(x, y time.Time) bool {
xYear, xMonth, xDay := x.Date()
yYear, yMonth, yDay := y.Date()
if xYear != yYear {
return false
}
if xMonth != yMonth {
return false
}
return xDay == yDay
}
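An editorial usage sketch (not part of the upstream file; assumes a _test.go file in this package importing bytes, testing, time, and the aws package): deriving a SigV4 signing key and hitting the same-day cache on the second call.
func TestSigningKeyDeriverSketch(t *testing.T) {
	deriver := NewSigningKeyDeriver()
	creds := aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}
	st := NewSigningTime(time.Date(2024, time.January, 2, 0, 0, 0, 0, time.UTC))
	first := deriver.DeriveKey(creds, "s3", "us-east-1", st)
	second := deriver.DeriveKey(creds, "s3", "us-east-1", st) // served from the same-day cache
	if !bytes.Equal(first, second) {
		t.Error("expected identical signing keys for the same credentials, scope, and day")
	}
}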


@ -0,0 +1,42 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/const.go
package v4
// Signature Version 4 (SigV4) Constants
const (
// EmptyStringSHA256 is the hex encoded sha256 value of an empty string
EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
// UnsignedPayload indicates that the request payload body is unsigned
UnsignedPayload = "UNSIGNED-PAYLOAD"
// AmzAlgorithmKey indicates the signing algorithm
AmzAlgorithmKey = "X-Amz-Algorithm"
// AmzSecurityTokenKey indicates the security token to be used with temporary credentials
AmzSecurityTokenKey = "X-Amz-Security-Token"
// AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'
AmzDateKey = "X-Amz-Date"
// AmzCredentialKey is the access key ID and credential scope
AmzCredentialKey = "X-Amz-Credential"
// AmzSignedHeadersKey is the set of headers signed for the request
AmzSignedHeadersKey = "X-Amz-SignedHeaders"
// AmzSignatureKey is the query parameter to store the SigV4 signature
AmzSignatureKey = "X-Amz-Signature"
// TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
TimeFormat = "20060102T150405Z"
// ShortTimeFormat is the shortened time format used in the credential scope
ShortTimeFormat = "20060102"
// ContentSHAKey is the SHA256 of request body
ContentSHAKey = "X-Amz-Content-Sha256"
// StreamingEventsPayload indicates that the request payload body is a signed event stream.
StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS"
)


@ -0,0 +1,90 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/header_rules.go
package v4
import (
"strings"
)
// Rules houses a set of Rule needed for validation of a
// string value
type Rules []Rule
// Rule interface allows for more flexible rules and just simply
// checks whether or not a value adheres to that Rule
type Rule interface {
IsValid(value string) bool
}
// IsValid will iterate through all rules and see if any rules
// apply to the value and supports nested rules
func (r Rules) IsValid(value string) bool {
for _, rule := range r {
if rule.IsValid(value) {
return true
}
}
return false
}
// MapRule generic Rule for maps
type MapRule map[string]struct{}
// IsValid for the map Rule satisfies whether it exists in the map
func (m MapRule) IsValid(value string) bool {
_, ok := m[value]
return ok
}
// AllowList is a generic Rule for include listing
type AllowList struct {
Rule
}
// IsValid for AllowList checks if the value is within the AllowList
func (w AllowList) IsValid(value string) bool {
return w.Rule.IsValid(value)
}
// ExcludeList is a generic Rule for exclude listing
type ExcludeList struct {
Rule
}
// IsValid for ExcludeList checks that the value is not within the ExcludeList
func (b ExcludeList) IsValid(value string) bool {
return !b.Rule.IsValid(value)
}
// Patterns is a list of strings to match against
type Patterns []string
// IsValid for Patterns checks each pattern and returns if a match has
// been found
func (p Patterns) IsValid(value string) bool {
for _, pattern := range p {
if HasPrefixFold(value, pattern) {
return true
}
}
return false
}
// InclusiveRules allows rules to depend on one another
type InclusiveRules []Rule
// IsValid will return true if all rules are true
func (r InclusiveRules) IsValid(value string) bool {
for _, rule := range r {
if !rule.IsValid(value) {
return false
}
}
return true
}
// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
// under Unicode case-folding.
func HasPrefixFold(s, prefix string) bool {
return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
}
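A short editorial sketch (not part of the upstream file; assumes a _test.go file in this package) of the case-folding prefix check used by Patterns:
func TestHasPrefixFoldSketch(t *testing.T) {
	if !HasPrefixFold("x-amz-meta-somename", "X-Amz-Meta-") {
		t.Error("expected a case-insensitive prefix match")
	}
	if HasPrefixFold("x-amz", "X-Amz-Meta-") {
		t.Error("a value shorter than the prefix must not match")
	}
}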


@ -0,0 +1,88 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/header.go
// with changes:
// * drop User-Agent header from ignored
package v4
// IgnoredPresignedHeaders is a list of headers that are ignored during signing
var IgnoredPresignedHeaders = Rules{
ExcludeList{
MapRule{
"Authorization": struct{}{},
"User-Agent": struct{}{},
"X-Amzn-Trace-Id": struct{}{},
"Expect": struct{}{},
},
},
}
// IgnoredHeaders is a list of headers that are ignored during signing
// drop User-Agent header to be compatible with aws sdk java v1.
var IgnoredHeaders = Rules{
ExcludeList{
MapRule{
"Authorization": struct{}{},
//"User-Agent": struct{}{},
"X-Amzn-Trace-Id": struct{}{},
"Expect": struct{}{},
},
},
}
// RequiredSignedHeaders is an allow list for Build canonical headers.
var RequiredSignedHeaders = Rules{
AllowList{
MapRule{
"Cache-Control": struct{}{},
"Content-Disposition": struct{}{},
"Content-Encoding": struct{}{},
"Content-Language": struct{}{},
"Content-Md5": struct{}{},
"Content-Type": struct{}{},
"Expires": struct{}{},
"If-Match": struct{}{},
"If-Modified-Since": struct{}{},
"If-None-Match": struct{}{},
"If-Unmodified-Since": struct{}{},
"Range": struct{}{},
"X-Amz-Acl": struct{}{},
"X-Amz-Copy-Source": struct{}{},
"X-Amz-Copy-Source-If-Match": struct{}{},
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
"X-Amz-Copy-Source-If-None-Match": struct{}{},
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
"X-Amz-Copy-Source-Range": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Expected-Bucket-Owner": struct{}{},
"X-Amz-Grant-Full-control": struct{}{},
"X-Amz-Grant-Read": struct{}{},
"X-Amz-Grant-Read-Acp": struct{}{},
"X-Amz-Grant-Write": struct{}{},
"X-Amz-Grant-Write-Acp": struct{}{},
"X-Amz-Metadata-Directive": struct{}{},
"X-Amz-Mfa": struct{}{},
"X-Amz-Request-Payer": struct{}{},
"X-Amz-Server-Side-Encryption": struct{}{},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
"X-Amz-Server-Side-Encryption-Context": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Storage-Class": struct{}{},
"X-Amz-Website-Redirect-Location": struct{}{},
"X-Amz-Content-Sha256": struct{}{},
"X-Amz-Tagging": struct{}{},
},
},
Patterns{"X-Amz-Object-Lock-"},
Patterns{"X-Amz-Meta-"},
}
// AllowedQueryHoisting is an allow list of headers that may be hoisted into the
// query string when presigning.
var AllowedQueryHoisting = InclusiveRules{
ExcludeList{RequiredSignedHeaders},
Patterns{"X-Amz-"},
}


@ -0,0 +1,65 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/header_test.go
package v4
import "testing"
func TestAllowedQueryHoisting(t *testing.T) {
cases := map[string]struct {
Header string
ExpectHoist bool
}{
"object-lock": {
Header: "X-Amz-Object-Lock-Mode",
ExpectHoist: false,
},
"s3 metadata": {
Header: "X-Amz-Meta-SomeName",
ExpectHoist: false,
},
"another header": {
Header: "X-Amz-SomeOtherHeader",
ExpectHoist: true,
},
"non X-AMZ header": {
Header: "X-SomeOtherHeader",
ExpectHoist: false,
},
}
for name, c := range cases {
t.Run(name, func(t *testing.T) {
if e, a := c.ExpectHoist, AllowedQueryHoisting.IsValid(c.Header); e != a {
t.Errorf("expect hoist %v, was %v", e, a)
}
})
}
}
func TestIgnoredHeaders(t *testing.T) {
cases := map[string]struct {
Header string
ExpectIgnored bool
}{
"expect": {
Header: "Expect",
ExpectIgnored: true,
},
"authorization": {
Header: "Authorization",
ExpectIgnored: true,
},
"X-AMZ header": {
Header: "X-Amz-Content-Sha256",
ExpectIgnored: false,
},
}
for name, c := range cases {
t.Run(name, func(t *testing.T) {
if e, a := c.ExpectIgnored, IgnoredHeaders.IsValid(c.Header); e == a {
t.Errorf("expect ignored %v, was %v", e, a)
}
})
}
}


@ -0,0 +1,15 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/hmac.go
package v4
import (
"crypto/hmac"
"crypto/sha256"
)
// HMACSHA256 computes a HMAC-SHA256 of data given the provided key.
func HMACSHA256(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}


@ -0,0 +1,77 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/host.go
package v4
import (
"net/http"
"strings"
)
// SanitizeHostForHeader removes default port from host and updates request.Host
func SanitizeHostForHeader(r *http.Request) {
host := getHost(r)
port := portOnly(host)
if port != "" && isDefaultPort(r.URL.Scheme, port) {
r.Host = stripPort(host)
}
}
// Returns host from request
func getHost(r *http.Request) string {
if r.Host != "" {
return r.Host
}
return r.URL.Host
}
// Hostname returns u.Host, without any port number.
//
// If Host is an IPv6 literal with a port number, Hostname returns the
// IPv6 literal without the square brackets. IPv6 literals may include
// a zone identifier.
//
// Copied from the Go 1.8 standard library (net/url)
func stripPort(hostport string) string {
colon := strings.IndexByte(hostport, ':')
if colon == -1 {
return hostport
}
if i := strings.IndexByte(hostport, ']'); i != -1 {
return strings.TrimPrefix(hostport[:i], "[")
}
return hostport[:colon]
}
// Port returns the port part of u.Host, without the leading colon.
// If u.Host doesn't contain a port, Port returns an empty string.
//
// Copied from the Go 1.8 standard library (net/url)
func portOnly(hostport string) string {
colon := strings.IndexByte(hostport, ':')
if colon == -1 {
return ""
}
if i := strings.Index(hostport, "]:"); i != -1 {
return hostport[i+len("]:"):]
}
if strings.Contains(hostport, "]") {
return ""
}
return hostport[colon+len(":"):]
}
// Returns true if the specified URI is using the standard port
// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
func isDefaultPort(scheme, port string) bool {
if port == "" {
return true
}
lowerCaseScheme := strings.ToLower(scheme)
if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
return true
}
return false
}
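To illustrate the effect (a sketch, not part of the vendored file): the scheme's default port is stripped from the Host header, while a non-default port is left untouched.
req, _ := http.NewRequest("GET", "https://example.com:443/bucket/key", nil)
SanitizeHostForHeader(req)
// req.Host == "example.com": 443 is the default port for https, so it is dropped.

req2, _ := http.NewRequest("GET", "https://example.com:8443/bucket/key", nil)
SanitizeHostForHeader(req2)
// req2.Host == "example.com:8443": a non-default port is kept as-is.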

View file

@ -0,0 +1,15 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/scope.go
package v4
import "strings"
// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope
func BuildCredentialScope(signingTime SigningTime, region, service string) string {
return strings.Join([]string{
signingTime.ShortTimeFormat(),
region,
service,
"aws4_request",
}, "/")
}
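For example (illustrative, not part of the vendored file), a request signed at the Unix epoch for S3 in us-east-1 gets the following scope, assuming the usual short date format of 20060102:
st := NewSigningTime(time.Unix(0, 0).UTC())
scope := BuildCredentialScope(st, "us-east-1", "s3")
// scope == "19700101/us-east-1/s3/aws4_request"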

View file

@ -0,0 +1,38 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/time.go
package v4
import "time"
// SigningTime wraps a time.Time and caches its formatted values for SigV4 signing.
type SigningTime struct {
time.Time
timeFormat string
shortTimeFormat string
}
// NewSigningTime creates a new SigningTime given a time.Time
func NewSigningTime(t time.Time) SigningTime {
return SigningTime{
Time: t,
}
}
// TimeFormat provides a time formatted in the X-Amz-Date format.
func (m *SigningTime) TimeFormat() string {
return m.format(&m.timeFormat, TimeFormat)
}
// ShortTimeFormat provides the time formatted as 20060102.
func (m *SigningTime) ShortTimeFormat() string {
return m.format(&m.shortTimeFormat, ShortTimeFormat)
}
func (m *SigningTime) format(target *string, format string) string {
if len(*target) > 0 {
return *target
}
v := m.Time.Format(format)
*target = v
return v
}
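The two formats correspond to the X-Amz-Date header value and the date part of the credential scope; the formatted strings are computed once and then cached. A small sketch (illustrative, not part of the vendored file; the TimeFormat and ShortTimeFormat constants are defined elsewhere in this package):
st := NewSigningTime(time.Date(2021, 10, 20, 12, 42, 0, 0, time.UTC))
_ = st.TimeFormat()      // "20211020T124200Z", cached after the first call
_ = st.ShortTimeFormat() // "20211020"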

View file

@ -0,0 +1,82 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/util.go
package v4
import (
"net/url"
"strings"
)
const doubleSpace = " "
// StripExcessSpaces trims leading and trailing spaces from the passed-in string
// and collapses multiple side-by-side spaces into a single space.
func StripExcessSpaces(str string) string {
var j, k, l, m, spaces int
// Trim trailing spaces
for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
}
// Trim leading spaces
for k = 0; k < j && str[k] == ' '; k++ {
}
str = str[k : j+1]
// Strip multiple spaces.
j = strings.Index(str, doubleSpace)
if j < 0 {
return str
}
buf := []byte(str)
for k, m, l = j, j, len(buf); k < l; k++ {
if buf[k] == ' ' {
if spaces == 0 {
// First space.
buf[m] = buf[k]
m++
}
spaces++
} else {
// End of multiple spaces.
spaces = 0
buf[m] = buf[k]
m++
}
}
return string(buf[:m])
}
// GetURIPath returns the escaped URI component from the provided URL.
func GetURIPath(u *url.URL) string {
var uriPath string
if len(u.Opaque) > 0 {
const schemeSep, pathSep, queryStart = "//", "/", "?"
opaque := u.Opaque
// Cut off the query string if present.
if idx := strings.Index(opaque, queryStart); idx >= 0 {
opaque = opaque[:idx]
}
// Cut out the scheme separator if present.
if strings.Index(opaque, schemeSep) == 0 {
opaque = opaque[len(schemeSep):]
}
// Capture the URI path starting with the first path separator.
if idx := strings.Index(opaque, pathSep); idx >= 0 {
uriPath = opaque[idx:]
}
} else {
uriPath = u.EscapedPath()
}
if len(uriPath) == 0 {
uriPath = "/"
}
return uriPath
}

View file

@ -0,0 +1,160 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/internal/v4/util_test.go
package v4
import (
"net/http"
"net/url"
"testing"
)
func lazyURLParse(v string) func() (*url.URL, error) {
return func() (*url.URL, error) {
return url.Parse(v)
}
}
func TestGetURIPath(t *testing.T) {
cases := map[string]struct {
getURL func() (*url.URL, error)
expect string
}{
// Cases
"with scheme": {
getURL: lazyURLParse("https://localhost:9000"),
expect: "/",
},
"no port, with scheme": {
getURL: lazyURLParse("https://localhost"),
expect: "/",
},
"without scheme": {
getURL: lazyURLParse("localhost:9000"),
expect: "/",
},
"without scheme, with path": {
getURL: lazyURLParse("localhost:9000/abc123"),
expect: "/abc123",
},
"without scheme, with separator": {
getURL: lazyURLParse("//localhost:9000"),
expect: "/",
},
"no port, without scheme, with separator": {
getURL: lazyURLParse("//localhost"),
expect: "/",
},
"without scheme, with separator, with path": {
getURL: lazyURLParse("//localhost:9000/abc123"),
expect: "/abc123",
},
"no port, without scheme, with separator, with path": {
getURL: lazyURLParse("//localhost/abc123"),
expect: "/abc123",
},
"opaque with query string": {
getURL: lazyURLParse("localhost:9000/abc123?efg=456"),
expect: "/abc123",
},
"failing test": {
getURL: func() (*url.URL, error) {
endpoint := "https://service.region.amazonaws.com"
req, _ := http.NewRequest("POST", endpoint, nil)
u := req.URL
u.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"
query := u.Query()
query.Set("some-query-key", "value")
u.RawQuery = query.Encode()
return u, nil
},
expect: "/bucket/key-._~,!@#$%^&*()",
},
}
for name, c := range cases {
t.Run(name, func(t *testing.T) {
u, err := c.getURL()
if err != nil {
t.Fatalf("failed to get URL, %v", err)
}
actual := GetURIPath(u)
if e, a := c.expect, actual; e != a {
t.Errorf("expect %v path, got %v", e, a)
}
})
}
}
func TestStripExcessHeaders(t *testing.T) {
vals := []string{
"",
"123",
"1 2 3",
"1 2 3 ",
" 1 2 3",
"1 2 3",
"1 23",
"1 2 3",
"1 2 ",
" 1 2 ",
"12 3",
"12 3 1",
"12 3 1",
"12 3 1abc123",
}
expected := []string{
"",
"123",
"1 2 3",
"1 2 3",
"1 2 3",
"1 2 3",
"1 23",
"1 2 3",
"1 2",
"1 2",
"12 3",
"12 3 1",
"12 3 1",
"12 3 1abc123",
}
for i := 0; i < len(vals); i++ {
r := StripExcessSpaces(vals[i])
if e, a := expected[i], r; e != a {
t.Errorf("%d, expect %v, got %v", i, e, a)
}
}
}
var stripExcessSpaceCases = []string{
`AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`,
`123 321 123 321`,
` 123 321 123 321 `,
` 123 321 123 321 `,
"123",
"1 2 3",
" 1 2 3",
"1 2 3",
"1 23",
"1 2 3",
"1 2 ",
" 1 2 ",
"12 3",
"12 3 1",
"12 3 1",
"12 3 1abc123",
}
func BenchmarkStripExcessSpaces(b *testing.B) {
for i := 0; i < b.N; i++ {
for _, v := range stripExcessSpaceCases {
StripExcessSpaces(v)
}
}
}

View file

@ -0,0 +1,89 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go
package v4
import (
"context"
"crypto/sha256"
"encoding/hex"
"strings"
"time"
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/internal/v4"
"github.com/aws/aws-sdk-go-v2/aws"
)
// EventStreamSigner is an AWS EventStream protocol signer.
type EventStreamSigner interface {
GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error)
}
// StreamSignerOptions is the configuration options for StreamSigner.
type StreamSignerOptions struct{}
// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads.
type StreamSigner struct {
options StreamSignerOptions
credentials aws.Credentials
service string
region string
prevSignature []byte
signingKeyDeriver *v4Internal.SigningKeyDeriver
}
// NewStreamSigner returns a new AWS EventStream protocol signer.
func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner {
o := StreamSignerOptions{}
for _, fn := range optFns {
fn(&o)
}
return &StreamSigner{
options: o,
credentials: credentials,
service: service,
region: region,
signingKeyDeriver: v4Internal.NewSigningKeyDeriver(),
prevSignature: seedSignature,
}
}
// GetSignature signs the provided header and payload bytes.
func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) {
options := s.options
for _, fn := range optFns {
fn(&options)
}
prevSignature := s.prevSignature
st := v4Internal.NewSigningTime(signingTime)
sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)
scope := v4Internal.BuildCredentialScope(st, s.region, s.service)
stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st)
signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign))
s.prevSignature = signature
return signature, nil
}
func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
hash := sha256.New()
return strings.Join([]string{
"AWS4-HMAC-SHA256-PAYLOAD",
signingTime.TimeFormat(),
credentialScope,
hex.EncodeToString(previousSignature),
hex.EncodeToString(makeHash(hash, headers)),
hex.EncodeToString(makeHash(hash, payload)),
}, "\n")
}
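A hedged usage sketch (not part of the vendored file; creds, seedSignature, chunks and ctx are assumed to come from the caller): every chunk signature is chained to the previous one through prevSignature, starting from the seed signature of the initial request.
signer := NewStreamSigner(creds, "s3", "us-east-1", seedSignature)
for _, chunk := range chunks {
	sig, err := signer.GetSignature(ctx, nil, chunk, time.Now().UTC())
	if err != nil {
		// handle the signing error
	}
	_ = sig // hex-encode sig and attach it to the chunk being sent
}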

View file

@ -0,0 +1,582 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/v4.go
// with changes:
// * using different headers for sign/presign
// * don't duplicate content-length as signed header
// * use copy of smithy-go encoding/httpbinding package
// * use zap.Logger instead of smithy-go/logging
// Package v4 implements the AWS signature version 4 algorithm (commonly known
// as SigV4).
//
// For more information about SigV4, see [Signing AWS API requests] in the IAM
// user guide.
//
// While this implementation CAN work in an external context, it is developed
// primarily for SDK use and you may encounter fringe behaviors around header
// canonicalization.
//
// # Pre-escaping a request URI
//
// AWS v4 signature validation requires that the canonical string's URI path
// component must be the escaped form of the HTTP request's path.
//
// The Go HTTP client will perform escaping automatically on the HTTP request.
// This may cause signature validation errors because the request differs from
// the URI path or query from which the signature was generated.
//
// Because of this, we recommend that you explicitly escape the request when
// using this signer outside of the SDK to prevent possible signature mismatch.
// This can be done by setting URL.Opaque on the request. The signer will
// prefer that value, falling back to the return of URL.EscapedPath if unset.
//
// When setting URL.Opaque you must do so in the form of:
//
// "//<hostname>/<path>"
//
// // e.g.
// "//example.com/some/path"
//
// The leading "//" and hostname are required or the escaping will not work
// correctly.
//
// The TestStandaloneSign unit test provides a complete example of using the
// signer outside of the SDK and pre-escaping the URI path.
//
// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html
package v4
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"hash"
"net/http"
"net/textproto"
"net/url"
"sort"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/smithy/encoding/httpbinding"
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/internal/v4"
"github.com/aws/aws-sdk-go-v2/aws"
"go.uber.org/zap"
)
const (
signingAlgorithm = "AWS4-HMAC-SHA256"
authorizationHeader = "Authorization"
// Version of signing v4
Version = "SigV4"
)
// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests
type HTTPSigner interface {
SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error
}
type keyDerivator interface {
DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte
}
// SignerOptions is the SigV4 Signer options.
type SignerOptions struct {
// Disables the Signer's moving HTTP header key/value pairs from the HTTP
// request header to the request's query string. This is most commonly used
// with pre-signed requests preventing headers from being added to the
// request's query string.
DisableHeaderHoisting bool
// Disables the automatic escaping of the URI path of the request for the
// signature's canonical string's path. For services that do not need additional
// escaping, use this to disable the signer escaping the path.
//
// S3 is an example of a service that does not need additional escaping.
//
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
DisableURIPathEscaping bool
// The logger to send log messages to.
Logger *zap.Logger
// Enable logging of signed requests.
// This will enable logging of the canonical request, the string to sign, and for presigning the subsequent
// presigned URL.
LogSigning bool
// Disables setting the session token on the request as part of signing
// through X-Amz-Security-Token. This is needed for variations of v4 that
// present the token elsewhere.
DisableSessionToken bool
}
// Signer applies AWS v4 signing to given request. Use this to sign requests
// that need to be signed with AWS V4 Signatures.
type Signer struct {
options SignerOptions
keyDerivator keyDerivator
}
// NewSigner returns a new SigV4 Signer
func NewSigner(optFns ...func(signer *SignerOptions)) *Signer {
options := SignerOptions{}
for _, fn := range optFns {
fn(&options)
}
return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()}
}
type httpSigner struct {
Request *http.Request
ServiceName string
Region string
Time v4Internal.SigningTime
Credentials aws.Credentials
KeyDerivator keyDerivator
IsPreSign bool
PayloadHash string
DisableHeaderHoisting bool
DisableURIPathEscaping bool
DisableSessionToken bool
}
func (s *httpSigner) Build() (signedRequest, error) {
req := s.Request
query := req.URL.Query()
headers := req.Header
s.setRequiredSigningFields(headers, query)
// Sort Each Query Key's Values
for key := range query {
sort.Strings(query[key])
}
v4Internal.SanitizeHostForHeader(req)
credentialScope := s.buildCredentialScope()
credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope
if s.IsPreSign {
query.Set(v4Internal.AmzCredentialKey, credentialStr)
}
unsignedHeaders := headers
if s.IsPreSign && !s.DisableHeaderHoisting {
var urlValues url.Values
urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers)
for k := range urlValues {
query[k] = urlValues[k]
}
}
host := req.URL.Host
if len(req.Host) > 0 {
host = req.Host
}
var (
signedHeaders http.Header
signedHeadersStr string
canonicalHeaderStr string
)
if s.IsPreSign {
signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredPresignedHeaders, unsignedHeaders, s.Request.ContentLength)
} else {
signedHeaders, signedHeadersStr, canonicalHeaderStr = s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
}
if s.IsPreSign {
query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr)
}
var rawQuery strings.Builder
rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1))
canonicalURI := v4Internal.GetURIPath(req.URL)
if !s.DisableURIPathEscaping {
canonicalURI = httpbinding.EscapePath(canonicalURI, false)
}
canonicalString := s.buildCanonicalString(
req.Method,
canonicalURI,
rawQuery.String(),
signedHeadersStr,
canonicalHeaderStr,
)
strToSign := s.buildStringToSign(credentialScope, canonicalString)
signingSignature, err := s.buildSignature(strToSign)
if err != nil {
return signedRequest{}, err
}
if s.IsPreSign {
rawQuery.WriteString("&X-Amz-Signature=")
rawQuery.WriteString(signingSignature)
} else {
headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature))
}
req.URL.RawQuery = rawQuery.String()
return signedRequest{
Request: req,
SignedHeaders: signedHeaders,
CanonicalString: canonicalString,
StringToSign: strToSign,
PreSigned: s.IsPreSign,
}, nil
}
func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string {
const credential = "Credential="
const signedHeaders = "SignedHeaders="
const signature = "Signature="
const commaSpace = ", "
var parts strings.Builder
parts.Grow(len(signingAlgorithm) + 1 +
len(credential) + len(credentialStr) + 2 +
len(signedHeaders) + len(signedHeadersStr) + 2 +
len(signature) + len(signingSignature),
)
parts.WriteString(signingAlgorithm)
parts.WriteRune(' ')
parts.WriteString(credential)
parts.WriteString(credentialStr)
parts.WriteString(commaSpace)
parts.WriteString(signedHeaders)
parts.WriteString(signedHeadersStr)
parts.WriteString(commaSpace)
parts.WriteString(signature)
parts.WriteString(signingSignature)
return parts.String()
}
// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the
// request is made to, and time the request is signed at. The signTime allows
// you to specify that a request is signed for the future, and cannot be
// used until then.
//
// The payloadHash is the hex encoded SHA-256 hash of the request payload and
// must be provided, even if the request has no payload (aka body). If the
// request has no payload, use the hex encoded SHA-256 of an empty string as
// the payloadHash value.
//
// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
//
// Some services such as Amazon S3 accept alternative values for the payload
// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
// included in the request signature.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
//
// Sign differs from Presign in that it will sign the request using HTTP
// header values. This type of signing is intended for http.Request values that
// will not be shared, or are shared in a way the header values on the request
// will not be lost.
//
// The passed in request will be modified in place.
func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error {
options := s.options
for _, fn := range optFns {
fn(&options)
}
signer := &httpSigner{
Request: r,
PayloadHash: payloadHash,
ServiceName: service,
Region: region,
Credentials: credentials,
Time: v4Internal.NewSigningTime(signingTime.UTC()),
DisableHeaderHoisting: options.DisableHeaderHoisting,
DisableURIPathEscaping: options.DisableURIPathEscaping,
DisableSessionToken: options.DisableSessionToken,
KeyDerivator: s.keyDerivator,
}
signedRequest, err := signer.Build()
if err != nil {
return err
}
logSigningInfo(ctx, options, &signedRequest, false)
return nil
}
// PresignHTTP signs AWS v4 requests with the payload hash, service name, region
// the request is made to, and time the request is signed at. The signTime
// allows you to specify that a request is signed for the future, and cannot
// be used until then.
//
// Returns the signed URL and the map of HTTP headers that were included in the
// signature or an error if signing the request failed. For presigned requests
// these headers and their values must be included on the HTTP request when it
// is made. This is helpful to know what header values need to be shared with
// the party the presigned request will be distributed to.
//
// The payloadHash is the hex encoded SHA-256 hash of the request payload and
// must be provided, even if the request has no payload (aka body). If the
// request has no payload, use the hex encoded SHA-256 of an empty string as
// the payloadHash value.
//
// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
//
// Some services such as Amazon S3 accept alternative values for the payload
// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
// included in the request signature.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
//
// PresignHTTP differs from SignHTTP in that it will sign the request using
// query string instead of header values. This allows you to share the
// Presigned Request's URL with third parties, or distribute it throughout your
// system with minimal dependencies.
//
// PresignHTTP will not set the expires time of the presigned request
// automatically. To specify the expire duration for a request add the
// "X-Amz-Expires" query parameter on the request with the value as the
// duration in seconds the presigned URL should be considered valid for. This
// parameter is not used by all AWS services, and is most notably used by
// Amazon S3 APIs.
//
// expires := 20 * time.Minute
// query := req.URL.Query()
// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10))
// req.URL.RawQuery = query.Encode()
//
// This method does not modify the provided request.
func (s *Signer) PresignHTTP(
ctx context.Context, credentials aws.Credentials, r *http.Request,
payloadHash string, service string, region string, signingTime time.Time,
optFns ...func(*SignerOptions),
) (signedURI string, signedHeaders http.Header, err error) {
options := s.options
for _, fn := range optFns {
fn(&options)
}
signer := &httpSigner{
Request: r.Clone(r.Context()),
PayloadHash: payloadHash,
ServiceName: service,
Region: region,
Credentials: credentials,
Time: v4Internal.NewSigningTime(signingTime.UTC()),
IsPreSign: true,
DisableHeaderHoisting: options.DisableHeaderHoisting,
DisableURIPathEscaping: options.DisableURIPathEscaping,
DisableSessionToken: options.DisableSessionToken,
KeyDerivator: s.keyDerivator,
}
signedRequest, err := signer.Build()
if err != nil {
return "", nil, err
}
logSigningInfo(ctx, options, &signedRequest, true)
signedHeaders = make(http.Header)
// For the signed headers we canonicalize the header keys in the returned map.
// This avoids situations where the standard library can duplicate headers such as Host. For example, the standard
// library will set the Host header even if it is already present in lower-case form.
for k, v := range signedRequest.SignedHeaders {
key := textproto.CanonicalMIMEHeaderKey(k)
signedHeaders[key] = append(signedHeaders[key], v...)
}
return signedRequest.Request.URL.String(), signedHeaders, nil
}
func (s *httpSigner) buildCredentialScope() string {
return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName)
}
func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
query := url.Values{}
unsignedHeaders := http.Header{}
// A list of headers to be converted to lower case to mitigate a limitation from S3
lowerCaseHeaders := map[string]string{
"X-Amz-Expected-Bucket-Owner": "x-amz-expected-bucket-owner", // see #2508
"X-Amz-Request-Payer": "x-amz-request-payer", // see #2764
}
for k, h := range header {
if newKey, ok := lowerCaseHeaders[k]; ok {
k = newKey
}
if r.IsValid(k) {
query[k] = h
} else {
unsignedHeaders[k] = h
}
}
return query, unsignedHeaders
}
func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
signed = make(http.Header)
var headers []string
const hostHeader = "host"
headers = append(headers, hostHeader)
signed[hostHeader] = append(signed[hostHeader], host)
//const contentLengthHeader = "content-length"
//if length > 0 {
// headers = append(headers, contentLengthHeader)
// signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
//}
for k, v := range header {
if !rule.IsValid(k) {
continue // ignored header
}
//if strings.EqualFold(k, contentLengthHeader) {
// // prevent signing already handled content-length header.
// continue
//}
lowerCaseKey := strings.ToLower(k)
if _, ok := signed[lowerCaseKey]; ok {
// include additional values
signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
continue
}
headers = append(headers, lowerCaseKey)
signed[lowerCaseKey] = v
}
sort.Strings(headers)
signedHeaders = strings.Join(headers, ";")
var canonicalHeaders strings.Builder
n := len(headers)
const colon = ':'
for i := 0; i < n; i++ {
if headers[i] == hostHeader {
canonicalHeaders.WriteString(hostHeader)
canonicalHeaders.WriteRune(colon)
canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host))
} else {
canonicalHeaders.WriteString(headers[i])
canonicalHeaders.WriteRune(colon)
// Trim out leading, trailing, and dedup inner spaces from signed header values.
values := signed[headers[i]]
for j, v := range values {
cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
canonicalHeaders.WriteString(cleanedValue)
if j < len(values)-1 {
canonicalHeaders.WriteRune(',')
}
}
}
canonicalHeaders.WriteRune('\n')
}
canonicalHeadersStr = canonicalHeaders.String()
return signed, signedHeaders, canonicalHeadersStr
}
func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string {
return strings.Join([]string{
method,
uri,
query,
canonicalHeaders,
signedHeaders,
s.PayloadHash,
}, "\n")
}
func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string {
return strings.Join([]string{
signingAlgorithm,
s.Time.TimeFormat(),
credentialScope,
hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))),
}, "\n")
}
func makeHash(hash hash.Hash, b []byte) []byte {
hash.Reset()
hash.Write(b)
return hash.Sum(nil)
}
func (s *httpSigner) buildSignature(strToSign string) (string, error) {
key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time)
return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil
}
func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) {
amzDate := s.Time.TimeFormat()
if s.IsPreSign {
query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm)
sessionToken := s.Credentials.SessionToken
if !s.DisableSessionToken && len(sessionToken) > 0 {
query.Set("X-Amz-Security-Token", sessionToken)
}
query.Set(v4Internal.AmzDateKey, amzDate)
return
}
headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate)
if !s.DisableSessionToken && len(s.Credentials.SessionToken) > 0 {
headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken)
}
}
func logSigningInfo(_ context.Context, options SignerOptions, request *signedRequest, isPresign bool) {
if !options.LogSigning {
return
}
signedURLMsg := ""
if isPresign {
signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String())
}
if options.Logger != nil {
options.Logger.Debug(fmt.Sprintf(logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg))
}
}
type signedRequest struct {
Request *http.Request
SignedHeaders http.Header
CanonicalString string
StringToSign string
PreSigned bool
}
const logSignInfoMsg = `Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
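A minimal end-to-end sketch of header-based signing with this signer (not part of the vendored file; the endpoint and credentials are placeholders, and the payload hash is the documented SHA-256 of an empty body):
signer := NewSigner(func(o *SignerOptions) {
	o.Logger = zap.NewNop()
})
req, _ := http.NewRequest(http.MethodGet, "https://s3.example.com/bucket/key", nil)
creds := aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}
emptyPayloadHash := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
if err := signer.SignHTTP(context.Background(), creds, req, emptyPayloadHash, "s3", "us-east-1", time.Now().UTC()); err != nil {
	// handle the signing error
}
// req now carries the Authorization and X-Amz-Date headers.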

View file

@ -0,0 +1,370 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/v4_test.go
// with changes:
// * don't duplicate content-length as signed header
package v4
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strings"
"testing"
"time"
v4Internal "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/internal/v4"
"github.com/aws/aws-sdk-go-v2/aws"
)
var testCredentials = aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET", SessionToken: "SESSION"}
func buildRequest(serviceName, region, body string) (*http.Request, string) {
reader := strings.NewReader(body)
return buildRequestWithBodyReader(serviceName, region, reader)
}
func buildRequestWithBodyReader(serviceName, region string, body io.Reader) (*http.Request, string) {
var bodyLen int
type lenner interface {
Len() int
}
if lr, ok := body.(lenner); ok {
bodyLen = lr.Len()
}
endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
req, _ := http.NewRequest("POST", endpoint, body)
req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"
req.Header.Set("X-Amz-Target", "prefix.Operation")
req.Header.Set("Content-Type", "application/x-amz-json-1.0")
if bodyLen > 0 {
req.ContentLength = int64(bodyLen)
}
req.Header.Set("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
req.Header.Add("X-Amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
req.Header.Add("X-amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
h := sha256.New()
_, _ = io.Copy(h, body)
payloadHash := hex.EncodeToString(h.Sum(nil))
return req, payloadHash
}
func TestPresignRequest(t *testing.T) {
req, body := buildRequest("dynamodb", "us-east-1", "{}")
req.Header.Set("Content-Length", "2")
query := req.URL.Query()
query.Set("X-Amz-Expires", "300")
req.URL.RawQuery = query.Encode()
signer := NewSigner()
signed, headers, err := signer.PresignHTTP(context.Background(), testCredentials, req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
expectedDate := "19700101T000000Z"
expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
expectedSig := "122f0b9e091e4ba84286097e2b3404a1f1f4c4aad479adda95b7dff0ccbe5581"
expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
expectedTarget := "prefix.Operation"
q, err := url.ParseQuery(signed[strings.Index(signed, "?"):])
if err != nil {
t.Errorf("expect no error, got %v", err)
}
if e, a := expectedSig, q.Get("X-Amz-Signature"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := expectedCred, q.Get("X-Amz-Credential"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
t.Errorf("expect %v to be empty", a)
}
if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
for _, h := range strings.Split(expectedHeaders, ";") {
v := headers.Get(h)
if len(v) == 0 {
t.Errorf("expect %v, to be present in header map", h)
}
}
}
func TestPresignBodyWithArrayRequest(t *testing.T) {
req, body := buildRequest("dynamodb", "us-east-1", "{}")
req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
req.Header.Set("Content-Length", "2")
query := req.URL.Query()
query.Set("X-Amz-Expires", "300")
req.URL.RawQuery = query.Encode()
signer := NewSigner()
signed, headers, err := signer.PresignHTTP(context.Background(), testCredentials, req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
q, err := url.ParseQuery(signed[strings.Index(signed, "?"):])
if err != nil {
t.Errorf("expect no error, got %v", err)
}
expectedDate := "19700101T000000Z"
expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
expectedSig := "e3ac55addee8711b76c6d608d762cff285fe8b627a057f8b5ec9268cf82c08b1"
expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
expectedTarget := "prefix.Operation"
if e, a := expectedSig, q.Get("X-Amz-Signature"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := expectedCred, q.Get("X-Amz-Credential"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := expectedHeaders, q.Get("X-Amz-SignedHeaders"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if a := q.Get("X-Amz-Meta-Other-Header"); len(a) != 0 {
t.Errorf("expect %v to be empty, was not", a)
}
if e, a := expectedTarget, q.Get("X-Amz-Target"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
for _, h := range strings.Split(expectedHeaders, ";") {
v := headers.Get(h)
if len(v) == 0 {
t.Errorf("expect %v, to be present in header map", h)
}
}
}
func TestSignRequest(t *testing.T) {
req, body := buildRequest("dynamodb", "us-east-1", "{}")
req.Header.Set("Content-Length", "2")
signer := NewSigner()
err := signer.SignHTTP(context.Background(), testCredentials, req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
expectedDate := "19700101T000000Z"
expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-security-token;x-amz-target, Signature=a518299330494908a70222cec6899f6f32f297f8595f6df1776d998936652ad9"
q := req.Header
if e, a := expectedSig, q.Get("Authorization"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestBuildCanonicalRequest(t *testing.T) {
req, _ := buildRequest("dynamodb", "us-east-1", "{}")
req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
ctx := &httpSigner{
ServiceName: "dynamodb",
Region: "us-east-1",
Request: req,
Time: v4Internal.NewSigningTime(time.Now()),
KeyDerivator: v4Internal.NewSigningKeyDeriver(),
}
build, err := ctx.Build()
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
expected := "https://example.org/bucket/key-._~,!@#$%^&*()?Foo=a&Foo=m&Foo=o&Foo=z"
if e, a := expected, build.Request.URL.String(); e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestSigner_SignHTTP_NoReplaceRequestBody(t *testing.T) {
req, bodyHash := buildRequest("dynamodb", "us-east-1", "{}")
req.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
s := NewSigner()
origBody := req.Body
err := s.SignHTTP(context.Background(), testCredentials, req, bodyHash, "dynamodb", "us-east-1", time.Now())
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
if req.Body != origBody {
t.Errorf("expect request body to not be chagned")
}
}
func TestRequestHost(t *testing.T) {
req, _ := buildRequest("dynamodb", "us-east-1", "{}")
req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
req.Host = "myhost"
query := req.URL.Query()
query.Set("X-Amz-Expires", "5")
req.URL.RawQuery = query.Encode()
ctx := &httpSigner{
ServiceName: "dynamodb",
Region: "us-east-1",
Request: req,
Time: v4Internal.NewSigningTime(time.Now()),
KeyDerivator: v4Internal.NewSigningKeyDeriver(),
}
build, err := ctx.Build()
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
if !strings.Contains(build.CanonicalString, "host:"+req.Host) {
t.Errorf("canonical host header invalid")
}
}
func TestSign_buildCanonicalHeadersContentLengthPresent(t *testing.T) {
body := `{"description": "this is a test"}`
req, _ := buildRequest("dynamodb", "us-east-1", body)
req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
req.Host = "myhost"
contentLength := fmt.Sprintf("%d", len([]byte(body)))
req.Header.Add("Content-Length", contentLength)
query := req.URL.Query()
query.Set("X-Amz-Expires", "5")
req.URL.RawQuery = query.Encode()
ctx := &httpSigner{
ServiceName: "dynamodb",
Region: "us-east-1",
Request: req,
Time: v4Internal.NewSigningTime(time.Now()),
KeyDerivator: v4Internal.NewSigningKeyDeriver(),
}
build, err := ctx.Build()
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
if !strings.Contains(build.CanonicalString, "content-length:"+contentLength+"\n") {
t.Errorf("canonical header content-length invalid")
}
}
func TestSign_buildCanonicalHeaders(t *testing.T) {
serviceName := "mockAPI"
region := "mock-region"
endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
req, err := http.NewRequest("POST", endpoint, nil)
if err != nil {
t.Fatalf("failed to create request, %v", err)
}
req.Header.Set("FooInnerSpace", " inner space ")
req.Header.Set("FooLeadingSpace", " leading-space")
req.Header.Add("FooMultipleSpace", "no-space")
req.Header.Add("FooMultipleSpace", "\ttab-space")
req.Header.Add("FooMultipleSpace", "trailing-space ")
req.Header.Set("FooNoSpace", "no-space")
req.Header.Set("FooTabSpace", "\ttab-space\t")
req.Header.Set("FooTrailingSpace", "trailing-space ")
req.Header.Set("FooWrappedSpace", " wrapped-space ")
ctx := &httpSigner{
ServiceName: serviceName,
Region: region,
Request: req,
Time: v4Internal.NewSigningTime(time.Date(2021, 10, 20, 12, 42, 0, 0, time.UTC)),
KeyDerivator: v4Internal.NewSigningKeyDeriver(),
}
build, err := ctx.Build()
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
expectCanonicalString := strings.Join([]string{
`POST`,
`/`,
``,
`fooinnerspace:inner space`,
`fooleadingspace:leading-space`,
`foomultiplespace:no-space,tab-space,trailing-space`,
`foonospace:no-space`,
`footabspace:tab-space`,
`footrailingspace:trailing-space`,
`foowrappedspace:wrapped-space`,
`host:mockAPI.mock-region.amazonaws.com`,
`x-amz-date:20211020T124200Z`,
``,
`fooinnerspace;fooleadingspace;foomultiplespace;foonospace;footabspace;footrailingspace;foowrappedspace;host;x-amz-date`,
``,
}, "\n")
if diff := cmpDiff(expectCanonicalString, build.CanonicalString); diff != "" {
t.Errorf("expect match, got\n%s", diff)
}
}
func BenchmarkPresignRequest(b *testing.B) {
signer := NewSigner()
req, bodyHash := buildRequest("dynamodb", "us-east-1", "{}")
query := req.URL.Query()
query.Set("X-Amz-Expires", "5")
req.URL.RawQuery = query.Encode()
for i := 0; i < b.N; i++ {
signer.PresignHTTP(context.Background(), testCredentials, req, bodyHash, "dynamodb", "us-east-1", time.Now())
}
}
func BenchmarkSignRequest(b *testing.B) {
signer := NewSigner()
req, bodyHash := buildRequest("dynamodb", "us-east-1", "{}")
for i := 0; i < b.N; i++ {
signer.SignHTTP(context.Background(), testCredentials, req, bodyHash, "dynamodb", "us-east-1", time.Now())
}
}
func cmpDiff(e, a interface{}) string {
if !reflect.DeepEqual(e, a) {
return fmt.Sprintf("%v != %v", e, a)
}
return ""
}

View file

@ -4,6 +4,7 @@ import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/bluele/gcache"
"go.uber.org/zap"
@ -46,7 +47,7 @@ func (o *AccessControlCache) Get(owner user.ID, key string) bool {
result, ok := entry.(bool)
if !ok {
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return false
}

View file

@ -5,6 +5,8 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/bluele/gcache"
"go.uber.org/zap"
@ -23,6 +25,13 @@ type (
Lifetime time.Duration
Logger *zap.Logger
}
AccessBoxCacheValue struct {
Box *accessbox.Box
Attributes []object.Attribute
PutTime time.Time
Address *oid.Address
}
)
const (
@ -41,23 +50,23 @@ func DefaultAccessBoxConfig(logger *zap.Logger) *Config {
}
}
// NewAccessBoxCache creates an object of BucketCache.
// NewAccessBoxCache creates an object of AccessBoxCache.
func NewAccessBoxCache(config *Config) *AccessBoxCache {
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
return &AccessBoxCache{cache: gc, logger: config.Logger}
}
// Get returns a cached object.
func (o *AccessBoxCache) Get(address oid.Address) *accessbox.Box {
entry, err := o.cache.Get(address)
// Get returns a cached accessbox.
func (o *AccessBoxCache) Get(accessKeyID string) *AccessBoxCacheValue {
entry, err := o.cache.Get(accessKeyID)
if err != nil {
return nil
}
result, ok := entry.(*accessbox.Box)
result, ok := entry.(*AccessBoxCacheValue)
if !ok {
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
@ -65,7 +74,12 @@ func (o *AccessBoxCache) Get(address oid.Address) *accessbox.Box {
return result
}
// Put stores an object to cache.
func (o *AccessBoxCache) Put(address oid.Address, box *accessbox.Box) error {
return o.cache.Set(address, box)
// Put stores an accessbox to cache.
func (o *AccessBoxCache) Put(accessKeyID string, val *AccessBoxCacheValue) error {
return o.cache.Set(accessKeyID, val)
}
// Delete removes an accessbox from cache.
func (o *AccessBoxCache) Delete(accessKeyID string) {
o.cache.Remove(accessKeyID)
}

17
api/cache/buckets.go vendored
View file

@ -5,6 +5,7 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"github.com/bluele/gcache"
"go.uber.org/zap"
)
@ -38,15 +39,15 @@ func NewBucketCache(config *Config) *BucketCache {
}
// Get returns a cached object.
func (o *BucketCache) Get(key string) *data.BucketInfo {
entry, err := o.cache.Get(key)
func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
entry, err := o.cache.Get(formKey(ns, bktName))
if err != nil {
return nil
}
result, ok := entry.(*data.BucketInfo)
if !ok {
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
@ -56,10 +57,14 @@ func (o *BucketCache) Get(key string) *data.BucketInfo {
// Put puts an object to cache.
func (o *BucketCache) Put(bkt *data.BucketInfo) error {
return o.cache.Set(bkt.Name, bkt)
return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
}
// Delete deletes an object from cache.
func (o *BucketCache) Delete(key string) bool {
return o.cache.Remove(key)
func (o *BucketCache) Delete(bkt *data.BucketInfo) bool {
return o.cache.Remove(formKey(bkt.Zone, bkt.Name))
}
func formKey(zone, name string) string {
return name + "." + zone
}

View file

@ -1,12 +1,17 @@
package cache
import (
"strings"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zaptest/observer"
@ -18,16 +23,21 @@ func TestAccessBoxCacheType(t *testing.T) {
addr := oidtest.Address()
box := &accessbox.Box{}
val := &AccessBoxCacheValue{
Box: box,
}
err := cache.Put(addr, box)
accessKeyID := getAccessKeyID(addr)
err := cache.Put(accessKeyID, val)
require.NoError(t, err)
val := cache.Get(addr)
require.Equal(t, box, val)
resVal := cache.Get(accessKeyID)
require.Equal(t, box, resVal.Box)
require.Equal(t, 0, observedLog.Len())
err = cache.cache.Set(addr, "tmp")
err = cache.cache.Set(accessKeyID, "tmp")
require.NoError(t, err)
assertInvalidCacheEntry(t, cache.Get(addr), observedLog)
assertInvalidCacheEntry(t, cache.Get(accessKeyID), observedLog)
}
func TestBucketsCacheType(t *testing.T) {
@ -38,13 +48,13 @@ func TestBucketsCacheType(t *testing.T) {
err := cache.Put(bktInfo)
require.NoError(t, err)
val := cache.Get(bktInfo.Name)
val := cache.Get("", bktInfo.Name)
require.Equal(t, bktInfo, val)
require.Equal(t, 0, observedLog.Len())
err = cache.cache.Set(bktInfo.Name, "tmp")
err = cache.cache.Set(bktInfo.Name+"."+bktInfo.Zone, "tmp")
require.NoError(t, err)
assertInvalidCacheEntry(t, cache.Get(bktInfo.Name), observedLog)
assertInvalidCacheEntry(t, cache.Get(bktInfo.Zone, bktInfo.Name), observedLog)
}
func TestObjectNamesCacheType(t *testing.T) {
@ -176,22 +186,42 @@ func TestSettingsCacheType(t *testing.T) {
assertInvalidCacheEntry(t, cache.GetSettings(key), observedLog)
}
func TestNotificationConfigurationCacheType(t *testing.T) {
func TestFrostFSIDSubjectCacheType(t *testing.T) {
logger, observedLog := getObservedLogger()
cache := NewSystemCache(DefaultSystemConfig(logger))
cache := NewFrostfsIDCache(DefaultFrostfsIDConfig(logger))
key := "key"
notificationConfig := &data.NotificationConfiguration{}
err := cache.PutNotificationConfiguration(key, notificationConfig)
key, err := util.Uint160DecodeStringLE("4ea976429703418ef00fc4912a409b6a0b973034")
require.NoError(t, err)
val := cache.GetNotificationConfiguration(key)
require.Equal(t, notificationConfig, val)
value := &client.SubjectExtended{}
err = cache.PutSubject(key, value)
require.NoError(t, err)
val := cache.GetSubject(key)
require.Equal(t, value, val)
require.Equal(t, 0, observedLog.Len())
err = cache.cache.Set(key, "tmp")
require.NoError(t, err)
assertInvalidCacheEntry(t, cache.GetNotificationConfiguration(key), observedLog)
assertInvalidCacheEntry(t, cache.GetSubject(key), observedLog)
}
func TestFrostFSIDUserKeyCacheType(t *testing.T) {
logger, observedLog := getObservedLogger()
cache := NewFrostfsIDCache(DefaultFrostfsIDConfig(logger))
ns, name := "ns", "name"
value, err := keys.NewPrivateKey()
require.NoError(t, err)
err = cache.PutUserKey(ns, name, value.PublicKey())
require.NoError(t, err)
val := cache.GetUserKey(ns, name)
require.Equal(t, value.PublicKey(), val)
require.Equal(t, 0, observedLog.Len())
err = cache.cache.Set(ns+"/"+name, "tmp")
require.NoError(t, err)
assertInvalidCacheEntry(t, cache.GetUserKey(ns, name), observedLog)
}
func assertInvalidCacheEntry(t *testing.T, val interface{}, observedLog *observer.ObservedLogs) {
@ -204,3 +234,7 @@ func getObservedLogger() (*zap.Logger, *observer.ObservedLogs) {
loggerCore, observedLog := observer.New(zap.WarnLevel)
return zap.New(loggerCore), observedLog
}
func getAccessKeyID(addr oid.Address) string {
return strings.ReplaceAll(addr.EncodeToString(), "/", "0")
}

77
api/cache/frostfsid.go vendored Normal file
View file

@ -0,0 +1,77 @@
package cache
import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"github.com/bluele/gcache"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
"go.uber.org/zap"
)
// FrostfsIDCache provides an LRU cache for data from the frostfsid contract.
type FrostfsIDCache struct {
cache gcache.Cache
logger *zap.Logger
}
const (
// DefaultFrostfsIDCacheSize is a default maximum number of entries in cache.
DefaultFrostfsIDCacheSize = 1e4
// DefaultFrostfsIDCacheLifetime is a default lifetime of entries in cache.
DefaultFrostfsIDCacheLifetime = time.Minute
)
// DefaultFrostfsIDConfig returns new default cache expiration values.
func DefaultFrostfsIDConfig(logger *zap.Logger) *Config {
return &Config{
Size: DefaultFrostfsIDCacheSize,
Lifetime: DefaultFrostfsIDCacheLifetime,
Logger: logger,
}
}
// NewFrostfsIDCache creates an object of FrostfsIDCache.
func NewFrostfsIDCache(config *Config) *FrostfsIDCache {
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
return &FrostfsIDCache{cache: gc, logger: config.Logger}
}
// GetSubject returns a cached client.SubjectExtended. Returns nil if value is missing.
func (c *FrostfsIDCache) GetSubject(key util.Uint160) *client.SubjectExtended {
return get[client.SubjectExtended](c, key)
}
// PutSubject puts a client.SubjectExtended to cache.
func (c *FrostfsIDCache) PutSubject(key util.Uint160, subject *client.SubjectExtended) error {
return c.cache.Set(key, subject)
}
// GetUserKey returns a cached *keys.PublicKey. Returns nil if value is missing.
func (c *FrostfsIDCache) GetUserKey(ns, name string) *keys.PublicKey {
return get[keys.PublicKey](c, ns+"/"+name)
}
// PutUserKey puts a *keys.PublicKey to cache.
func (c *FrostfsIDCache) PutUserKey(ns, name string, userKey *keys.PublicKey) error {
return c.cache.Set(ns+"/"+name, userKey)
}
func get[T any](c *FrostfsIDCache, key any) *T {
entry, err := c.cache.Get(key)
if err != nil {
return nil
}
result, ok := entry.(*T)
if !ok {
c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
return result
}
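A hedged read-through sketch (not part of the diff; cachedSubject and the fetch callback are hypothetical): callers are expected to consult the cache first and populate it on a miss.
func cachedSubject(c *FrostfsIDCache, addr util.Uint160,
	fetch func(util.Uint160) (*client.SubjectExtended, error)) (*client.SubjectExtended, error) {
	if subj := c.GetSubject(addr); subj != nil {
		return subj, nil
	}
	subj, err := fetch(addr)
	if err != nil {
		return nil, err
	}
	// Best effort: a failed Put only means the next call fetches again.
	_ = c.PutSubject(addr, subj)
	return subj, nil
}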

107
api/cache/listsession.go vendored Normal file
View file

@ -0,0 +1,107 @@
package cache
import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/bluele/gcache"
"go.uber.org/zap"
)
type (
// ListSessionCache contains a cache of list sessions (used during pagination).
ListSessionCache struct {
cache gcache.Cache
logger *zap.Logger
}
// ListSessionKey is a key to find a ListSessionCache's entry.
ListSessionKey struct {
cid cid.ID
prefix string
token string
}
)
const (
// DefaultListSessionCacheLifetime is a default lifetime of entries in cache of ListObjects.
DefaultListSessionCacheLifetime = time.Second * 60
// DefaultListSessionCacheSize is a default size of cache of ListObjects.
DefaultListSessionCacheSize = 100
)
// DefaultListSessionConfig returns new default cache expiration values.
func DefaultListSessionConfig(logger *zap.Logger) *Config {
return &Config{
Size: DefaultListSessionCacheSize,
Lifetime: DefaultListSessionCacheLifetime,
Logger: logger,
}
}
func (k *ListSessionKey) String() string {
return k.cid.EncodeToString() + k.prefix + k.token
}
// NewListSessionCache is a constructor which creates an object of ListSessionCache with the given lifetime of entries.
func NewListSessionCache(config *Config) *ListSessionCache {
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).EvictedFunc(func(_ interface{}, val interface{}) {
session, ok := val.(*data.ListSession)
if !ok {
config.Logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", val)),
zap.String("expected", fmt.Sprintf("%T", session)))
}
if !session.Acquired.Load() {
session.Cancel()
}
}).Build()
return &ListSessionCache{cache: gc, logger: config.Logger}
}
// GetListSession returns a cached list session. Returns nil if the value is missing.
func (l *ListSessionCache) GetListSession(key ListSessionKey) *data.ListSession {
entry, err := l.cache.Get(key)
if err != nil {
return nil
}
result, ok := entry.(*data.ListSession)
if !ok {
l.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
return result
}
// PutListSession puts a list session to cache.
func (l *ListSessionCache) PutListSession(key ListSessionKey, session *data.ListSession) error {
s := l.GetListSession(key)
if s != nil && s != session {
if !s.Acquired.Load() {
s.Cancel()
}
}
return l.cache.Set(key, session)
}
// DeleteListSession removes key from cache.
func (l *ListSessionCache) DeleteListSession(key ListSessionKey) {
l.cache.Remove(key)
}
// CreateListSessionCacheKey returns ListSessionKey with the given CID, prefix and token.
func CreateListSessionCacheKey(cnr cid.ID, prefix, token string) ListSessionKey {
p := ListSessionKey{
cid: cnr,
prefix: prefix,
token: token,
}
return p
}
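A hedged sketch of the intended pagination flow (not part of the diff; cnrID, continuationToken and cache are placeholders, and it is assumed that data.ListSession exposes Acquired as an atomic flag with Store, as its use above suggests): the key combines container, prefix and continuation token, and a session should be marked acquired before reuse so that eviction does not cancel its context.
key := CreateListSessionCacheKey(cnrID, "photos/", continuationToken)
if session := cache.GetListSession(key); session != nil {
	session.Acquired.Store(true) // prevent the eviction hook from cancelling the session
	// ... resume listing from the cached session ...
}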

3
api/cache/names.go vendored
View file

@ -4,6 +4,7 @@ import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/bluele/gcache"
"go.uber.org/zap"
@ -48,7 +49,7 @@ func (o *ObjectsNameCache) Get(key string) *oid.Address {
result, ok := entry.(oid.Address)
if !ok {
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}

65
api/cache/network_info.go vendored Normal file
View file

@ -0,0 +1,65 @@
package cache
import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/bluele/gcache"
"go.uber.org/zap"
)
type (
// NetworkInfoCache provides cache for network info.
NetworkInfoCache struct {
cache gcache.Cache
logger *zap.Logger
}
// NetworkInfoCacheConfig stores expiration params for cache.
NetworkInfoCacheConfig struct {
Lifetime time.Duration
Logger *zap.Logger
}
)
const (
DefaultNetworkInfoCacheLifetime = 1 * time.Minute
networkInfoCacheSize = 1
networkInfoKey = "network_info"
)
// DefaultNetworkInfoConfig returns new default cache expiration values.
func DefaultNetworkInfoConfig(logger *zap.Logger) *NetworkInfoCacheConfig {
return &NetworkInfoCacheConfig{
Lifetime: DefaultNetworkInfoCacheLifetime,
Logger: logger,
}
}
// NewNetworkInfoCache creates an object of NetworkInfoCache.
func NewNetworkInfoCache(config *NetworkInfoCacheConfig) *NetworkInfoCache {
gc := gcache.New(networkInfoCacheSize).LRU().Expiration(config.Lifetime).Build()
return &NetworkInfoCache{cache: gc, logger: config.Logger}
}
func (c *NetworkInfoCache) Get() *netmap.NetworkInfo {
entry, err := c.cache.Get(networkInfoKey)
if err != nil {
return nil
}
result, ok := entry.(netmap.NetworkInfo)
if !ok {
c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
return &result
}
func (c *NetworkInfoCache) Put(info netmap.NetworkInfo) error {
return c.cache.Set(networkInfoKey, info)
}
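A hedged read-through sketch (not part of the diff; the fetch callback is hypothetical): because the cache holds a single entry under a fixed key, a wrapper only needs Get and Put.
func networkInfo(ctx context.Context, c *NetworkInfoCache,
	fetch func(context.Context) (netmap.NetworkInfo, error)) (netmap.NetworkInfo, error) {
	if ni := c.Get(); ni != nil {
		return *ni, nil
	}
	ni, err := fetch(ctx)
	if err != nil {
		return netmap.NetworkInfo{}, err
	}
	_ = c.Put(ni) // best effort: a failed Put only means the next call fetches again
	return ni, nil
}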

View file

@ -5,6 +5,7 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/bluele/gcache"
"go.uber.org/zap"
@ -47,7 +48,7 @@ func (o *ObjectsCache) GetObject(address oid.Address) *data.ExtendedObjectInfo {
result, ok := entry.(*data.ExtendedObjectInfo)
if !ok {
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}

View file

@ -7,6 +7,7 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/bluele/gcache"
"go.uber.org/zap"
@ -75,7 +76,7 @@ func (l *ObjectsListCache) GetVersions(key ObjectsListKey) []*data.NodeVersion {
result, ok := entry.([]*data.NodeVersion)
if !ok {
l.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
l.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
@ -94,7 +95,7 @@ func (l *ObjectsListCache) CleanCacheEntriesContainingObject(objectName string,
for _, key := range keys {
k, ok := key.(ObjectsListKey)
if !ok {
l.logger.Warn("invalid cache key type", zap.String("actual", fmt.Sprintf("%T", key)),
l.logger.Warn(logs.InvalidCacheKeyType, zap.String("actual", fmt.Sprintf("%T", key)),
zap.String("expected", fmt.Sprintf("%T", k)))
continue
}

72
api/cache/policy.go vendored Normal file
View file

@ -0,0 +1,72 @@
package cache
import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
"github.com/bluele/gcache"
"go.uber.org/zap"
)
// MorphPolicyCache provides an LRU cache for policy chains listed from the policy contract.
type MorphPolicyCache struct {
cache gcache.Cache
logger *zap.Logger
}
type MorphPolicyCacheKey struct {
Target engine.Target
Name chain.Name
}
const (
// DefaultMorphPolicyCacheSize is a default maximum number of entries in cache.
DefaultMorphPolicyCacheSize = 1e4
// DefaultMorphPolicyCacheLifetime is a default lifetime of entries in cache.
DefaultMorphPolicyCacheLifetime = time.Minute
)
// DefaultMorphPolicyConfig returns new default cache expiration values.
func DefaultMorphPolicyConfig(logger *zap.Logger) *Config {
return &Config{
Size: DefaultMorphPolicyCacheSize,
Lifetime: DefaultMorphPolicyCacheLifetime,
Logger: logger,
}
}
// NewMorphPolicyCache creates an object of MorphPolicyCache.
func NewMorphPolicyCache(config *Config) *MorphPolicyCache {
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
return &MorphPolicyCache{cache: gc, logger: config.Logger}
}
// Get returns a cached object. Returns nil if value is missing.
func (o *MorphPolicyCache) Get(key MorphPolicyCacheKey) []*chain.Chain {
entry, err := o.cache.Get(key)
if err != nil {
return nil
}
result, ok := entry.([]*chain.Chain)
if !ok {
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
return result
}
// Put puts an object to cache.
func (o *MorphPolicyCache) Put(key MorphPolicyCacheKey, list []*chain.Chain) error {
return o.cache.Set(key, list)
}
// Delete deletes an object from cache.
func (o *MorphPolicyCache) Delete(key MorphPolicyCacheKey) bool {
return o.cache.Remove(key)
}
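An illustrative sketch, not part of the diff: how a caller might combine Get and Put of the MorphPolicyCache above. The listChains callback is hypothetical and stands in for the real policy-contract client.

package example

import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
)

// resolveChains prefers the cache and falls back to the policy contract on a miss.
func resolveChains(c *cache.MorphPolicyCache, key cache.MorphPolicyCacheKey,
	listChains func(cache.MorphPolicyCacheKey) ([]*chain.Chain, error)) ([]*chain.Chain, error) {
	if cached := c.Get(key); cached != nil {
		return cached, nil
	}
	chains, err := listChains(key)
	if err != nil {
		return nil, err
	}
	_ = c.Put(key, chains) // a failed cache write only costs an extra contract call later
	return chains, nil
}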

47
api/cache/system.go vendored
View file

@ -5,6 +5,7 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"github.com/bluele/gcache"
"go.uber.org/zap"
)
@ -48,7 +49,7 @@ func (o *SystemCache) GetObject(key string) *data.ObjectInfo {
result, ok := entry.(*data.ObjectInfo)
if !ok {
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
@ -79,7 +80,23 @@ func (o *SystemCache) GetCORS(key string) *data.CORSConfiguration {
result, ok := entry.(*data.CORSConfiguration)
if !ok {
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
return result
}
func (o *SystemCache) GetLifecycleConfiguration(key string) *data.LifecycleConfiguration {
entry, err := o.cache.Get(key)
if err != nil {
return nil
}
result, ok := entry.(*data.LifecycleConfiguration)
if !ok {
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
@ -95,23 +112,7 @@ func (o *SystemCache) GetSettings(key string) *data.BucketSettings {
result, ok := entry.(*data.BucketSettings)
if !ok {
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
return result
}
func (o *SystemCache) GetNotificationConfiguration(key string) *data.NotificationConfiguration {
entry, err := o.cache.Get(key)
if err != nil {
return nil
}
result, ok := entry.(*data.NotificationConfiguration)
if !ok {
o.logger.Warn("invalid cache entry type", zap.String("actual", fmt.Sprintf("%T", entry)),
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
@ -148,12 +149,12 @@ func (o *SystemCache) PutCORS(key string, obj *data.CORSConfiguration) error {
return o.cache.Set(key, obj)
}
func (o *SystemCache) PutSettings(key string, settings *data.BucketSettings) error {
return o.cache.Set(key, settings)
func (o *SystemCache) PutLifecycleConfiguration(key string, obj *data.LifecycleConfiguration) error {
return o.cache.Set(key, obj)
}
func (o *SystemCache) PutNotificationConfiguration(key string, obj *data.NotificationConfiguration) error {
return o.cache.Set(key, obj)
func (o *SystemCache) PutSettings(key string, settings *data.BucketSettings) error {
return o.cache.Set(key, settings)
}
// PutTagging puts tags of a bucket or an object.

View file

@ -2,17 +2,19 @@ package data
import (
"encoding/xml"
"strings"
"time"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
const (
bktSettingsObject = ".s3-settings"
bktCORSConfigurationObject = ".s3-cors"
bktNotificationConfigurationObject = ".s3-notifications"
bktLifecycleConfigurationObject = ".s3-lifecycle"
VersioningUnversioned = "Unversioned"
VersioningEnabled = "Enabled"
@ -29,37 +31,32 @@ type (
Created time.Time
LocationConstraint string
ObjectLockEnabled bool
HomomorphicHashDisabled bool
}
// ObjectInfo holds S3 object data.
ObjectInfo struct {
ID oid.ID
CID cid.ID
IsDir bool
IsDeleteMarker bool
Bucket string
Name string
Size int64
Size uint64
ContentType string
Created time.Time
CreationEpoch uint64
HashSum string
MD5Sum string
Owner user.ID
Headers map[string]string
}
// NotificationInfo store info to send s3 notification.
NotificationInfo struct {
Name string
Version string
Size int64
HashSum string
}
// BucketSettings stores settings such as versioning.
BucketSettings struct {
Versioning string `json:"versioning"`
LockConfiguration *ObjectLockConfiguration `json:"lock_configuration"`
Versioning string
LockConfiguration *ObjectLockConfiguration
CannedACL string
OwnerKey *keys.PublicKey
}
// CORSConfiguration stores CORS configuration of a request.
@ -77,26 +74,35 @@ type (
ExposeHeaders []string `xml:"ExposeHeader" json:"ExposeHeaders"`
MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty" json:"MaxAgeSeconds,omitempty"`
}
)
// NotificationInfoFromObject creates new NotificationInfo from ObjectInfo.
func NotificationInfoFromObject(objInfo *ObjectInfo) *NotificationInfo {
return &NotificationInfo{
Name: objInfo.Name,
Version: objInfo.VersionID(),
Size: objInfo.Size,
HashSum: objInfo.HashSum,
// ObjectVersion stores object version info.
ObjectVersion struct {
BktInfo *BucketInfo
ObjectName string
VersionID string
NoErrorOnDeleteMarker bool
}
}
// CreatedObjectInfo stores created object info.
CreatedObjectInfo struct {
ID oid.ID
Size uint64
HashSum []byte
MD5Sum []byte
CreationEpoch uint64
}
)
// SettingsObjectName is a system name for a bucket settings file.
func (b *BucketInfo) SettingsObjectName() string { return bktSettingsObject }
// CORSObjectName returns a system name for a bucket CORS configuration file.
func (b *BucketInfo) CORSObjectName() string { return bktCORSConfigurationObject }
func (b *BucketInfo) CORSObjectName() string {
return b.CID.EncodeToString() + bktCORSConfigurationObject
}
func (b *BucketInfo) NotificationConfigurationObjectName() string {
return bktNotificationConfigurationObject
func (b *BucketInfo) LifecycleConfigurationObjectName() string {
return b.CID.EncodeToString() + bktLifecycleConfigurationObject
}
// VersionID returns object version from ObjectInfo.
@ -114,6 +120,13 @@ func (o *ObjectInfo) Address() oid.Address {
return addr
}
func (o *ObjectInfo) ETag(md5Enabled bool) string {
if md5Enabled && len(o.MD5Sum) > 0 {
return o.MD5Sum
}
return o.HashSum
}
func (b BucketSettings) Unversioned() bool {
return b.Versioning == VersioningUnversioned
}
@ -125,3 +138,11 @@ func (b BucketSettings) VersioningEnabled() bool {
func (b BucketSettings) VersioningSuspended() bool {
return b.Versioning == VersioningSuspended
}
func Quote(val string) string {
return "\"" + val + "\""
}
func UnQuote(val string) string {
return strings.Trim(val, "\"")
}

56
api/data/lifecycle.go Normal file
View file

@ -0,0 +1,56 @@
package data
import "encoding/xml"
const (
LifecycleStatusEnabled = "Enabled"
LifecycleStatusDisabled = "Disabled"
)
type (
LifecycleConfiguration struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LifecycleConfiguration" json:"-"`
Rules []LifecycleRule `xml:"Rule"`
}
LifecycleRule struct {
Status string `xml:"Status,omitempty"`
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"`
Expiration *LifecycleExpiration `xml:"Expiration,omitempty"`
Filter *LifecycleRuleFilter `xml:"Filter,omitempty"`
ID string `xml:"ID,omitempty"`
NonCurrentVersionExpiration *NonCurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
Prefix string `xml:"Prefix,omitempty"`
}
AbortIncompleteMultipartUpload struct {
DaysAfterInitiation *int `xml:"DaysAfterInitiation,omitempty"`
}
LifecycleExpiration struct {
Date string `xml:"Date,omitempty"`
Days *int `xml:"Days,omitempty"`
Epoch *uint64 `xml:"Epoch,omitempty"`
ExpiredObjectDeleteMarker *bool `xml:"ExpiredObjectDeleteMarker,omitempty"`
}
LifecycleRuleFilter struct {
And *LifecycleRuleAndOperator `xml:"And,omitempty"`
ObjectSizeGreaterThan *uint64 `xml:"ObjectSizeGreaterThan,omitempty"`
ObjectSizeLessThan *uint64 `xml:"ObjectSizeLessThan,omitempty"`
Prefix string `xml:"Prefix,omitempty"`
Tag *Tag `xml:"Tag,omitempty"`
}
LifecycleRuleAndOperator struct {
ObjectSizeGreaterThan *uint64 `xml:"ObjectSizeGreaterThan,omitempty"`
ObjectSizeLessThan *uint64 `xml:"ObjectSizeLessThan,omitempty"`
Prefix string `xml:"Prefix,omitempty"`
Tags []Tag `xml:"Tag"`
}
NonCurrentVersionExpiration struct {
NewerNonCurrentVersions *int `xml:"NewerNoncurrentVersions,omitempty"`
NonCurrentDays *int `xml:"NoncurrentDays,omitempty"`
}
)
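For illustration only (not from the diff): a minimal sketch of unmarshalling a lifecycle document into the types above with the standard encoding/xml package; the rule contents are made up.

package main

import (
	"encoding/xml"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)

func main() {
	// A made-up lifecycle document of the shape an S3 client could send.
	raw := `<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Rule>
    <ID>expire-tmp</ID>
    <Status>Enabled</Status>
    <Filter><Prefix>tmp/</Prefix></Filter>
    <Expiration><Days>30</Days></Expiration>
    <AbortIncompleteMultipartUpload><DaysAfterInitiation>7</DaysAfterInitiation></AbortIncompleteMultipartUpload>
  </Rule>
</LifecycleConfiguration>`

	var cfg data.LifecycleConfiguration
	if err := xml.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(len(cfg.Rules), cfg.Rules[0].ID, *cfg.Rules[0].Expiration.Days) // 1 expire-tmp 30
}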

19
api/data/listsession.go Normal file
View file

@ -0,0 +1,19 @@
package data
import (
"context"
"sync/atomic"
)
type VersionsStream interface {
Next(ctx context.Context) (*NodeVersion, error)
}
type ListSession struct {
Next []*ExtendedNodeVersion
Stream VersionsStream
NamesMap map[string]struct{}
Context context.Context
Cancel context.CancelFunc
Acquired atomic.Bool
}

View file

@ -1,39 +0,0 @@
package data
type (
NotificationConfiguration struct {
QueueConfigurations []QueueConfiguration `xml:"QueueConfiguration" json:"QueueConfigurations"`
// Not supported topics
TopicConfigurations []TopicConfiguration `xml:"TopicConfiguration" json:"TopicConfigurations"`
LambdaFunctionConfigurations []LambdaFunctionConfiguration `xml:"CloudFunctionConfiguration" json:"CloudFunctionConfigurations"`
}
QueueConfiguration struct {
ID string `xml:"Id" json:"Id"`
QueueArn string `xml:"Queue" json:"Queue"`
Events []string `xml:"Event" json:"Events"`
Filter Filter `xml:"Filter" json:"Filter"`
}
Filter struct {
Key Key `xml:"S3Key" json:"S3Key"`
}
Key struct {
FilterRules []FilterRule `xml:"FilterRule" json:"FilterRules"`
}
FilterRule struct {
Name string `xml:"Name" json:"Name"`
Value string `xml:"Value" json:"Value"`
}
// TopicConfiguration and LambdaFunctionConfiguration -- we don't support these configurations,
// but we need them to detect in notification configurations in requests.
TopicConfiguration struct{}
LambdaFunctionConfiguration struct{}
)
func (n NotificationConfiguration) IsEmpty() bool {
return len(n.QueueConfigurations) == 0 && len(n.TopicConfigurations) == 0 && len(n.LambdaFunctionConfigurations) == 0
}

30
api/data/tagging.go Normal file
View file

@ -0,0 +1,30 @@
package data
import "encoding/xml"
// Tagging contains tag set.
type Tagging struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Tagging"`
TagSet []Tag `xml:"TagSet>Tag"`
}
// Tag is an AWS key-value tag.
type Tag struct {
Key string
Value string
}
type GetObjectTaggingParams struct {
ObjectVersion *ObjectVersion
// NodeVersion can be nil. If it is not nil, we save one request to the tree service.
NodeVersion *NodeVersion // optional
}
type PutObjectTaggingParams struct {
ObjectVersion *ObjectVersion
TagSet map[string]string
// NodeVersion can be nil. If it is not nil, we save one request to the tree service.
NodeVersion *NodeVersion // optional
}
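A small sketch (not part of the diff) of how the TagSet>Tag path in the Tagging type above serializes; the tag values are made up.

package main

import (
	"encoding/xml"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)

func main() {
	t := data.Tagging{TagSet: []data.Tag{{Key: "env", Value: "dev"}}}
	out, err := xml.MarshalIndent(t, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints:
	// <Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
	//   <TagSet>
	//     <Tag>
	//       <Key>env</Key>
	//       <Value>dev</Value>
	//     </Tag>
	//   </TagSet>
	// </Tagging>
	fmt.Println(string(out))
}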

View file

@ -16,19 +16,31 @@ const (
// NodeVersion represent node from tree service.
type NodeVersion struct {
BaseNodeVersion
DeleteMarker *DeleteMarkerInfo
IsUnversioned bool
IsCombined bool
}
func (v NodeVersion) IsDeleteMarker() bool {
return v.DeleteMarker != nil
// ExtendedNodeVersion contains additional node info to be able to sort versions by timestamp.
type ExtendedNodeVersion struct {
NodeVersion *NodeVersion
IsLatest bool
DirName string
}
// DeleteMarkerInfo is used to save object info if node in the tree service is delete marker.
// We need this information because the "delete marker" object is no longer stored in FrostFS.
type DeleteMarkerInfo struct {
Created time.Time
Owner user.ID
func (e ExtendedNodeVersion) Version() string {
if e.NodeVersion.IsUnversioned {
return UnversionedObjectVersionID
}
return e.NodeVersion.OID.EncodeToString()
}
func (e ExtendedNodeVersion) Name() string {
if e.DirName != "" {
return e.DirName
}
return e.NodeVersion.FilePath
}
// ExtendedObjectInfo contains additional node info to be able to sort versions by timestamp.
@ -53,9 +65,32 @@ type BaseNodeVersion struct {
ParenID uint64
OID oid.ID
Timestamp uint64
Size int64
Size uint64
ETag string
MD5 string
FilePath string
Created *time.Time
Owner *user.ID
IsDeleteMarker bool
CreationEpoch uint64
}
func (v *BaseNodeVersion) GetETag(md5Enabled bool) string {
if md5Enabled && len(v.MD5) > 0 {
return v.MD5
}
return v.ETag
}
// IsFilledExtra returns true if the node was created by gate version v0.29.x or later.
func (v BaseNodeVersion) IsFilledExtra() bool {
return v.Created != nil && v.Owner != nil
}
func (v *BaseNodeVersion) FillExtra(owner *user.ID, created *time.Time, realSize uint64) {
v.Owner = owner
v.Created = created
v.Size = realSize
}
type ObjectTaggingInfo struct {
@ -74,23 +109,42 @@ type MultipartInfo struct {
Owner user.ID
Created time.Time
Meta map[string]string
CopiesNumber uint32
CopiesNumbers []uint32
Finished bool
CreationEpoch uint64
}
// PartInfo is upload information about part.
type PartInfo struct {
Key string
UploadID string
Number int
OID oid.ID
Size int64
ETag string
Created time.Time
Key string `json:"key"`
UploadID string `json:"uploadId"`
Number int `json:"number"`
OID oid.ID `json:"oid"`
Size uint64 `json:"size"`
ETag string `json:"etag"`
MD5 string `json:"md5"`
Created time.Time `json:"created"`
}
type PartInfoExtended struct {
PartInfo
// Timestamp is used to find the latest version of part info in case of tree split
// when there are multiple nodes for the same part.
Timestamp uint64
}
// ToHeaderString forms a short part representation to use in the S3-Completed-Parts header.
func (p *PartInfo) ToHeaderString() string {
return strconv.Itoa(p.Number) + "-" + strconv.FormatInt(p.Size, 10) + "-" + p.ETag
// ETag value contains SHA256 checksum which is used while getting object parts attributes.
return strconv.Itoa(p.Number) + "-" + strconv.FormatUint(p.Size, 10) + "-" + p.ETag
}
func (p *PartInfo) GetETag(md5Enabled bool) string {
if md5Enabled && len(p.MD5) > 0 {
return p.MD5
}
return p.ETag
}
// LockInfo is lock information to create appropriate tree node.
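For illustration only (not from the diff): the S3-Completed-Parts entry produced by ToHeaderString above, with made-up part values and a truncated checksum.

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)

func main() {
	// Made-up part: number 1, 5 MiB payload, truncated SHA256-style ETag.
	p := data.PartInfo{Number: 1, Size: 5242880, ETag: "e3b0c44298fc1c14"}
	fmt.Println(p.ToHeaderString()) // 1-5242880-e3b0c44298fc1c14
}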

View file

@ -1,8 +1,13 @@
package errors
import (
"errors"
"fmt"
"net/http"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
)
type (
@ -24,6 +29,7 @@ type (
const (
_ ErrorCode = iota
ErrAccessDenied
ErrAccessControlListNotSupported
ErrBadDigest
ErrEntityTooSmall
ErrEntityTooLarge
@ -71,6 +77,7 @@ const (
ErrInvalidArgument
ErrInvalidTagKey
ErrInvalidTagValue
ErrInvalidTagKeyUniqueness
ErrInvalidTagsSizeExceed
ErrNotImplemented
ErrPreconditionFailed
@ -87,6 +94,7 @@ const (
ErrBucketNotEmpty
ErrAllAccessDisabled
ErrMalformedPolicy
ErrMalformedPolicyNotPrincipal
ErrMissingFields
ErrMissingCredTag
ErrCredMalformed
@ -146,6 +154,7 @@ const (
ErrInvalidEncryptionAlgorithm
ErrInvalidSSECustomerKey
ErrMissingSSECustomerKey
ErrMissingSSECustomerAlgorithm
ErrMissingSSECustomerKeyMD5
ErrSSECustomerKeyMD5Mismatch
ErrInvalidSSECustomerParameters
@ -175,9 +184,15 @@ const (
// Add new extended error codes here.
ErrInvalidObjectName
ErrOperationTimedOut
ErrGatewayTimeout
ErrOperationMaxedOut
ErrInvalidRequest
ErrInvalidRequestLargeCopy
ErrInvalidStorageClass
VersionIDMarkerWithoutKeyMarker
ErrInvalidRangeLength
ErrRangeOutOfBounds
ErrMissingContentRange
ErrMalformedJSON
ErrInsecureClientRequest
@ -309,6 +324,12 @@ var errorCodes = errorCodeMap{
Description: "Invalid storage class.",
HTTPStatusCode: http.StatusBadRequest,
},
VersionIDMarkerWithoutKeyMarker: {
ErrCode: VersionIDMarkerWithoutKeyMarker,
Code: "VersionIDMarkerWithoutKeyMarker",
Description: "A version-id marker cannot be specified without a key marker.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidRequestBody: {
ErrCode: ErrInvalidRequestBody,
Code: "InvalidArgument",
@ -318,7 +339,7 @@ var errorCodes = errorCodeMap{
ErrInvalidMaxUploads: {
ErrCode: ErrInvalidMaxUploads,
Code: "InvalidArgument",
Description: "Argument max-uploads must be an integer between 0 and 2147483647",
Description: "Argument max-uploads must be an integer from 1 to 1000",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidMaxKeys: {
@ -363,6 +384,12 @@ var errorCodes = errorCodeMap{
Description: "Access Denied.",
HTTPStatusCode: http.StatusForbidden,
},
ErrAccessControlListNotSupported: {
ErrCode: ErrAccessControlListNotSupported,
Code: "AccessControlListNotSupported",
Description: "The bucket does not allow ACLs.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBadDigest: {
ErrCode: ErrBadDigest,
Code: "BadDigest",
@ -522,13 +549,19 @@ var errorCodes = errorCodeMap{
ErrInvalidTagKey: {
ErrCode: ErrInvalidTagKey,
Code: "InvalidTag",
Description: "The TagValue you have provided is invalid",
Description: "The TagKey you have provided is invalid",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTagValue: {
ErrCode: ErrInvalidTagValue,
Code: "InvalidTag",
Description: "The TagKey you have provided is invalid",
Description: "The TagValue you have provided is invalid",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTagKeyUniqueness: {
ErrCode: ErrInvalidTagKeyUniqueness,
Code: "InvalidTag",
Description: "Cannot provide multiple Tags with the same key",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidTagsSizeExceed: {
@ -594,7 +627,7 @@ var errorCodes = errorCodeMap{
ErrAuthorizationHeaderMalformed: {
ErrCode: ErrAuthorizationHeaderMalformed,
Code: "AuthorizationHeaderMalformed",
Description: "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.",
Description: "The authorization header that you provided is not valid.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMalformedPOSTRequest: {
@ -639,6 +672,12 @@ var errorCodes = errorCodeMap{
Description: "Policy has invalid resource.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMalformedPolicyNotPrincipal: {
ErrCode: ErrMalformedPolicyNotPrincipal,
Code: "MalformedPolicy",
Description: "Allow with NotPrincipal is not allowed.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingFields: {
ErrCode: ErrMissingFields,
Code: "MissingFields",
@ -1044,6 +1083,12 @@ var errorCodes = errorCodeMap{
Description: "Requests specifying Server Side Encryption with Customer provided keys must provide an appropriate secret key.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingSSECustomerAlgorithm: {
ErrCode: ErrMissingSSECustomerAlgorithm,
Code: "InvalidArgument",
Description: "Requests specifying Server Side Encryption with Customer provided keys must provide a valid encryption algorithm.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingSSECustomerKeyMD5: {
ErrCode: ErrMissingSSECustomerKeyMD5,
Code: "InvalidArgument",
@ -1124,6 +1169,12 @@ var errorCodes = errorCodeMap{
Description: "A timeout occurred while trying to lock a resource, please reduce your request rate",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrGatewayTimeout: {
ErrCode: ErrGatewayTimeout,
Code: "GatewayTimeout",
Description: "The server is acting as a gateway and cannot get a response in time",
HTTPStatusCode: http.StatusGatewayTimeout,
},
ErrOperationMaxedOut: {
ErrCode: ErrOperationMaxedOut,
Code: "SlowDown",
@ -1152,6 +1203,12 @@ var errorCodes = errorCodeMap{
Description: "Invalid Request",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidRequestLargeCopy: {
ErrCode: ErrInvalidRequestLargeCopy,
Code: "InvalidRequest",
Description: "CopyObject request made on objects larger than 5GB in size.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrIncorrectContinuationToken: {
ErrCode: ErrIncorrectContinuationToken,
Code: "InvalidArgument",
@ -1688,11 +1745,30 @@ var errorCodes = errorCodeMap{
Description: "Part number must be an integer between 1 and 10000, inclusive",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidRangeLength: {
ErrCode: ErrInvalidRangeLength,
Code: "InvalidRange",
Description: "Provided range length must be equal to content length",
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
},
ErrRangeOutOfBounds: {
ErrCode: ErrRangeOutOfBounds,
Code: "InvalidRange",
Description: "Provided range is outside of object bounds",
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
},
ErrMissingContentRange: {
ErrCode: ErrMissingContentRange,
Code: "MissingContentRange",
Description: "Content-Range header is mandatory for this type of request",
HTTPStatusCode: http.StatusBadRequest,
},
// Add your error structure here.
}
// IsS3Error checks if the provided error is a specific s3 error.
func IsS3Error(err error, code ErrorCode) bool {
err = frosterr.UnwrapErr(err)
e, ok := err.(Error)
return ok && e.ErrCode == code
}
@ -1729,6 +1805,26 @@ func GetAPIErrorWithError(code ErrorCode, err error) Error {
return errorCodes.toAPIErrWithErr(code, err)
}
// TransformToS3Error converts FrostFS error to the corresponding S3 error type.
func TransformToS3Error(err error) error {
err = frosterr.UnwrapErr(err) // this wouldn't work with errors.Join
var s3err Error
if errors.As(err, &s3err) {
return err
}
if errors.Is(err, frostfs.ErrAccessDenied) ||
errors.Is(err, tree.ErrNodeAccessDenied) {
return GetAPIError(ErrAccessDenied)
}
if errors.Is(err, frostfs.ErrGatewayTimeout) {
return GetAPIError(ErrGatewayTimeout)
}
return GetAPIError(ErrInternalError)
}
// ObjectError -- error that is linked to a specific object.
type ObjectError struct {
Err error

View file

@ -2,7 +2,12 @@ package errors
import (
"errors"
"fmt"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
"github.com/stretchr/testify/require"
)
func BenchmarkErrCode(b *testing.B) {
@ -24,3 +29,56 @@ func BenchmarkErrorsIs(b *testing.B) {
}
}
}
func TestTransformS3Errors(t *testing.T) {
for _, tc := range []struct {
name string
err error
expected ErrorCode
}{
{
name: "simple std error to internal error",
err: errors.New("some error"),
expected: ErrInternalError,
},
{
name: "layer access denied error to s3 access denied error",
err: frostfs.ErrAccessDenied,
expected: ErrAccessDenied,
},
{
name: "wrapped layer access denied error to s3 access denied error",
err: fmt.Errorf("wrap: %w", frostfs.ErrAccessDenied),
expected: ErrAccessDenied,
},
{
name: "layer node access denied error to s3 access denied error",
err: tree.ErrNodeAccessDenied,
expected: ErrAccessDenied,
},
{
name: "layer gateway timeout error to s3 gateway timeout error",
err: frostfs.ErrGatewayTimeout,
expected: ErrGatewayTimeout,
},
{
name: "s3 error to s3 error",
err: GetAPIError(ErrInvalidPart),
expected: ErrInvalidPart,
},
{
name: "wrapped s3 error to s3 error",
err: fmt.Errorf("wrap: %w", GetAPIError(ErrInvalidPart)),
expected: ErrInvalidPart,
},
} {
t.Run(tc.name, func(t *testing.T) {
err := TransformToS3Error(tc.err)
s3err, ok := err.(Error)
require.True(t, ok, "error must be s3 error")
require.Equalf(t, tc.expected, s3err.ErrCode,
"expected: '%s', got: '%s'",
GetAPIError(tc.expected).Code, GetAPIError(s3err.ErrCode).Code)
})
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -3,78 +3,129 @@ package handler
import (
"encoding/xml"
"errors"
"fmt"
"io"
"strconv"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"go.uber.org/zap"
)
type (
handler struct {
log *zap.Logger
obj layer.Client
notificator Notificator
cfg *Config
}
Notificator interface {
SendNotifications(topics map[string]string, p *SendNotificationParams) error
SendTestNotification(topic, bucketName, requestID, HostID string, now time.Time) error
obj *layer.Layer
cfg Config
ape APE
frostfsid FrostFSID
}
// Config contains data which handler needs to keep.
Config struct {
Policy PlacementPolicy
XMLDecoder XMLDecoderProvider
DefaultMaxAge int
NotificatorEnabled bool
CopiesNumber uint32
ResolveZoneList []string
IsResolveListAllow bool // True if ResolveZoneList contains allowed zones
CompleteMultipartKeepalive time.Duration
Config interface {
DefaultPlacementPolicy(namespace string) netmap.PlacementPolicy
PlacementPolicy(namespace, constraint string) (netmap.PlacementPolicy, bool)
CopiesNumbers(namespace, constraint string) ([]uint32, bool)
DefaultCopiesNumbers(namespace string) []uint32
NewXMLDecoder(reader io.Reader, agent string) *xml.Decoder
DefaultMaxAge() int
ResolveZoneList() []string
IsResolveListAllow() bool
BypassContentEncodingInChunks(agent string) bool
MD5Enabled() bool
RetryMaxAttempts() int
RetryMaxBackoff() time.Duration
RetryStrategy() RetryStrategy
TLSTerminationHeader() string
}
PlacementPolicy interface {
Default() netmap.PlacementPolicy
Get(string) (netmap.PlacementPolicy, bool)
FrostFSID interface {
GetUserAddress(account, user string) (string, error)
GetUserKey(account, name string) (string, error)
}
XMLDecoderProvider interface {
NewCompleteMultipartDecoder(io.Reader) *xml.Decoder
// APE is the Access Policy Engine interface used to save policy and ACL info to different places.
APE interface {
PutBucketPolicy(ns string, cnrID cid.ID, policy []byte, chains []*chain.Chain) error
DeleteBucketPolicy(ns string, cnrID cid.ID, chainIDs []chain.ID) error
GetBucketPolicy(ns string, cnrID cid.ID) ([]byte, error)
SaveACLChains(cid string, chains []*chain.Chain) error
}
)
type RetryStrategy string
const (
// DefaultPolicy is a default policy of placing containers in FrostFS if it's not set at the request.
DefaultPolicy = "REP 3"
// DefaultCopiesNumber is a default number of object copies that is enough to consider put successful if it's not set in config.
DefaultCopiesNumber uint32 = 0
RetryStrategyExponential = "exponential"
RetryStrategyConstant = "constant"
)
var _ api.Handler = (*handler)(nil)
// New creates new api.Handler using given logger and client.
func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg *Config) (api.Handler, error) {
func New(log *zap.Logger, obj *layer.Layer, cfg Config, storage APE, ffsid FrostFSID) (api.Handler, error) {
switch {
case obj == nil:
return nil, errors.New("empty FrostFS Object Layer")
case log == nil:
return nil, errors.New("empty logger")
}
if !cfg.NotificatorEnabled {
log.Warn("notificator is disabled, s3 won't produce notification events")
} else if notificator == nil {
return nil, errors.New("empty notificator")
case storage == nil:
return nil, errors.New("empty policy storage")
case ffsid == nil:
return nil, errors.New("empty frostfsid")
}
return &handler{
log: log,
obj: obj,
cfg: cfg,
notificator: notificator,
ape: storage,
frostfsid: ffsid,
}, nil
}
// pickCopiesNumbers chooses the return value following this logic:
// 1) the copies numbers array sent in the request header has the highest priority;
// 2) then the copies numbers array for the corresponding location constraint from the config file;
// 3) finally, the default copies number from the config file wrapped into an array.
func (h *handler) pickCopiesNumbers(metadata map[string]string, namespace, locationConstraint string) ([]uint32, error) {
copiesNumbersStr, ok := metadata[layer.AttributeFrostfsCopiesNumber]
if ok {
result, err := parseCopiesNumbers(copiesNumbersStr)
if err != nil {
return nil, err
}
return result, nil
}
copiesNumbers, ok := h.cfg.CopiesNumbers(namespace, locationConstraint)
if ok {
return copiesNumbers, nil
}
return h.cfg.DefaultCopiesNumbers(namespace), nil
}
func parseCopiesNumbers(copiesNumbersStr string) ([]uint32, error) {
var result []uint32
copiesNumbersSplit := strings.Split(copiesNumbersStr, ",")
for i := range copiesNumbersSplit {
item := strings.ReplaceAll(copiesNumbersSplit[i], " ", "")
if len(item) == 0 {
continue
}
copiesNumber, err := strconv.ParseUint(item, 10, 32)
if err != nil {
return nil, fmt.Errorf("parse copies number: %w", err)
}
result = append(result, uint32(copiesNumber))
}
return result, nil
}

69
api/handler/api_test.go Normal file
View file

@ -0,0 +1,69 @@
package handler
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestCopiesNumberPicker(t *testing.T) {
var locationConstraints = map[string][]uint32{}
locationConstraint1 := "one"
locationConstraint2 := "two"
locationConstraints[locationConstraint1] = []uint32{2, 3, 4}
config := &configMock{
copiesNumbers: locationConstraints,
defaultCopiesNumbers: []uint32{1},
}
h := handler{
cfg: config,
}
metadata := map[string]string{}
t.Run("pick default copies number", func(t *testing.T) {
metadata["somekey1"] = "5, 6, 7"
expectedCopiesNumbers := []uint32{1}
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
require.NoError(t, err)
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
})
t.Run("pick copies number vector according to location constraint", func(t *testing.T) {
metadata["somekey2"] = "6, 7, 8"
expectedCopiesNumbers := []uint32{2, 3, 4}
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint1)
require.NoError(t, err)
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
})
t.Run("pick copies number from metadata", func(t *testing.T) {
metadata["frostfs-copies-number"] = "7, 8, 9"
expectedCopiesNumbers := []uint32{7, 8, 9}
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
require.NoError(t, err)
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
})
t.Run("pick copies number from metadata with no space", func(t *testing.T) {
metadata["frostfs-copies-number"] = "7,8,9"
expectedCopiesNumbers := []uint32{7, 8, 9}
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
require.NoError(t, err)
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
})
t.Run("pick copies number from metadata with trailing comma", func(t *testing.T) {
metadata["frostfs-copies-number"] = "11, 12, 13, "
expectedCopiesNumbers := []uint32{11, 12, 13}
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
require.NoError(t, err)
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
})
}

View file

@ -1,6 +1,8 @@
package handler
import (
"encoding/base64"
"encoding/hex"
"fmt"
"net/http"
"strconv"
@ -10,6 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"go.uber.org/zap"
)
@ -17,7 +20,7 @@ type (
GetObjectAttributesResponse struct {
ETag string `xml:"ETag,omitempty"`
Checksum *Checksum `xml:"Checksum,omitempty"`
ObjectSize int64 `xml:"ObjectSize,omitempty"`
ObjectSize uint64 `xml:"ObjectSize,omitempty"`
StorageClass string `xml:"StorageClass,omitempty"`
ObjectParts *ObjectParts `xml:"ObjectParts,omitempty"`
}
@ -67,17 +70,18 @@ var validAttributes = map[string]struct{}{
}
func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
params, err := parseGetObjectAttributeArgs(r)
params, err := parseGetObjectAttributeArgs(r, h.reqLogger(ctx))
if err != nil {
h.logAndSendError(w, "invalid request", reqInfo, err)
h.logAndSendError(ctx, w, "invalid request", reqInfo, err)
return
}
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
return
}
@ -87,44 +91,44 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
VersionID: params.VersionID,
}
extendedInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), p)
extendedInfo, err := h.obj.GetExtendedObjectInfo(ctx, p)
if err != nil {
h.logAndSendError(w, "could not fetch object info", reqInfo, err)
h.logAndSendError(ctx, w, "could not fetch object info", reqInfo, err)
return
}
info := extendedInfo.ObjectInfo
encryptionParams, err := formEncryptionParams(r)
encryptionParams, err := h.formEncryptionParams(r)
if err != nil {
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
return
}
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
return
}
if err = checkPreconditions(info, params.Conditional); err != nil {
h.logAndSendError(w, "precondition failed", reqInfo, err)
if err = checkPreconditions(info, params.Conditional, h.cfg.MD5Enabled()); err != nil {
h.logAndSendError(ctx, w, "precondition failed", reqInfo, err)
return
}
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
if err != nil {
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
return
}
response, err := encodeToObjectAttributesResponse(info, params)
response, err := encodeToObjectAttributesResponse(info, params, h.cfg.MD5Enabled())
if err != nil {
h.logAndSendError(w, "couldn't encode object info to response", reqInfo, err)
h.logAndSendError(ctx, w, "couldn't encode object info to response", reqInfo, err)
return
}
writeAttributesHeaders(w.Header(), extendedInfo, bktSettings.Unversioned())
if err = api.EncodeToResponse(w, response); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
if err = middleware.EncodeToResponse(w, response); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
}
}
@ -134,14 +138,14 @@ func writeAttributesHeaders(h http.Header, info *data.ExtendedObjectInfo, isBuck
h.Set(api.AmzVersionID, info.Version())
}
if info.NodeVersion.IsDeleteMarker() {
if info.NodeVersion.IsDeleteMarker {
h.Set(api.AmzDeleteMarker, strconv.FormatBool(true))
}
// x-amz-request-charged
}
func parseGetObjectAttributeArgs(r *http.Request) (*GetObjectAttributesArgs, error) {
func parseGetObjectAttributeArgs(r *http.Request, log *zap.Logger) (*GetObjectAttributesArgs, error) {
res := &GetObjectAttributesArgs{
VersionID: r.URL.Query().Get(api.QueryVersionID),
}
@ -174,23 +178,27 @@ func parseGetObjectAttributeArgs(r *http.Request) (*GetObjectAttributesArgs, err
}
}
res.Conditional, err = parseConditionalHeaders(r.Header)
return res, err
res.Conditional = parseConditionalHeaders(r.Header, log)
return res, nil
}
func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttributesArgs) (*GetObjectAttributesResponse, error) {
func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttributesArgs, md5Enabled bool) (*GetObjectAttributesResponse, error) {
resp := &GetObjectAttributesResponse{}
for _, attr := range p.Attributes {
switch attr {
case eTag:
resp.ETag = info.HashSum
resp.ETag = data.Quote(info.ETag(md5Enabled))
case storageClass:
resp.StorageClass = "STANDARD"
resp.StorageClass = api.DefaultStorageClass
case objectSize:
resp.ObjectSize = info.Size
case checksum:
resp.Checksum = &Checksum{ChecksumSHA256: info.HashSum}
checksumBytes, err := hex.DecodeString(info.HashSum)
if err != nil {
return nil, fmt.Errorf("form upload attributes: %w", err)
}
resp.Checksum = &Checksum{ChecksumSHA256: base64.StdEncoding.EncodeToString(checksumBytes)}
case objectParts:
parts, err := formUploadAttributes(info, p.MaxParts, p.PartNumberMarker)
if err != nil {
@ -218,10 +226,15 @@ func formUploadAttributes(info *data.ObjectInfo, maxParts, marker int) (*ObjectP
if err != nil {
return nil, fmt.Errorf("invalid completed part: %w", err)
}
// ETag value contains SHA256 checksum.
checksumBytes, err := hex.DecodeString(part.ETag)
if err != nil {
return nil, fmt.Errorf("invalid sha256 checksum in completed part: %w", err)
}
parts[i] = Part{
PartNumber: part.PartNumber,
Size: int(part.Size),
ChecksumSHA256: part.ETag,
ChecksumSHA256: base64.StdEncoding.EncodeToString(checksumBytes),
}
}
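For context (not part of the diff): the hex-to-base64 conversion used above when reporting checksums, shown on the well-known SHA256 of an empty payload.

package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	// Hex-encoded SHA256 as stored in a part ETag (SHA256 of an empty payload).
	etag := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	sum, err := hex.DecodeString(etag)
	if err != nil {
		panic(err)
	}
	// GetObjectAttributes reports checksums base64-encoded, as S3 clients expect.
	fmt.Println(base64.StdEncoding.EncodeToString(sum)) // 47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=
}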

View file

@ -1,6 +1,8 @@
package handler
import (
"encoding/base64"
"encoding/hex"
"strings"
"testing"
@ -17,18 +19,20 @@ func TestGetObjectPartsAttributes(t *testing.T) {
createTestBucket(hc, bktName)
putObject(t, hc, bktName, objName)
putObject(hc, bktName, objName)
result := getObjectAttributes(hc, bktName, objName, objectParts)
require.Nil(t, result.ObjectParts)
multipartUpload := createMultipartUpload(hc, bktName, objMultipartName, map[string]string{})
etag, _ := uploadPart(hc, bktName, objMultipartName, multipartUpload.UploadID, 1, partSize)
completeMultipartUpload(hc, bktName, objMultipartName, multipartUpload.UploadID, []string{etag})
etagBytes, err := hex.DecodeString(etag[1 : len(etag)-1])
require.NoError(t, err)
result = getObjectAttributes(hc, bktName, objMultipartName, objectParts)
require.NotNil(t, result.ObjectParts)
require.Len(t, result.ObjectParts.Parts, 1)
require.Equal(t, etag, result.ObjectParts.Parts[0].ChecksumSHA256)
require.Equal(t, base64.StdEncoding.EncodeToString(etagBytes), result.ObjectParts.Parts[0].ChecksumSHA256)
require.Equal(t, partSize, result.ObjectParts.Parts[0].Size)
require.Equal(t, 1, result.ObjectParts.PartsCount)
}

View file

@ -11,7 +11,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"go.uber.org/zap"
)
@ -44,11 +45,11 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
versionID string
metadata map[string]string
tagSet map[string]string
sessionTokenEACL *session.Container
reqInfo = api.GetReqInfo(r.Context())
ctx = r.Context()
reqInfo = middleware.GetReqInfo(ctx)
containsACL = containsACLHeaders(r)
cannedACLStatus = aclHeadersStatus(r)
)
src := r.Header.Get(api.AmzCopySource)
@ -64,7 +65,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
srcBucket, srcObject, err := path2BucketObject(src)
if err != nil {
h.logAndSendError(w, "invalid source copy", reqInfo, err)
h.logAndSendError(ctx, w, "invalid source copy", reqInfo, err)
return
}
@ -74,44 +75,74 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
}
if srcObjPrm.BktInfo, err = h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner); err != nil {
h.logAndSendError(w, "couldn't get source bucket", reqInfo, err)
h.logAndSendError(ctx, w, "couldn't get source bucket", reqInfo, err)
return
}
dstBktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "couldn't get target bucket", reqInfo, err)
h.logAndSendError(ctx, w, "couldn't get target bucket", reqInfo, err)
return
}
settings, err := h.obj.GetBucketSettings(r.Context(), dstBktInfo)
settings, err := h.obj.GetBucketSettings(ctx, dstBktInfo)
if err != nil {
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
return
}
if containsACL {
if sessionTokenEACL, err = getSessionTokenSetEACL(r.Context()); err != nil {
h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
if cannedACLStatus == aclStatusYes {
h.logAndSendError(ctx, w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
return
}
}
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), srcObjPrm)
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
if err != nil {
h.logAndSendError(w, "could not find object", reqInfo, err)
h.logAndSendError(ctx, w, "could not find object", reqInfo, err)
return
}
srcObjInfo := extendedSrcObjInfo.ObjectInfo
srcEncryptionParams, err := h.formCopySourceEncryptionParams(r)
if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
return
}
dstEncryptionParams, err := h.formEncryptionParams(r)
if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
return
}
if err = srcEncryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
if errors.IsS3Error(err, errors.ErrInvalidEncryptionParameters) || errors.IsS3Error(err, errors.ErrSSEEncryptedObject) ||
errors.IsS3Error(err, errors.ErrInvalidSSECustomerParameters) {
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, err, zap.Error(err))
return
}
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
return
}
var dstSize uint64
srcSize, err := layer.GetObjectSize(srcObjInfo)
if err != nil {
h.logAndSendError(ctx, w, "failed to get source object size", reqInfo, err)
return
} else if srcSize > layer.UploadMaxSize { // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
h.logAndSendError(ctx, w, "too big object to copy with single copy operation, use multipart upload copy instead", reqInfo, errors.GetAPIError(errors.ErrInvalidRequestLargeCopy))
return
}
dstSize = srcSize
args, err := parseCopyObjectArgs(r.Header)
if err != nil {
h.logAndSendError(w, "could not parse request params", reqInfo, err)
h.logAndSendError(ctx, w, "could not parse request params", reqInfo, err)
return
}
if isCopyingToItselfForbidden(reqInfo, srcBucket, srcObject, settings, args) {
h.logAndSendError(w, "copying to itself without changing anything", reqInfo, errors.GetAPIError(errors.ErrInvalidCopyDest))
h.logAndSendError(ctx, w, "copying to itself without changing anything", reqInfo, errors.GetAPIError(errors.ErrInvalidCopyDest))
return
}
@ -122,12 +153,12 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
if args.TaggingDirective == replaceDirective {
tagSet, err = parseTaggingHeader(r.Header)
if err != nil {
h.logAndSendError(w, "could not parse tagging header", reqInfo, err)
h.logAndSendError(ctx, w, "could not parse tagging header", reqInfo, err)
return
}
} else {
tagPrm := &layer.GetObjectTaggingParams{
ObjectVersion: &layer.ObjectVersion{
tagPrm := &data.GetObjectTaggingParams{
ObjectVersion: &data.ObjectVersion{
BktInfo: srcObjPrm.BktInfo,
ObjectName: srcObject,
VersionID: srcObjInfo.VersionID(),
@ -135,26 +166,15 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
NodeVersion: extendedSrcObjInfo.NodeVersion,
}
_, tagSet, err = h.obj.GetObjectTagging(r.Context(), tagPrm)
_, tagSet, err = h.obj.GetObjectTagging(ctx, tagPrm)
if err != nil {
h.logAndSendError(w, "could not get object tagging", reqInfo, err)
h.logAndSendError(ctx, w, "could not get object tagging", reqInfo, err)
return
}
}
encryptionParams, err := formEncryptionParams(r)
if err != nil {
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return
}
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
return
}
if err = checkPreconditions(srcObjInfo, args.Conditional); err != nil {
h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
if err = checkPreconditions(srcObjInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil {
h.logAndSendError(ctx, w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
return
}
@ -162,69 +182,55 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
if len(srcObjInfo.ContentType) > 0 {
srcObjInfo.Headers[api.ContentType] = srcObjInfo.ContentType
}
metadata = srcObjInfo.Headers
metadata = makeCopyMap(srcObjInfo.Headers)
filterMetadataMap(metadata)
} else if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
metadata[api.ContentType] = contentType
}
copiesNumber, err := getCopiesNumberOrDefault(metadata, h.cfg.CopiesNumber)
if err != nil {
h.logAndSendError(w, "invalid copies number", reqInfo, err)
return
}
params := &layer.CopyObjectParams{
SrcVersioned: srcObjPrm.Versioned(),
SrcObject: srcObjInfo,
ScrBktInfo: srcObjPrm.BktInfo,
DstBktInfo: dstBktInfo,
DstObject: reqInfo.ObjectName,
SrcSize: srcObjInfo.Size,
DstSize: dstSize,
Header: metadata,
Encryption: encryptionParams,
CopiesNuber: copiesNumber,
SrcEncryption: srcEncryptionParams,
DstEncryption: dstEncryptionParams,
}
params.Lock, err = formObjectLock(r.Context(), dstBktInfo, settings.LockConfiguration, r.Header)
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, dstBktInfo.LocationConstraint)
if err != nil {
h.logAndSendError(w, "could not form object lock", reqInfo, err)
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
return
}
params.Lock, err = formObjectLock(ctx, dstBktInfo, settings.LockConfiguration, r.Header)
if err != nil {
h.logAndSendError(ctx, w, "could not form object lock", reqInfo, err)
return
}
additional := []zap.Field{zap.String("src_bucket_name", srcBucket), zap.String("src_object_name", srcObject)}
extendedDstObjInfo, err := h.obj.CopyObject(r.Context(), params)
extendedDstObjInfo, err := h.obj.CopyObject(ctx, params)
if err != nil {
h.logAndSendError(w, "couldn't copy object", reqInfo, err, additional...)
h.logAndSendError(ctx, w, "couldn't copy object", reqInfo, err, additional...)
return
}
dstObjInfo := extendedDstObjInfo.ObjectInfo
if err = api.EncodeToResponse(w, &CopyObjectResponse{LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339), ETag: dstObjInfo.HashSum}); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
if err = middleware.EncodeToResponse(w, &CopyObjectResponse{
LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339),
ETag: data.Quote(dstObjInfo.ETag(h.cfg.MD5Enabled())),
}); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err, additional...)
return
}
if containsACL {
newEaclTable, err := h.getNewEAclTable(r, dstBktInfo, dstObjInfo)
if err != nil {
h.logAndSendError(w, "could not get new eacl table", reqInfo, err)
return
}
p := &layer.PutBucketACLParams{
BktInfo: dstBktInfo,
EACL: newEaclTable,
SessionToken: sessionTokenEACL,
}
if err = h.obj.PutBucketACL(r.Context(), p); err != nil {
h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
return
}
}
if tagSet != nil {
tagPrm := &layer.PutObjectTaggingParams{
ObjectVersion: &layer.ObjectVersion{
tagPrm := &data.PutObjectTaggingParams{
ObjectVersion: &data.ObjectVersion{
BktInfo: dstBktInfo,
ObjectName: reqInfo.ObjectName,
VersionID: dstObjInfo.VersionID(),
@ -232,33 +238,35 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
TagSet: tagSet,
NodeVersion: extendedDstObjInfo.NodeVersion,
}
if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
if err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
h.logAndSendError(ctx, w, "could not upload object tagging", reqInfo, err)
return
}
}
h.log.Info("object is copied",
zap.String("bucket", dstObjInfo.Bucket),
zap.String("object", dstObjInfo.Name),
zap.Stringer("object_id", dstObjInfo.ID))
h.reqLogger(ctx).Info(logs.ObjectIsCopied, zap.Stringer("object_id", dstObjInfo.ID))
s := &SendNotificationParams{
Event: EventObjectCreatedCopy,
NotificationInfo: data.NotificationInfoFromObject(dstObjInfo),
BktInfo: dstBktInfo,
ReqInfo: reqInfo,
}
if err = h.sendNotifications(r.Context(), s); err != nil {
h.log.Error("couldn't send notification: %w", zap.Error(err))
}
if encryptionParams.Enabled() {
if dstEncryptionParams.Enabled() {
addSSECHeaders(w.Header(), r.Header)
}
}
func isCopyingToItselfForbidden(reqInfo *api.ReqInfo, srcBucket string, srcObject string, settings *data.BucketSettings, args *copyObjectArgs) bool {
func makeCopyMap(headers map[string]string) map[string]string {
res := make(map[string]string, len(headers))
for key, val := range headers {
res[key] = val
}
return res
}
func filterMetadataMap(metadata map[string]string) {
delete(metadata, layer.MultipartObjectSize) // the object payload will be the real one rather than a list of compound parts
for key := range layer.EncryptionMetadata {
delete(metadata, key)
}
}
func isCopyingToItselfForbidden(reqInfo *middleware.ReqInfo, srcBucket string, srcObject string, settings *data.BucketSettings, args *copyObjectArgs) bool {
if reqInfo.BucketName != srcBucket || reqInfo.ObjectName != srcObject {
return false
}
@ -273,8 +281,8 @@ func isCopyingToItselfForbidden(reqInfo *api.ReqInfo, srcBucket string, srcObjec
func parseCopyObjectArgs(headers http.Header) (*copyObjectArgs, error) {
var err error
args := &conditionalArgs{
IfMatch: headers.Get(api.AmzCopyIfMatch),
IfNoneMatch: headers.Get(api.AmzCopyIfNoneMatch),
IfMatch: data.UnQuote(headers.Get(api.AmzCopyIfMatch)),
IfNoneMatch: data.UnQuote(headers.Get(api.AmzCopyIfNoneMatch)),
}
if args.IfModifiedSince, err = parseHTTPTime(headers.Get(api.AmzCopyIfModifiedSince)); err != nil {

View file

@ -1,12 +1,21 @@
package handler
import (
"crypto/md5"
"crypto/tls"
"encoding/base64"
"encoding/xml"
"net/http"
"net/url"
"strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"github.com/stretchr/testify/require"
)
@ -15,6 +24,7 @@ type CopyMeta struct {
Tags map[string]string
MetadataDirective string
Metadata map[string]string
Headers map[string]string
}
func TestCopyWithTaggingDirective(t *testing.T) {
@ -29,14 +39,14 @@ func TestCopyWithTaggingDirective(t *testing.T) {
copyMeta := CopyMeta{
Tags: map[string]string{"key2": "val"},
}
copyObject(t, tc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
copyObject(tc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
tagging := getObjectTagging(t, tc, bktName, objToCopy, emptyVersion)
require.Len(t, tagging.TagSet, 1)
require.Equal(t, "key", tagging.TagSet[0].Key)
require.Equal(t, "val", tagging.TagSet[0].Value)
copyMeta.TaggingDirective = replaceDirective
copyObject(t, tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
copyObject(tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
tagging = getObjectTagging(t, tc, bktName, objToCopy2, emptyVersion)
require.Len(t, tagging.TagSet, 1)
require.Equal(t, "key2", tagging.TagSet[0].Key)
@ -51,20 +61,213 @@ func TestCopyToItself(t *testing.T) {
copyMeta := CopyMeta{MetadataDirective: replaceDirective}
copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest)
copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)
copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest)
copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
putBucketVersioning(t, tc, bktName, true)
copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)
copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
putBucketVersioning(t, tc, bktName, false)
copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)
copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
}
func copyObject(t *testing.T, tc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) {
w, r := prepareTestRequest(tc, bktName, toObject, nil)
func TestCopyMultipart(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket-for-copy", "object-for-copy"
createTestBucket(hc, bktName)
partSize := layer.UploadMinSize
objLen := 6 * partSize
headers := map[string]string{}
data := multipartUpload(hc, bktName, objName, headers, objLen, partSize)
require.Equal(t, objLen, len(data))
objToCopy := "copy-target"
var copyMeta CopyMeta
copyObject(hc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
copiedData, _ := getObject(hc, bktName, objToCopy)
equalDataSlices(t, data, copiedData)
result := getObjectAttributes(hc, bktName, objToCopy, objectParts)
require.NotNil(t, result.ObjectParts)
objToCopy2 := "copy-target2"
copyMeta.MetadataDirective = replaceDirective
copyObject(hc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
result = getObjectAttributes(hc, bktName, objToCopy2, objectParts)
require.Nil(t, result.ObjectParts)
copiedData, _ = getObject(hc, bktName, objToCopy2)
equalDataSlices(t, data, copiedData)
}
func TestCopyEncryptedToUnencrypted(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, srcObjName := "bucket-for-copy", "object-for-copy"
key1 := []byte("firstencriptionkeyofsourceobject")
key1Md5 := md5.Sum(key1)
key2 := []byte("anotherencriptionkeysourceobject")
key2Md5 := md5.Sum(key2)
bktInfo := createTestBucket(tc, bktName)
srcEnc, err := encryption.NewParams(key1)
require.NoError(t, err)
srcObjInfo := createTestObject(tc, bktInfo, srcObjName, *srcEnc)
require.True(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))
dstObjName := "copy-object"
// empty copy-source-sse headers
w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
r.TLS = &tls.ConnectionState{}
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
tc.Handler().CopyObjectHandler(w, r)
assertStatus(t, w, http.StatusBadRequest)
assertS3Error(t, w, errors.GetAPIError(errors.ErrSSEEncryptedObject))
// empty copy-source-sse-custom-key
w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
r.TLS = &tls.ConnectionState{}
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
tc.Handler().CopyObjectHandler(w, r)
assertStatus(t, w, http.StatusBadRequest)
assertS3Error(t, w, errors.GetAPIError(errors.ErrMissingSSECustomerKey))
// empty copy-source-sse-custom-algorithm
w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
r.TLS = &tls.ConnectionState{}
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
tc.Handler().CopyObjectHandler(w, r)
assertStatus(t, w, http.StatusBadRequest)
assertS3Error(t, w, errors.GetAPIError(errors.ErrMissingSSECustomerAlgorithm))
// invalid copy-source-sse-custom-key
w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
r.TLS = &tls.ConnectionState{}
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key2))
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key2Md5[:]))
tc.Handler().CopyObjectHandler(w, r)
assertStatus(t, w, http.StatusBadRequest)
assertS3Error(t, w, errors.GetAPIError(errors.ErrInvalidSSECustomerParameters))
// success copy
w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
r.TLS = &tls.ConnectionState{}
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key1Md5[:]))
tc.Handler().CopyObjectHandler(w, r)
assertStatus(t, w, http.StatusOK)
dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
require.NoError(t, err)
require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], strconv.Itoa(int(dstObjInfo.Size)))
require.False(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
}
func TestCopyUnencryptedToEncrypted(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, srcObjName := "bucket-for-copy", "object-for-copy"
key := []byte("firstencriptionkeyofsourceobject")
keyMd5 := md5.Sum(key)
bktInfo := createTestBucket(tc, bktName)
srcObjInfo := createTestObject(tc, bktInfo, srcObjName, encryption.Params{})
require.False(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))
dstObjName := "copy-object"
// invalid copy-source-sse headers
w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
r.TLS = &tls.ConnectionState{}
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key))
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMd5[:]))
tc.Handler().CopyObjectHandler(w, r)
assertStatus(t, w, http.StatusBadRequest)
assertS3Error(t, w, errors.GetAPIError(errors.ErrInvalidEncryptionParameters))
// success copy
w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
r.TLS = &tls.ConnectionState{}
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
r.Header.Set(api.AmzServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key))
r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMd5[:]))
tc.Handler().CopyObjectHandler(w, r)
assertStatus(t, w, http.StatusOK)
dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
require.NoError(t, err)
require.True(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
require.Equal(t, strconv.Itoa(int(srcObjInfo.Size)), dstObjInfo.Headers[layer.AttributeDecryptedSize])
}
func TestCopyEncryptedToEncryptedWithAnotherKey(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, srcObjName := "bucket-for-copy", "object-for-copy"
key1 := []byte("firstencriptionkeyofsourceobject")
key1Md5 := md5.Sum(key1)
key2 := []byte("anotherencriptionkeysourceobject")
key2Md5 := md5.Sum(key2)
bktInfo := createTestBucket(tc, bktName)
srcEnc, err := encryption.NewParams(key1)
require.NoError(t, err)
srcObjInfo := createTestObject(tc, bktInfo, srcObjName, *srcEnc)
require.True(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))
dstObjName := "copy-object"
w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
r.TLS = &tls.ConnectionState{}
r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key1Md5[:]))
r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
r.Header.Set(api.AmzServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key2))
r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key2Md5[:]))
tc.Handler().CopyObjectHandler(w, r)
assertStatus(t, w, http.StatusOK)
dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
require.NoError(t, err)
require.True(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], dstObjInfo.Headers[layer.AttributeDecryptedSize])
}
func containEncryptionMetadataHeaders(headers map[string]string) bool {
for k := range headers {
if _, ok := layer.EncryptionMetadata[k]; ok {
return true
}
}
return false
}
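The tests above pin down the SSE-C semantics of CopyObject: the x-amz-copy-source-server-side-encryption-customer-* headers must carry the key the source object was written with, while the plain x-amz-server-side-encryption-customer-* headers control whether and how the destination is re-encrypted. Below is a minimal client-side sketch of setting these headers with only the Go standard library; the endpoint, bucket and object names are placeholders, and the header names follow the public S3 SSE-C convention rather than the gateway's internal api.* constants.

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"net/http"
)

// setSSECCopyHeaders fills in the standard S3 SSE-C headers for a CopyObject
// request: the copy-source-* trio is used to decrypt the source object, the
// plain server-side-encryption-customer-* trio re-encrypts the destination.
// Keys are 32 bytes (AES-256) and are sent base64-encoded together with a
// base64-encoded MD5 of the raw key.
func setSSECCopyHeaders(r *http.Request, srcKey, dstKey []byte) {
	srcMD5 := md5.Sum(srcKey)
	dstMD5 := md5.Sum(dstKey)

	r.Header.Set("x-amz-copy-source-server-side-encryption-customer-algorithm", "AES256")
	r.Header.Set("x-amz-copy-source-server-side-encryption-customer-key", base64.StdEncoding.EncodeToString(srcKey))
	r.Header.Set("x-amz-copy-source-server-side-encryption-customer-key-MD5", base64.StdEncoding.EncodeToString(srcMD5[:]))

	r.Header.Set("x-amz-server-side-encryption-customer-algorithm", "AES256")
	r.Header.Set("x-amz-server-side-encryption-customer-key", base64.StdEncoding.EncodeToString(dstKey))
	r.Header.Set("x-amz-server-side-encryption-customer-key-MD5", base64.StdEncoding.EncodeToString(dstMD5[:]))
}

func main() {
	srcKey := []byte("firstencriptionkeyofsourceobject") // 32 bytes, as in the tests above
	dstKey := []byte("anotherencriptionkeysourceobject") // 32 bytes

	// Hypothetical endpoint and names, for illustration only.
	req, err := http.NewRequest(http.MethodPut, "https://s3.example.com/bucket-for-copy/copy-object", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("x-amz-copy-source", "bucket-for-copy/object-for-copy")
	setSSECCopyHeaders(req, srcKey, dstKey)

	fmt.Println(req.Header)
}

Note that every test above sets r.TLS before invoking the handler, reflecting that SSE-C key material is only accepted on connections the gateway considers TLS-protected.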
func copyObject(hc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) {
w, r := prepareTestRequest(hc, bktName, toObject, nil)
r.Header.Set(api.AmzCopySource, bktName+"/"+fromObject)
r.Header.Set(api.AmzMetadataDirective, copyMeta.MetadataDirective)
@ -79,28 +282,33 @@ func copyObject(t *testing.T, tc *handlerContext, bktName, fromObject, toObject
}
r.Header.Set(api.AmzTagging, tagsQuery.Encode())
tc.Handler().CopyObjectHandler(w, r)
assertStatus(t, w, statusCode)
for key, val := range copyMeta.Headers {
r.Header.Set(key, val)
}
hc.Handler().CopyObjectHandler(w, r)
assertStatus(hc.t, w, statusCode)
}
func putObjectTagging(t *testing.T, tc *handlerContext, bktName, objName string, tags map[string]string) {
body := &Tagging{
TagSet: make([]Tag, 0, len(tags)),
body := &data.Tagging{
TagSet: make([]data.Tag, 0, len(tags)),
}
for key, val := range tags {
body.TagSet = append(body.TagSet, Tag{
body.TagSet = append(body.TagSet, data.Tag{
Key: key,
Value: val,
})
}
w, r := prepareTestRequest(tc, bktName, objName, body)
middleware.GetReqInfo(r.Context()).Tagging = body
tc.Handler().PutObjectTaggingHandler(w, r)
assertStatus(t, w, http.StatusOK)
}
func getObjectTagging(t *testing.T, tc *handlerContext, bktName, objName, version string) *Tagging {
func getObjectTagging(t *testing.T, tc *handlerContext, bktName, objName, version string) *data.Tagging {
query := make(url.Values)
query.Add(api.QueryVersionID, version)
@ -108,7 +316,7 @@ func getObjectTagging(t *testing.T, tc *handlerContext, bktName, objName, versio
tc.Handler().GetObjectTaggingHandler(w, r)
assertStatus(t, w, http.StatusOK)
tagging := &Tagging{}
tagging := &data.Tagging{}
err := xml.NewDecoder(w.Result().Body).Decode(tagging)
require.NoError(t, err)
return tagging

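The putObjectTagging/getObjectTagging helpers above now marshal and decode data.Tagging/data.Tag. For reference, the wire format is the standard S3 Tagging document; the sketch below uses locally defined mirror types, hypothetical and included only to illustrate the XML shape the helpers exchange.

package main

import (
	"encoding/xml"
	"fmt"
)

// Local mirrors of the gateway's tagging types, defined here only to show
// the XML layout; field names and tags follow the public S3 schema.
type Tag struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}

type Tagging struct {
	XMLName xml.Name `xml:"Tagging"`
	TagSet  []Tag    `xml:"TagSet>Tag"`
}

func main() {
	body := Tagging{TagSet: []Tag{{Key: "env", Value: "test"}, {Key: "team", Value: "storage"}}}
	out, err := xml.MarshalIndent(body, "", "  ")
	if err != nil {
		panic(err)
	}
	// Produces the standard <Tagging><TagSet><Tag>...</Tag></TagSet></Tagging>
	// document, the same shape the test helpers above decode from the response.
	fmt.Println(string(out))
}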

@ -8,6 +8,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"go.uber.org/zap"
)
@ -18,60 +20,73 @@ const (
)
func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
return
}
cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
if err != nil {
h.logAndSendError(w, "could not get cors", reqInfo, err)
h.logAndSendError(ctx, w, "could not get cors", reqInfo, err)
return
}
if err = api.EncodeToResponse(w, cors); err != nil {
h.logAndSendError(w, "could not encode cors to response", reqInfo, err)
if err = middleware.EncodeToResponse(w, cors); err != nil {
h.logAndSendError(ctx, w, "could not encode cors to response", reqInfo, err)
return
}
}
func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
return
}
p := &layer.PutCORSParams{
BktInfo: bktInfo,
Reader: r.Body,
CopiesNumber: h.cfg.CopiesNumber,
NewDecoder: h.cfg.NewXMLDecoder,
UserAgent: r.UserAgent(),
}
if err = h.obj.PutBucketCORS(r.Context(), p); err != nil {
h.logAndSendError(w, "could not put cors configuration", reqInfo, err)
p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
if err != nil {
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
return
}
api.WriteSuccessResponseHeadersOnly(w)
if err = h.obj.PutBucketCORS(ctx, p); err != nil {
h.logAndSendError(ctx, w, "could not put cors configuration", reqInfo, err)
return
}
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
h.logAndSendError(ctx, w, "write response", reqInfo, err)
return
}
}
func (h *handler) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
return
}
if err = h.obj.DeleteBucketCORS(r.Context(), bktInfo); err != nil {
h.logAndSendError(w, "could not delete cors", reqInfo, err)
if err = h.obj.DeleteBucketCORS(ctx, bktInfo); err != nil {
h.logAndSendError(ctx, w, "could not delete cors", reqInfo, err)
}
w.WriteHeader(http.StatusNoContent)
@ -85,19 +100,21 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
if origin == "" {
return
}
reqInfo := api.GetReqInfo(r.Context())
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
if reqInfo.BucketName == "" {
return
}
bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
bktInfo, err := h.getBucketInfo(ctx, reqInfo.BucketName)
if err != nil {
h.log.Warn("get bucket info", zap.Error(err))
h.reqLogger(ctx).Warn(logs.GetBucketInfo, zap.Error(err))
return
}
cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
if err != nil {
h.log.Warn("get bucket cors", zap.Error(err))
h.reqLogger(ctx).Warn(logs.GetBucketCors, zap.Error(err))
return
}
@ -136,21 +153,22 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
}
func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketInfo(ctx, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
return
}
origin := r.Header.Get(api.Origin)
if origin == "" {
h.logAndSendError(w, "origin request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
h.logAndSendError(ctx, w, "origin request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
}
method := r.Header.Get(api.AccessControlRequestMethod)
if method == "" {
h.logAndSendError(w, "Access-Control-Request-Method request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
h.logAndSendError(ctx, w, "Access-Control-Request-Method request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
return
}
@ -160,9 +178,9 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
headers = strings.Split(requestHeaders, ", ")
}
cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
if err != nil {
h.logAndSendError(w, "could not get cors", reqInfo, err)
h.logAndSendError(ctx, w, "could not get cors", reqInfo, err)
return
}
@ -174,8 +192,8 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
if !checkSubslice(rule.AllowedHeaders, headers) {
continue
}
w.Header().Set(api.AccessControlAllowOrigin, o)
w.Header().Set(api.AccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
w.Header().Set(api.AccessControlAllowOrigin, origin)
w.Header().Set(api.AccessControlAllowMethods, method)
if headers != nil {
w.Header().Set(api.AccessControlAllowHeaders, requestHeaders)
}
@ -185,19 +203,22 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
if rule.MaxAgeSeconds > 0 || rule.MaxAgeSeconds == -1 {
w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(rule.MaxAgeSeconds))
} else {
w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(h.cfg.DefaultMaxAge))
w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(h.cfg.DefaultMaxAge()))
}
if o != wildcard {
w.Header().Set(api.AccessControlAllowCredentials, "true")
}
api.WriteSuccessResponseHeadersOnly(w)
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
h.logAndSendError(ctx, w, "write response", reqInfo, err)
return
}
return
}
}
}
}
}
h.logAndSendError(w, "Forbidden", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
h.logAndSendError(ctx, w, "Forbidden", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
}
func checkSubslice(slice []string, subSlice []string) bool {

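The Preflight handler above only answers once the requested origin, method, and headers all fall inside one of the bucket's CORS rules; the checkSubslice helper (its body is truncated in this view) performs the header subset check. Below is a standalone sketch of that kind of check, assuming simple wildcard handling; the repository's actual helper may differ in details such as case sensitivity.

package main

import "fmt"

// containsAll reports whether every element of want appears in allowed,
// treating "*" as a wildcard. This is an illustrative stand-in for the
// handler's subset check over AllowedHeaders, not the repository's exact code.
func containsAll(allowed, want []string) bool {
	for _, w := range want {
		ok := false
		for _, a := range allowed {
			if a == "*" || a == w {
				ok = true
				break
			}
		}
		if !ok {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(containsAll([]string{"Authorization"}, []string{"Authorization"}))                  // true
	fmt.Println(containsAll([]string{"Authorization"}, []string{"Authorization", "Last-Modified"})) // false, cf. the "Not allowed headers" test case
	fmt.Println(containsAll([]string{"*"}, []string{"Authorization", "Content-Type"}))              // true, wildcard rule
}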

@ -1,12 +1,13 @@
package handler
import (
"context"
"net/http"
"strings"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"github.com/stretchr/testify/require"
)
func TestCORSOriginWildcard(t *testing.T) {
@ -23,14 +24,14 @@ func TestCORSOriginWildcard(t *testing.T) {
bktName := "bucket-for-cors"
box, _ := createAccessBox(t)
w, r := prepareTestRequest(hc, bktName, "", nil)
ctx := context.WithValue(r.Context(), api.BoxData, box)
ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
r = r.WithContext(ctx)
r.Header.Add(api.AmzACL, "public-read")
hc.Handler().CreateBucketHandler(w, r)
assertStatus(t, w, http.StatusOK)
w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
ctx = context.WithValue(r.Context(), api.BoxData, box)
ctx = middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
r = r.WithContext(ctx)
hc.Handler().PutBucketCorsHandler(w, r)
assertStatus(t, w, http.StatusOK)
@ -39,3 +40,181 @@ func TestCORSOriginWildcard(t *testing.T) {
hc.Handler().GetBucketCorsHandler(w, r)
assertStatus(t, w, http.StatusOK)
}
func TestPreflight(t *testing.T) {
body := `
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<CORSRule>
<AllowedMethod>GET</AllowedMethod>
<AllowedOrigin>http://www.example.com</AllowedOrigin>
<AllowedHeader>Authorization</AllowedHeader>
<ExposeHeader>x-amz-*</ExposeHeader>
<ExposeHeader>X-Amz-*</ExposeHeader>
<MaxAgeSeconds>600</MaxAgeSeconds>
</CORSRule>
</CORSConfiguration>
`
hc := prepareHandlerContext(t)
bktName := "bucket-preflight-test"
box, _ := createAccessBox(t)
w, r := prepareTestRequest(hc, bktName, "", nil)
ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
r = r.WithContext(ctx)
hc.Handler().CreateBucketHandler(w, r)
assertStatus(t, w, http.StatusOK)
w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
ctx = middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
r = r.WithContext(ctx)
hc.Handler().PutBucketCorsHandler(w, r)
assertStatus(t, w, http.StatusOK)
for _, tc := range []struct {
name string
origin string
method string
headers string
expectedStatus int
}{
{
name: "Valid",
origin: "http://www.example.com",
method: "GET",
headers: "Authorization",
expectedStatus: http.StatusOK,
},
{
name: "Empty origin",
method: "GET",
headers: "Authorization",
expectedStatus: http.StatusBadRequest,
},
{
name: "Empty request method",
origin: "http://www.example.com",
headers: "Authorization",
expectedStatus: http.StatusBadRequest,
},
{
name: "Not allowed method",
origin: "http://www.example.com",
method: "PUT",
headers: "Authorization",
expectedStatus: http.StatusForbidden,
},
{
name: "Not allowed headers",
origin: "http://www.example.com",
method: "GET",
headers: "Authorization, Last-Modified",
expectedStatus: http.StatusForbidden,
},
} {
t.Run(tc.name, func(t *testing.T) {
w, r = prepareTestPayloadRequest(hc, bktName, "", nil)
r.Header.Set(api.Origin, tc.origin)
r.Header.Set(api.AccessControlRequestMethod, tc.method)
r.Header.Set(api.AccessControlRequestHeaders, tc.headers)
hc.Handler().Preflight(w, r)
assertStatus(t, w, tc.expectedStatus)
if tc.expectedStatus == http.StatusOK {
require.Equal(t, tc.origin, w.Header().Get(api.AccessControlAllowOrigin))
require.Equal(t, tc.method, w.Header().Get(api.AccessControlAllowMethods))
require.Equal(t, tc.headers, w.Header().Get(api.AccessControlAllowHeaders))
require.Equal(t, "x-amz-*, X-Amz-*", w.Header().Get(api.AccessControlExposeHeaders))
require.Equal(t, "true", w.Header().Get(api.AccessControlAllowCredentials))
require.Equal(t, "600", w.Header().Get(api.AccessControlMaxAge))
}
})
}
}
func TestPreflightWildcardOrigin(t *testing.T) {
body := `
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<CORSRule>
<AllowedMethod>GET</AllowedMethod>
<AllowedMethod>PUT</AllowedMethod>
<AllowedOrigin>*</AllowedOrigin>
<AllowedHeader>*</AllowedHeader>
</CORSRule>
</CORSConfiguration>
`
hc := prepareHandlerContext(t)
bktName := "bucket-preflight-wildcard-test"
box, _ := createAccessBox(t)
w, r := prepareTestRequest(hc, bktName, "", nil)
ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
r = r.WithContext(ctx)
hc.Handler().CreateBucketHandler(w, r)
assertStatus(t, w, http.StatusOK)
w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
ctx = middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
r = r.WithContext(ctx)
hc.Handler().PutBucketCorsHandler(w, r)
assertStatus(t, w, http.StatusOK)
for _, tc := range []struct {
name string
origin string
method string
headers string
expectedStatus int
}{
{
name: "Valid get",
origin: "http://www.example.com",
method: "GET",
headers: "Authorization, Last-Modified",
expectedStatus: http.StatusOK,
},
{
name: "Valid put",
origin: "http://example.com",
method: "PUT",
headers: "Authorization, Content-Type",
expectedStatus: http.StatusOK,
},
{
name: "Empty origin",
method: "GET",
headers: "Authorization, Last-Modified",
expectedStatus: http.StatusBadRequest,
},
{
name: "Empty request method",
origin: "http://www.example.com",
headers: "Authorization, Last-Modified",
expectedStatus: http.StatusBadRequest,
},
{
name: "Not allowed method",
origin: "http://www.example.com",
method: "DELETE",
headers: "Authorization, Last-Modified",
expectedStatus: http.StatusForbidden,
},
} {
t.Run(tc.name, func(t *testing.T) {
w, r = prepareTestPayloadRequest(hc, bktName, "", nil)
r.Header.Set(api.Origin, tc.origin)
r.Header.Set(api.AccessControlRequestMethod, tc.method)
r.Header.Set(api.AccessControlRequestHeaders, tc.headers)
hc.Handler().Preflight(w, r)
assertStatus(t, w, tc.expectedStatus)
if tc.expectedStatus == http.StatusOK {
require.Equal(t, tc.origin, w.Header().Get(api.AccessControlAllowOrigin))
require.Equal(t, tc.method, w.Header().Get(api.AccessControlAllowMethods))
require.Equal(t, tc.headers, w.Header().Get(api.AccessControlAllowHeaders))
require.Empty(t, w.Header().Get(api.AccessControlExposeHeaders))
require.Empty(t, w.Header().Get(api.AccessControlAllowCredentials))
require.Equal(t, "0", w.Header().Get(api.AccessControlMaxAge))
}
})
}
}
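The two tests above drive Preflight directly through the handler; against a running gateway the same behaviour is visible with an ordinary OPTIONS request carrying the standard CORS preflight headers. A minimal client sketch follows, with a hypothetical endpoint and bucket name.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical gateway endpoint and bucket; adjust to your deployment.
	req, err := http.NewRequest(http.MethodOptions, "https://s3.example.com/bucket-preflight-test/", nil)
	if err != nil {
		panic(err)
	}
	// Standard CORS preflight headers, as exercised by the tests above.
	req.Header.Set("Origin", "http://www.example.com")
	req.Header.Set("Access-Control-Request-Method", "GET")
	req.Header.Set("Access-Control-Request-Headers", "Authorization")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// On a rule match the handler echoes the origin, requested method and
	// headers back in the Access-Control-Allow-* response headers.
	fmt.Println(resp.Status)
	fmt.Println("Allow-Origin: ", resp.Header.Get("Access-Control-Allow-Origin"))
	fmt.Println("Allow-Methods:", resp.Header.Get("Access-Control-Allow-Methods"))
	fmt.Println("Allow-Headers:", resp.Header.Get("Access-Control-Allow-Headers"))
	fmt.Println("Max-Age:      ", resp.Header.Get("Access-Control-Max-Age"))
}

If no rule matches, the handler answers 403, which is exactly what the "Not allowed method" and "Not allowed headers" cases above assert.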


@ -2,19 +2,18 @@ package handler
import (
"encoding/xml"
"fmt"
"net/http"
"strconv"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
)
// limitation of AWS https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
@ -22,8 +21,9 @@ const maxObjectsToDelete = 1000
// DeleteObjectsRequest -- xml carrying the object key names which should be deleted.
type DeleteObjectsRequest struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delete" json:"-"`
// Element to enable quiet mode for the request
Quiet bool
Quiet bool `xml:"Quiet,omitempty"`
// List of objects to be deleted
Objects []ObjectIdentifier `xml:"Object"`
}
@ -43,10 +43,10 @@ type DeletedObject struct {
// DeleteError structure.
type DeleteError struct {
Code string
Message string
Key string
VersionID string `xml:"versionId,omitempty"`
Code string `xml:"Code,omitempty"`
Message string `xml:"Message,omitempty"`
Key string `xml:"Key,omitempty"`
VersionID string `xml:"VersionId,omitempty"`
}
// DeleteObjectsResponse container for multiple object deletes.
@ -61,7 +61,8 @@ type DeleteObjectsResponse struct {
}
func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
versionedObject := []*layer.VersionedObject{{
Name: reqInfo.ObjectName,
@ -70,13 +71,19 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
return
}
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
if err != nil {
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
return
}
networkInfo, err := h.obj.GetNetworkInfo(ctx)
if err != nil {
h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
return
}
@ -84,53 +91,19 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
BktInfo: bktInfo,
Objects: versionedObject,
Settings: bktSettings,
NetworkInfo: networkInfo,
}
deletedObjects := h.obj.DeleteObjects(r.Context(), p)
deletedObjects := h.obj.DeleteObjects(ctx, p)
deletedObject := deletedObjects[0]
if deletedObject.Error != nil {
if isErrObjectLocked(deletedObject.Error) {
h.logAndSendError(w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
h.logAndSendError(ctx, w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
} else {
h.logAndSendError(w, "could not delete object", reqInfo, deletedObject.Error)
h.logAndSendError(ctx, w, "could not delete object", reqInfo, deletedObject.Error)
}
return
}
var m *SendNotificationParams
if bktSettings.VersioningEnabled() && len(versionID) == 0 {
m = &SendNotificationParams{
Event: EventObjectRemovedDeleteMarkerCreated,
NotificationInfo: &data.NotificationInfo{
Name: reqInfo.ObjectName,
HashSum: deletedObject.DeleteMarkerEtag,
},
BktInfo: bktInfo,
ReqInfo: reqInfo,
}
} else {
var objID oid.ID
if len(versionID) != 0 {
if err = objID.DecodeString(versionID); err != nil {
h.log.Error("couldn't send notification: %w", zap.Error(err))
}
}
m = &SendNotificationParams{
Event: EventObjectRemovedDelete,
NotificationInfo: &data.NotificationInfo{
Name: reqInfo.ObjectName,
Version: objID.EncodeToString(),
},
BktInfo: bktInfo,
ReqInfo: reqInfo,
}
}
if err = h.sendNotifications(r.Context(), m); err != nil {
h.log.Error("couldn't send notification: %w", zap.Error(err))
}
if deletedObject.VersionID != "" {
w.Header().Set(api.AmzVersionID, deletedObject.VersionID)
}
@ -148,51 +121,54 @@ func isErrObjectLocked(err error) bool {
switch err.(type) {
default:
return strings.Contains(err.Error(), "object is locked")
case apistatus.ObjectLocked,
*apistatus.ObjectLocked:
case *apistatus.ObjectLocked:
return true
}
}
// DeleteMultipleObjectsHandler handles multiple delete requests.
func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
// Content-Md5 is required and should be set
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if _, ok := r.Header[api.ContentMD5]; !ok {
h.logAndSendError(w, "missing Content-MD5", reqInfo, errors.GetAPIError(errors.ErrMissingContentMD5))
h.logAndSendError(ctx, w, "missing Content-MD5", reqInfo, errors.GetAPIError(errors.ErrMissingContentMD5))
return
}
// Content-Length is required and should be non-zero
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if r.ContentLength <= 0 {
h.logAndSendError(w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength))
h.logAndSendError(ctx, w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength))
return
}
// Unmarshal list of keys to be deleted.
requested := &DeleteObjectsRequest{}
if err := xml.NewDecoder(r.Body).Decode(requested); err != nil {
h.logAndSendError(w, "couldn't decode body", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
if err := h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(requested); err != nil {
h.logAndSendError(ctx, w, "couldn't decode body", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()))
return
}
if len(requested.Objects) == 0 || len(requested.Objects) > maxObjectsToDelete {
h.logAndSendError(w, "number of objects to delete must be greater than 0 and less or equal to 1000", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
h.logAndSendError(ctx, w, "number of objects to delete must be greater than 0 and less or equal to 1000", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
return
}
removed := make(map[string]*layer.VersionedObject)
unique := make(map[string]struct{})
toRemove := make([]*layer.VersionedObject, 0, len(requested.Objects))
for _, obj := range requested.Objects {
versionedObj := &layer.VersionedObject{
Name: obj.ObjectName,
VersionID: obj.VersionID,
}
key := versionedObj.String()
if _, ok := unique[key]; !ok {
toRemove = append(toRemove, versionedObj)
removed[versionedObj.String()] = versionedObj
unique[key] = struct{}{}
}
}
response := &DeleteObjectsResponse{
@ -202,31 +178,31 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
return
}
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
if err != nil {
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
return
}
marshaler := zapcore.ArrayMarshalerFunc(func(encoder zapcore.ArrayEncoder) error {
for _, obj := range toRemove {
encoder.AppendString(obj.String())
networkInfo, err := h.obj.GetNetworkInfo(ctx)
if err != nil {
h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
return
}
return nil
})
p := &layer.DeleteObjectParams{
BktInfo: bktInfo,
Objects: toRemove,
Settings: bktSettings,
NetworkInfo: networkInfo,
IsMultiple: true,
}
deletedObjects := h.obj.DeleteObjects(r.Context(), p)
deletedObjects := h.obj.DeleteObjects(ctx, p)
var errs []error
for _, obj := range deletedObjects {
if obj.Error != nil {
code := "BadRequest"
@ -239,7 +215,6 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
Key: obj.Name,
VersionID: obj.VersionID,
})
errs = append(errs, obj.Error)
} else if !requested.Quiet {
deletedObj := DeletedObject{
ObjectIdentifier: ObjectIdentifier{
@ -254,40 +229,61 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
response.DeletedObjects = append(response.DeletedObjects, deletedObj)
}
}
if len(errs) != 0 {
fields := []zap.Field{
zap.Array("objects", marshaler),
zap.Errors("errors", errs),
}
h.log.Error("couldn't delete objects", fields...)
}
if err = api.EncodeToResponse(w, response); err != nil {
h.logAndSendError(w, "could not write response", reqInfo, err, zap.Array("objects", marshaler))
if err = middleware.EncodeToResponse(w, response); err != nil {
h.logAndSendError(ctx, w, "could not write response", reqInfo, err)
return
}
}
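DeleteMultipleObjectsHandler above requires a Content-MD5 header, a non-zero Content-Length, and an XML body with between 1 and 1000 Object entries. Below is a self-contained sketch of building such a request with the standard library; the request types are local mirrors following the public S3 Multi-Object Delete schema, and the endpoint is a placeholder.

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"encoding/xml"
	"fmt"
	"net/http"
)

// Local mirrors of the request types, defined here only to show the XML
// shape expected by the handler (S3 Multi-Object Delete schema).
type objectID struct {
	Key       string `xml:"Key"`
	VersionID string `xml:"VersionId,omitempty"`
}

type deleteRequest struct {
	XMLName xml.Name   `xml:"Delete"`
	Quiet   bool       `xml:"Quiet,omitempty"`
	Objects []objectID `xml:"Object"`
}

func main() {
	body, err := xml.Marshal(deleteRequest{
		Quiet:   true,
		Objects: []objectID{{Key: "object-to-delete"}, {Key: "old-object", VersionID: "some-version-id"}},
	})
	if err != nil {
		panic(err)
	}

	// S3 clients conventionally send the base64-encoded MD5 of the body;
	// the handler above only verifies that the header is present.
	sum := md5.Sum(body)

	// Hypothetical endpoint: POST /<bucket>?delete
	req, err := http.NewRequest(http.MethodPost, "https://s3.example.com/bucket-for-removal?delete", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(sum[:]))
	req.Header.Set("Content-Type", "application/xml")

	fmt.Println(string(body))
}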
func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
return
}
if err = checkOwner(bktInfo, reqInfo.User); err != nil {
h.logAndSendError(ctx, w, "request owner id does not match bucket owner id", reqInfo, err)
return
}
var sessionToken *session.Container
boxData, err := layer.GetBoxData(r.Context())
boxData, err := middleware.GetBoxData(ctx)
if err == nil {
sessionToken = boxData.Gate.SessionTokenForDelete()
}
if err = h.obj.DeleteBucket(r.Context(), &layer.DeleteBucketParams{
skipObjCheck := false
if value, ok := r.Header[api.AmzForceBucketDelete]; ok {
s := value[0]
if s == "true" {
skipObjCheck = true
}
}
if err = h.obj.DeleteBucket(ctx, &layer.DeleteBucketParams{
BktInfo: bktInfo,
SessionToken: sessionToken,
SkipCheck: skipObjCheck,
}); err != nil {
h.logAndSendError(w, "couldn't delete bucket", reqInfo, err)
h.logAndSendError(ctx, w, "couldn't delete bucket", reqInfo, err)
return
}
chainIDs := []chain.ID{
getBucketChainID(chain.S3, bktInfo),
getBucketChainID(chain.Ingress, bktInfo),
getBucketCannedChainID(chain.S3, bktInfo.CID),
getBucketCannedChainID(chain.Ingress, bktInfo.CID),
}
if err = h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil {
h.logAndSendError(ctx, w, "failed to delete policy from storage", reqInfo, err)
return
}
w.WriteHeader(http.StatusNoContent)
}


@ -2,12 +2,19 @@ package handler
import (
"bytes"
"encoding/xml"
"io"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
)
@ -15,15 +22,42 @@ const (
emptyVersion = ""
)
func TestDeleteBucketOnAlreadyRemovedError(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket-for-removal", "object-to-delete"
bktInfo := createTestBucket(hc, bktName)
putObject(hc, bktName, objName)
addr := getAddressOfLastVersion(hc, bktInfo, objName)
hc.tp.SetObjectError(addr, &apistatus.ObjectAlreadyRemoved{})
deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})
hc.owner = bktInfo.Owner
deleteBucket(t, hc, bktName, http.StatusNoContent)
}
func getAddressOfLastVersion(hc *handlerContext, bktInfo *data.BucketInfo, objName string) oid.Address {
nodeVersion, err := hc.tree.GetLatestVersion(hc.context, bktInfo, objName)
require.NoError(hc.t, err)
var addr oid.Address
addr.SetContainer(bktInfo.CID)
addr.SetObject(nodeVersion.OID)
return addr
}
func TestDeleteBucket(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, objName := "bucket-for-removal", "object-to-delete"
_, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)
bktInfo, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)
deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
require.True(t, isDeleteMarker)
tc.owner = bktInfo.Owner
deleteBucket(t, tc, bktName, http.StatusConflict)
deleteObject(t, tc, bktName, objName, objInfo.VersionID())
deleteBucket(t, tc, bktName, http.StatusConflict)
@ -31,6 +65,92 @@ func TestDeleteBucket(t *testing.T) {
deleteBucket(t, tc, bktName, http.StatusNoContent)
}
func TestDeleteBucketOnNotFoundError(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket-for-removal", "object-to-delete"
bktInfo := createTestBucket(hc, bktName)
putObject(hc, bktName, objName)
nodeVersion, err := hc.tree.GetUnversioned(hc.context, bktInfo, objName)
require.NoError(t, err)
var addr oid.Address
addr.SetContainer(bktInfo.CID)
addr.SetObject(nodeVersion.OID)
hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})
hc.owner = bktInfo.Owner
deleteBucket(t, hc, bktName, http.StatusNoContent)
}
func TestForceDeleteBucket(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket-for-removal", "object-to-delete"
bktInfo := createTestBucket(hc, bktName)
putObject(hc, bktName, objName)
nodeVersion, err := hc.tree.GetUnversioned(hc.context, bktInfo, objName)
require.NoError(t, err)
var addr oid.Address
addr.SetContainer(bktInfo.CID)
addr.SetObject(nodeVersion.OID)
hc.owner = bktInfo.Owner
deleteBucketForce(t, hc, bktName, http.StatusConflict, "false")
deleteBucketForce(t, hc, bktName, http.StatusNoContent, "true")
}
func TestDeleteMultipleObjectCheckUniqueness(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket", "object"
createTestBucket(hc, bktName)
putObject(hc, bktName, objName)
resp := deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}, {objName, emptyVersion}})
require.Empty(t, resp.Errors)
require.Len(t, resp.DeletedObjects, 1)
}
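TestDeleteMultipleObjectCheckUniqueness relies on the handler deduplicating repeated (name, version) pairs before handing them to the layer, which the diff above implements with a unique map keyed by the versioned object's string form. A standalone sketch of the same idea:

package main

import "fmt"

type versionedObject struct {
	Name      string
	VersionID string
}

// dedup keeps the first occurrence of every (Name, VersionID) pair,
// mirroring the unique-map approach in DeleteMultipleObjectsHandler.
func dedup(in []versionedObject) []versionedObject {
	seen := make(map[string]struct{}, len(in))
	out := make([]versionedObject, 0, len(in))
	for _, obj := range in {
		key := obj.Name + ":" + obj.VersionID
		if _, ok := seen[key]; ok {
			continue
		}
		seen[key] = struct{}{}
		out = append(out, obj)
	}
	return out
}

func main() {
	objs := []versionedObject{{Name: "object"}, {Name: "object"}}
	fmt.Println(len(dedup(objs))) // 1, as the test asserts for DeletedObjects
}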
func TestDeleteObjectsError(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket-for-removal", "object-to-delete"
bktInfo := createTestBucket(hc, bktName)
putBucketVersioning(t, hc, bktName, true)
putObject(hc, bktName, objName)
nodeVersion, err := hc.tree.GetLatestVersion(hc.context, bktInfo, objName)
require.NoError(t, err)
var addr oid.Address
addr.SetContainer(bktInfo.CID)
addr.SetObject(nodeVersion.OID)
expectedError := apierr.GetAPIError(apierr.ErrAccessDenied)
hc.tp.SetObjectError(addr, expectedError)
w := deleteObjectsBase(hc, bktName, [][2]string{{objName, nodeVersion.OID.EncodeToString()}})
var buf bytes.Buffer
res := &DeleteObjectsResponse{}
err = xml.NewDecoder(io.TeeReader(w.Result().Body, &buf)).Decode(res)
require.NoError(t, err)
require.Contains(t, buf.String(), "VersionId")
require.ElementsMatch(t, []DeleteError{{
Code: expectedError.Code,
Key: objName,
Message: expectedError.Error(),
VersionID: nodeVersion.OID.EncodeToString(),
}}, res.Errors)
}
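The test above asserts that per-object failures are reported inside the 200 response body, with the version serialized as VersionId, rather than as an overall HTTP error. Below is a decoding sketch with locally defined mirror types that follow the public S3 DeleteResult schema; the sample payload values are illustrative only.

package main

import (
	"encoding/xml"
	"fmt"
)

// Local mirrors of the response types, for illustration of the XML shape only.
type deleted struct {
	Key       string `xml:"Key"`
	VersionID string `xml:"VersionId,omitempty"`
}

type deleteError struct {
	Code      string `xml:"Code"`
	Message   string `xml:"Message"`
	Key       string `xml:"Key"`
	VersionID string `xml:"VersionId,omitempty"`
}

type deleteResult struct {
	XMLName xml.Name      `xml:"DeleteResult"`
	Deleted []deleted     `xml:"Deleted"`
	Errors  []deleteError `xml:"Error"`
}

func main() {
	// A response of the kind the test above provokes: deletion of an
	// inaccessible version comes back as an <Error> element.
	body := `<DeleteResult>
  <Error>
    <Code>AccessDenied</Code>
    <Message>Access Denied.</Message>
    <Key>object-to-delete</Key>
    <VersionId>example-version-id</VersionId>
  </Error>
</DeleteResult>`

	var res deleteResult
	if err := xml.Unmarshal([]byte(body), &res); err != nil {
		panic(err)
	}
	fmt.Printf("%d deleted, %d errors, first error code %q\n", len(res.Deleted), len(res.Errors), res.Errors[0].Code)
}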
func TestDeleteObject(t *testing.T) {
tc := prepareHandlerContext(t)
@ -49,7 +169,7 @@ func TestDeleteObjectFromSuspended(t *testing.T) {
bktName, objName := "bucket-versioned-for-removal", "object-to-delete"
createSuspendedBucket(t, tc, bktName)
putObject(t, tc, bktName, objName)
putObject(tc, bktName, objName)
versionID, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
require.True(t, isDeleteMarker)
@ -82,7 +202,7 @@ func TestDeleteDeletedObject(t *testing.T) {
})
t.Run("versioned bucket not found obj", func(t *testing.T) {
bktName, objName := "bucket-versioned-for-removal", "object-to-delete"
bktName, objName := "bucket-versioned-for-removal-not-found", "object-to-delete"
_, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)
versionID, isDeleteMarker := deleteObject(t, tc, bktName, objName, objInfo.VersionID())
@ -147,6 +267,82 @@ func TestRemoveDeleteMarker(t *testing.T) {
require.True(t, existInMockedFrostFS(tc, bktInfo, objInfo), "object doesn't exist but should")
}
func TestDeleteMarkerVersioned(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, objName := "bucket-for-removal", "object-to-delete"
createVersionedBucketAndObject(t, tc, bktName, objName)
t.Run("not create new delete marker if last version is delete marker", func(t *testing.T) {
deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
require.True(t, isDeleteMarker)
versions := listVersions(t, tc, bktName)
require.Len(t, versions.DeleteMarker, 1)
require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)
_, isDeleteMarker = deleteObject(t, tc, bktName, objName, emptyVersion)
require.True(t, isDeleteMarker)
versions = listVersions(t, tc, bktName)
require.Len(t, versions.DeleteMarker, 1)
require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)
})
t.Run("do not create delete marker if object does not exist", func(t *testing.T) {
versionsBefore := listVersions(t, tc, bktName)
_, isDeleteMarker := deleteObject(t, tc, bktName, "dummy", emptyVersion)
require.False(t, isDeleteMarker)
versionsAfter := listVersions(t, tc, bktName)
require.Equal(t, versionsBefore, versionsAfter)
})
}
func TestDeleteMarkerSuspended(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, objName := "bucket-for-removal", "object-to-delete"
bktInfo, _ := createVersionedBucketAndObject(t, tc, bktName, objName)
putBucketVersioning(t, tc, bktName, false)
t.Run("not create new delete marker if last version is delete marker", func(t *testing.T) {
deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
require.True(t, isDeleteMarker)
require.Equal(t, data.UnversionedObjectVersionID, deleteMarkerVersion)
deleteMarkerVersion, isDeleteMarker = deleteObject(t, tc, bktName, objName, emptyVersion)
require.True(t, isDeleteMarker)
require.Equal(t, data.UnversionedObjectVersionID, deleteMarkerVersion)
versions := listVersions(t, tc, bktName)
require.Len(t, versions.DeleteMarker, 1)
require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)
})
t.Run("do not create delete marker if object does not exist", func(t *testing.T) {
versionsBefore := listVersions(t, tc, bktName)
_, isDeleteMarker := deleteObject(t, tc, bktName, "dummy", emptyVersion)
require.False(t, isDeleteMarker)
versionsAfter := listVersions(t, tc, bktName)
require.Equal(t, versionsBefore, versionsAfter)
})
t.Run("remove last unversioned non delete marker", func(t *testing.T) {
objName := "obj3"
putObject(tc, bktName, objName)
nodeVersion, err := tc.tree.GetUnversioned(tc.Context(), bktInfo, objName)
require.NoError(t, err)
deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
require.True(t, isDeleteMarker)
require.Equal(t, data.UnversionedObjectVersionID, deleteMarkerVersion)
objVersions := getVersion(listVersions(t, tc, bktName), objName)
require.Len(t, objVersions, 0)
require.False(t, tc.MockedPool().ObjectExists(nodeVersion.OID))
})
}
func TestDeleteObjectCombined(t *testing.T) {
tc := prepareHandlerContext(t)
@ -197,19 +393,40 @@ func TestDeleteMarkers(t *testing.T) {
deleteObject(t, tc, bktName, objName, emptyVersion)
versions := listVersions(t, tc, bktName)
require.Len(t, versions.DeleteMarker, 3, "invalid delete markers length")
require.Len(t, versions.DeleteMarker, 0, "invalid delete markers length")
require.Len(t, versions.Version, 0, "versions must be empty")
require.Len(t, listOIDsFromMockedFrostFS(t, tc, bktName), 0, "shouldn't be any object in frostfs")
}
func TestGetHeadDeleteMarker(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket-for-removal", "object-to-delete"
createTestBucket(hc, bktName)
putBucketVersioning(t, hc, bktName, true)
putObject(hc, bktName, objName)
deleteMarkerVersionID, _ := deleteObject(t, hc, bktName, objName, emptyVersion)
w := headObjectBase(hc, bktName, objName, deleteMarkerVersionID)
require.Equal(t, w.Code, http.StatusMethodNotAllowed)
require.Equal(t, w.Result().Header.Get(api.AmzDeleteMarker), "true")
w, r := prepareTestRequest(hc, bktName, objName, nil)
hc.Handler().GetObjectHandler(w, r)
assertStatus(hc.t, w, http.StatusNotFound)
require.Equal(t, w.Result().Header.Get(api.AmzDeleteMarker), "true")
}
func TestDeleteObjectFromListCache(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, objName := "bucket-for-removal", "object-to-delete"
bktInfo, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)
versions := listObjectsV1(t, tc, bktName, "", "", "", -1)
versions := listObjectsV1(tc, bktName, "", "", "", -1)
require.Len(t, versions.Contents, 1)
checkFound(t, tc, bktName, objName, objInfo.VersionID())
@ -217,7 +434,7 @@ func TestDeleteObjectFromListCache(t *testing.T) {
checkNotFound(t, tc, bktName, objName, objInfo.VersionID())
// check cache is clean after object removal
versions = listObjectsV1(t, tc, bktName, "", "", "", -1)
versions = listObjectsV1(tc, bktName, "", "", "", -1)
require.Len(t, versions.Contents, 0)
require.False(t, existInMockedFrostFS(tc, bktInfo, objInfo))
@ -243,25 +460,39 @@ func TestDeleteObjectCheckMarkerReturn(t *testing.T) {
require.Equal(t, deleteMarkerVersion, deleteMarkerVersion2)
}
func TestDeleteBucketByNotOwner(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-name"
bktInfo := createTestBucket(hc, bktName)
deleteBucket(t, hc, bktName, http.StatusForbidden)
hc.owner = bktInfo.Owner
deleteBucket(t, hc, bktName, http.StatusNoContent)
}
func createBucketAndObject(tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
bktInfo := createTestBucket(tc, bktName)
objInfo := createTestObject(tc, bktInfo, objName)
objInfo := createTestObject(tc, bktInfo, objName, encryption.Params{})
return bktInfo, objInfo
}
func createVersionedBucketAndObject(t *testing.T, tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
createTestBucket(tc, bktName)
bktInfo, err := tc.Layer().GetBucketInfo(tc.Context(), bktName)
require.NoError(t, err)
putBucketVersioning(t, tc, bktName, true)
objInfo := createTestObject(tc, bktInfo, objName)
func createVersionedBucketAndObject(_ *testing.T, tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
bktInfo := createVersionedBucket(tc, bktName)
objInfo := createTestObject(tc, bktInfo, objName, encryption.Params{})
return bktInfo, objInfo
}
func createVersionedBucket(hc *handlerContext, bktName string) *data.BucketInfo {
bktInfo := createTestBucket(hc, bktName)
putBucketVersioning(hc.t, hc, bktName, true)
return bktInfo
}
func putBucketVersioning(t *testing.T, tc *handlerContext, bktName string, enabled bool) {
cfg := &VersioningConfiguration{Status: "Suspended"}
if enabled {
@ -272,6 +503,16 @@ func putBucketVersioning(t *testing.T, tc *handlerContext, bktName string, enabl
assertStatus(t, w, http.StatusOK)
}
func getBucketVersioning(hc *handlerContext, bktName string) *VersioningConfiguration {
w, r := prepareTestRequest(hc, bktName, "", nil)
hc.Handler().GetBucketVersioningHandler(w, r)
assertStatus(hc.t, w, http.StatusOK)
res := &VersioningConfiguration{}
parseTestResponse(hc.t, w, res)
return res
}
func deleteObject(t *testing.T, tc *handlerContext, bktName, objName, version string) (string, bool) {
query := make(url.Values)
query.Add(api.QueryVersionID, version)
@ -283,44 +524,99 @@ func deleteObject(t *testing.T, tc *handlerContext, bktName, objName, version st
return w.Header().Get(api.AmzVersionID), w.Header().Get(api.AmzDeleteMarker) != ""
}
func deleteObjects(t *testing.T, tc *handlerContext, bktName string, objVersions [][2]string) *DeleteObjectsResponse {
w := deleteObjectsBase(tc, bktName, objVersions)
res := &DeleteObjectsResponse{}
parseTestResponse(t, w, res)
return res
}
func deleteObjectsBase(hc *handlerContext, bktName string, objVersions [][2]string) *httptest.ResponseRecorder {
req := &DeleteObjectsRequest{}
for _, version := range objVersions {
req.Objects = append(req.Objects, ObjectIdentifier{
ObjectName: version[0],
VersionID: version[1],
})
}
w, r := prepareTestRequest(hc, bktName, "", req)
r.Header.Set(api.ContentMD5, "")
hc.Handler().DeleteMultipleObjectsHandler(w, r)
assertStatus(hc.t, w, http.StatusOK)
return w
}
func deleteBucketForce(t *testing.T, tc *handlerContext, bktName string, code int, value string) {
w, r := prepareTestRequest(tc, bktName, "", nil)
r.Header.Set(api.AmzForceBucketDelete, value)
tc.Handler().DeleteBucketHandler(w, r)
assertStatus(t, w, code)
}
func deleteBucket(t *testing.T, tc *handlerContext, bktName string, code int) {
w, r := prepareTestRequest(tc, bktName, "", nil)
tc.Handler().DeleteBucketHandler(w, r)
assertStatus(t, w, code)
}
func checkNotFound(t *testing.T, tc *handlerContext, bktName, objName, version string) {
query := make(url.Values)
query.Add(api.QueryVersionID, version)
w, r := prepareTestFullRequest(tc, bktName, objName, query, nil)
tc.Handler().HeadObjectHandler(w, r)
func checkNotFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
w := headObjectBase(hc, bktName, objName, version)
assertStatus(t, w, http.StatusNotFound)
}
func checkFound(t *testing.T, tc *handlerContext, bktName, objName, version string) {
func headObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apierr.ErrorCode) {
w := headObjectBase(hc, bktName, objName, version)
assertS3Error(hc.t, w, apierr.GetAPIError(code))
}
func checkFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
w := headObjectBase(hc, bktName, objName, version)
assertStatus(t, w, http.StatusOK)
}
func headObjectWithHeaders(hc *handlerContext, bktName, objName, version string, headers map[string]string) *httptest.ResponseRecorder {
query := make(url.Values)
query.Add(api.QueryVersionID, version)
w, r := prepareTestFullRequest(tc, bktName, objName, query, nil)
tc.Handler().HeadObjectHandler(w, r)
assertStatus(t, w, http.StatusOK)
w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
for k, v := range headers {
r.Header.Set(k, v)
}
hc.Handler().HeadObjectHandler(w, r)
return w
}
func listVersions(t *testing.T, tc *handlerContext, bktName string) *ListObjectsVersionsResponse {
w, r := prepareTestRequest(tc, bktName, "", nil)
tc.Handler().ListBucketObjectVersionsHandler(w, r)
assertStatus(t, w, http.StatusOK)
res := &ListObjectsVersionsResponse{}
parseTestResponse(t, w, res)
func headObjectBase(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
query := make(url.Values)
query.Add(api.QueryVersionID, version)
w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
hc.Handler().HeadObjectHandler(w, r)
return w
}
func listVersions(_ *testing.T, tc *handlerContext, bktName string) *ListObjectsVersionsResponse {
return listObjectsVersions(tc, bktName, "", "", "", "", -1)
}
func getVersion(resp *ListObjectsVersionsResponse, objName string) []*ObjectVersionResponse {
var res []*ObjectVersionResponse
for i, version := range resp.Version {
if version.Key == objName {
res = append(res, &resp.Version[i])
}
}
return res
}
func putObject(t *testing.T, tc *handlerContext, bktName, objName string) {
func putObject(hc *handlerContext, bktName, objName string) {
body := bytes.NewReader([]byte("content"))
w, r := prepareTestPayloadRequest(tc, bktName, objName, body)
tc.Handler().PutObjectHandler(w, r)
assertStatus(t, w, http.StatusOK)
w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
hc.Handler().PutObjectHandler(w, r)
assertStatus(hc.t, w, http.StatusOK)
}
func createSuspendedBucket(t *testing.T, tc *handlerContext, bktName string) *data.BucketInfo {
