forked from TrueCloudLab/frostfs-s3-gw
Compare commits
70 commits: 406075aebb ... cc34f659d1
Commits (SHA1):
cc34f659d1, 924e87face, 5121c73d3f, c334adeb6d, a74d498df2, 69297a4a38, 71d82d1cc8, fafe4af529, 88f1acbdfc, 4e15452853,
da642a498a, de32dfd7ce, 8a30f18ff6, 2d7973b3f1, 6d52f46012, 0ae49eaab0, 093de13f54, cf4fc3b602, 739a6ec9df, c7ee628ab0,
4ad84b9b94, 3e20f736a6, b52552e8c2, 6e8960b2ab, 29ac91dfd5, 84af85ed67, 4804904d9d, e23cc43824, 12434d5f4d, 2e870e99c7,
45e025320f, eae49908da, c32220762f, 6f9ee3da76, 08019f1574, 899213b3f3, 3b6d2bc522, 6509639540, 5698d5844e, 43cae9ee04,
a17ff66975, 38c5503a02, 8273af8bf8, 6dbb07f0fa, 340e6b807b, 0850d21ff3, 9272f4e108, be6a37ada5, 836874a761, 7a285d1464,
f58a0d04ff, 94c2674f44, 6b1b43a364, 43abf58068, ca15acf1d3, 473239bf36, 93cf7c462b, 6c5f9b2764, 42862fd69e, c7a65bd075,
28c6bb4cb8, 0db6cd6727, ff1ec56d24, 9ebfca654b, 055cc6a22a, a61ff3b8cb, 6304d7bfda, cf7254f8cd, 861454e499, b28ecef43b
105 changed files with 9479 additions and 1944 deletions
@@ -18,3 +18,6 @@ jobs:
      - name: Build binary
        run: make

      - name: Check dirty suffix
        run: if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi
@@ -15,6 +15,6 @@ jobs:
          go-version: '1.21'

      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v1
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
        with:
          from: 3fbad97a
          from: 'origin/${{ github.event.pull_request.base.ref }}'
CHANGELOG.md (78 changes)
@@ -4,6 +4,50 @@ This document outlines major changes between releases.

## [Unreleased]

### Fixed
- Fix marshaling errors in `DeleteObjects` method (#222)
- Fix status code in GET/HEAD delete marker (#226)
- Fix `NextVersionIDMarker` in `list-object-versions` (#248)
- Fix possibility of panic during SIGHUP (#288)

### Added
- Add new `frostfs.buffer_max_size_for_put` config param and sync TZ hash for PUT operations (#197)
- Add `X-Amz-Version-Id` header after complete multipart upload (#227)
- Add handling of `X-Amz-Copy-Source-Server-Side-Encryption-Customer-*` headers during copy (#217)
- Add new `logger.destination` config param (#236)
- Add `X-Amz-Content-Sha256` header validation (#218)
- Support frostfsid contract. See `frostfsid` config section (#260)
- Support per namespace placement policies configuration (see `namespaces.config` config param) (#266)
- Support control api to manage policies. See `control` config section (#258)
- Add `namespace` label to billing metrics (#271)
- Support policy-engine (#257)
- Support `policy` contract (#259)
- Support `proxy` contract (#287)
- Authmate: support custom attributes (#292)

### Changed
- Generalise config param `use_default_xmlns_for_complete_multipart` to `use_default_xmlns` so that use default xmlns for all requests (#221)
- Set server IdleTimeout and ReadHeaderTimeout to `30s` and allow to configure them (#220)
- Return `ETag` value in quotes (#219)
- Use tombstone when delete multipart upload (#275)
- Support new parameter `cache.accessbox.removing_check_interval` (#XX)

### Removed
- Drop sending whitespace characters during complete multipart upload and related config param `kludge.complete_multipart_keepalive` (#227)

## [0.28.1] - 2024-01-24

### Added
- MD5 hash as ETag and response header (#205)
- Tree pool traversal limit (#262)

### Updating from 0.28.0

See new `features.md5.enabled` and `frostfs.tree_pool_max_attempts` config
parameters.

## [0.28.0] - Academy of Sciences - 2023-12-07

### Fixed
- Handle negative `Content-Length` on put (#125)
- Use `DisableURIPathEscaping` to presign urls (#125)
@@ -13,17 +57,17 @@ This document outlines major changes between releases.
- Replace part on re-upload when use multipart upload (#176)
- Fix goroutine leak on put object error (#178)
- Fix parsing signed headers in presigned urls (#182)
- Fix url escaping (#188, #224)
- Fix url escaping (#188)
- Use correct keys in `list-multipart-uploads` response (#185)
- Fix parsing `key-marker` for object list versions (#243)
- Fix marshaling errors in `DeleteObjects` method (#222)
- Fix status code in GET/HEAD delete marker (#226)
- Fix `NextVersionIDMarker` in `list-object-versions` (#248)
- Fix parsing `key-marker` for object list versions (#237)
- `GetSubTree` failures (#179)
- Unexpected EOF during multipart download (#210)
- Produce clean version in debian build (#245)

### Added
- Add `trace_id` value into log record when tracing is enabled (#142)
- Add basic error types and exit codes to `frostfs-s3-authmate` (#152)
- Add a metric with addresses of nodes of the same and highest priority that are currently healthy (#51)
- Add a metric with addresses of nodes of the same and highest priority that are currently healthy (#186)
- Support dump metrics descriptions (#80)
- Add `copies_numbers` section to `placement_policy` in config file and support vectors of copies numbers (#70, #101)
- Support impersonate bearer token (#81, #105)
@@ -34,10 +78,9 @@ This document outlines major changes between releases.
- Implement chunk uploading (#106)
- Add new `kludge.bypass_content_encoding_check_in_chunks` config param (#146)
- Add new `frostfs.client_cut` config param (#192)
- Add new `frostfs.buffer_max_size_for_put` config param and sync TZ hash for PUT operations (#197)
- Add `X-Amz-Version-Id` header after complete multipart upload (#227)
- Add handling of `X-Amz-Copy-Source-Server-Side-Encryption-Customer-*` headers during copy (#217)
- Add new `logger.destination` config param (#236)
- Add selection of the node of the latest version of the object (#231)
- Soft memory limit with `runtime.soft_memory_limit` (#196)
- `server_health` metric for every S3 endpoint status (#199)

### Changed
- Update prometheus to v1.15.0 (#94)
@@ -48,16 +91,19 @@ This document outlines major changes between releases.
- Use request scope logger (#111)
- Add `s3-authmate update-secret` command (#131)
- Use default registerer for app metrics (#155)
- Use chi router instead of archived gorlilla/mux (#149)
- Use chi router instead of archived gorlilla/mux (#149, #174, #188)
- Complete multipart upload doesn't unnecessary copy now. Thus, the total time of multipart upload was reduced by 2 times (#63)
- Use gate key to form object owner (#175)
- Apply placement policies and copies if there is at least one valid value (#168)
- Generalise config param `use_default_xmlns_for_complete_multipart` to `use_default_xmlns` so that use default xmlns for all requests (#221)
- Set server IdleTimeout and ReadHeaderTimeout to `30s` and allow to configure them (#220)
- `statistic_tx_bytes_total` and `statistic_rx_bytes_total` metric to `statistic_bytes_total` metric with `direction` label (#153)
- Refactor of context-stored data receivers (#137)
- Refactor fetch/parse config parameters functions (#117)
- Move all log messages to constants (#96)
- Allow zero value of `part-number-marker` (#207)
- Clean tag node in the tree service instead of removal (#233)

### Removed
- Drop `tree.service` param (now endpoints from `peers` section are used) (#133)
- Drop sending whitespace characters during complete multipart upload and related config param `kludge.complete_multipart_keepalive` (#227)

## [0.27.0] - Karpinsky - 2023-07-12
@@ -103,4 +149,6 @@ This project is a fork of [NeoFS S3 Gateway](https://github.com/nspcc-dev/neofs-
To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-s3-gw/blob/master/CHANGELOG.md.

[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/b2148cc3...v0.27.0
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.27.0...master
[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.27.0...v0.28.0
[0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.0...v0.28.1
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.1...master
Makefile (8 changes)
@@ -151,10 +151,16 @@ clean:

# Generate code from .proto files
protoc:
	# Install specific version for protobuf lib
	@GOBIN=$(abspath $(BINDIR)) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen
	@for f in `find . -type f -name '*.proto' -not -path './vendor/*'`; do \
		echo "⇒ Processing $$f "; \
		protoc \
			--go_out=paths=source_relative:. $$f; \
			--go_out=paths=source_relative:. \
			--plugin=protoc-gen-go-frostfs=$(BINDIR)/protogen \
			--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
			--go-grpc_opt=require_unimplemented_servers=false \
			--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
	done
	rm -rf vendor
VERSION (2 changes)
@@ -1 +1 @@
v0.27.0
v0.28.1
@ -5,7 +5,6 @@ import (
|
|||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
|
@ -15,13 +14,12 @@ import (
|
|||
"time"
|
||||
|
||||
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
)
|
||||
|
||||
// authorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
|
||||
|
@ -31,27 +29,13 @@ var authorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(
|
|||
var postPolicyCredentialRegexp = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request`)
|
||||
|
||||
type (
|
||||
// Center is a user authentication interface.
|
||||
Center interface {
|
||||
Authenticate(request *http.Request) (*Box, error)
|
||||
}
|
||||
|
||||
// Box contains access box and additional info.
|
||||
Box struct {
|
||||
AccessBox *accessbox.Box
|
||||
ClientTime time.Time
|
||||
AuthHeaders *AuthHeader
|
||||
}
|
||||
|
||||
center struct {
|
||||
Center struct {
|
||||
reg *RegexpSubmatcher
|
||||
postReg *RegexpSubmatcher
|
||||
cli tokens.Credentials
|
||||
allowedAccessKeyIDPrefixes []string // empty slice means all access key ids are allowed
|
||||
}
|
||||
|
||||
prs int
|
||||
|
||||
//nolint:revive
|
||||
AuthHeader struct {
|
||||
AccessKeyID string
|
||||
|
@ -76,34 +60,38 @@ const (
|
|||
AmzSignedHeaders = "X-Amz-SignedHeaders"
|
||||
AmzExpires = "X-Amz-Expires"
|
||||
AmzDate = "X-Amz-Date"
|
||||
AmzContentSHA256 = "X-Amz-Content-Sha256"
|
||||
AuthorizationHdr = "Authorization"
|
||||
ContentTypeHdr = "Content-Type"
|
||||
|
||||
UnsignedPayload = "UNSIGNED-PAYLOAD"
|
||||
StreamingUnsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
|
||||
StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
|
||||
StreamingContentSHA256Trailer = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
|
||||
StreamingContentECDSASHA256 = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"
|
||||
StreamingContentECDSASHA256Trailer = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"
|
||||
)
|
||||
|
||||
// ErrNoAuthorizationHeader is returned for unauthenticated requests.
|
||||
var ErrNoAuthorizationHeader = errors.New("no authorization header")
|
||||
|
||||
func (p prs) Read(_ []byte) (n int, err error) {
|
||||
panic("implement me")
|
||||
var ContentSHA256HeaderStandardValue = map[string]struct{}{
|
||||
UnsignedPayload: {},
|
||||
StreamingUnsignedPayloadTrailer: {},
|
||||
StreamingContentSHA256: {},
|
||||
StreamingContentSHA256Trailer: {},
|
||||
StreamingContentECDSASHA256: {},
|
||||
StreamingContentECDSASHA256Trailer: {},
|
||||
}
|
||||
|
||||
func (p prs) Seek(_ int64, _ int) (int64, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
var _ io.ReadSeeker = prs(0)
|
||||
|
||||
// New creates an instance of AuthCenter.
|
||||
func New(frostFS tokens.FrostFS, key *keys.PrivateKey, prefixes []string, config *cache.Config) Center {
|
||||
return &center{
|
||||
cli: tokens.New(frostFS, key, config),
|
||||
func New(creds tokens.Credentials, prefixes []string) *Center {
|
||||
return &Center{
|
||||
cli: creds,
|
||||
reg: NewRegexpMatcher(authorizationFieldRegexp),
|
||||
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
||||
allowedAccessKeyIDPrefixes: prefixes,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *center) parseAuthHeader(header string) (*AuthHeader, error) {
|
||||
func (c *Center) parseAuthHeader(header string) (*AuthHeader, error) {
|
||||
submatches := c.reg.GetSubmatches(header)
|
||||
if len(submatches) != authHeaderPartsNum {
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed)
|
||||
|
@ -134,7 +122,12 @@ func (a *AuthHeader) getAddress() (oid.Address, error) {
|
|||
return addr, nil
|
||||
}
|
||||
|
||||
func (c *center) Authenticate(r *http.Request) (*Box, error) {
|
||||
func IsStandardContentSHA256(key string) bool {
|
||||
_, ok := ContentSHA256HeaderStandardValue[key]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
||||
var (
|
||||
err error
|
||||
authHdr *AuthHeader
|
||||
|
@ -168,7 +161,7 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
|
|||
if strings.HasPrefix(r.Header.Get(ContentTypeHdr), "multipart/form-data") {
|
||||
return c.checkFormData(r)
|
||||
}
|
||||
return nil, ErrNoAuthorizationHeader
|
||||
return nil, middleware.ErrNoAuthorizationHeader
|
||||
}
|
||||
authHdr, err = c.parseAuthHeader(authHeaderField[0])
|
||||
if err != nil {
|
||||
|
@ -197,14 +190,22 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
|
|||
return nil, fmt.Errorf("get box: %w", err)
|
||||
}
|
||||
|
||||
if err = checkFormatHashContentSHA256(r.Header.Get(AmzContentSHA256)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
clonedRequest := cloneRequest(r, authHdr)
|
||||
if err = c.checkSign(authHdr, box, clonedRequest, signatureDateTime); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := &Box{
|
||||
result := &middleware.Box{
|
||||
AccessBox: box,
|
||||
AuthHeaders: authHdr,
|
||||
AuthHeaders: &middleware.AuthHeader{
|
||||
AccessKeyID: authHdr.AccessKeyID,
|
||||
Region: authHdr.Region,
|
||||
SignatureV4: authHdr.SignatureV4,
|
||||
},
|
||||
}
|
||||
if needClientTime {
|
||||
result.ClientTime = signatureDateTime
|
||||
|
@ -213,7 +214,21 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
|
|||
return result, nil
|
||||
}
|
||||
|
||||
func (c center) checkAccessKeyID(accessKeyID string) error {
|
||||
func checkFormatHashContentSHA256(hash string) error {
|
||||
if !IsStandardContentSHA256(hash) {
|
||||
hashBinary, err := hex.DecodeString(hash)
|
||||
if err != nil {
|
||||
return apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
|
||||
}
|
||||
if len(hashBinary) != sha256.Size && len(hash) != 0 {
|
||||
return apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c Center) checkAccessKeyID(accessKeyID string) error {
|
||||
if len(c.allowedAccessKeyIDPrefixes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
@ -227,7 +242,7 @@ func (c center) checkAccessKeyID(accessKeyID string) error {
|
|||
return apiErrors.GetAPIError(apiErrors.ErrAccessDenied)
|
||||
}
|
||||
|
||||
func (c *center) checkFormData(r *http.Request) (*Box, error) {
|
||||
func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
|
||||
if err := r.ParseMultipartForm(maxFormSizeMemory); err != nil {
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidArgument)
|
||||
}
|
||||
|
@ -238,7 +253,7 @@ func (c *center) checkFormData(r *http.Request) (*Box, error) {
|
|||
|
||||
policy := MultipartFormValue(r, "policy")
|
||||
if policy == "" {
|
||||
return nil, ErrNoAuthorizationHeader
|
||||
return nil, middleware.ErrNoAuthorizationHeader
|
||||
}
|
||||
|
||||
submatches := c.postReg.GetSubmatches(MultipartFormValue(r, "x-amz-credential"))
|
||||
|
@ -269,7 +284,7 @@ func (c *center) checkFormData(r *http.Request) (*Box, error) {
|
|||
return nil, apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch)
|
||||
}
|
||||
|
||||
return &Box{AccessBox: box}, nil
|
||||
return &middleware.Box{AccessBox: box}, nil
|
||||
}
|
||||
|
||||
func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
|
||||
|
@ -293,7 +308,7 @@ func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
|
|||
return otherRequest
|
||||
}
|
||||
|
||||
func (c *center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
|
||||
func (c *Center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
|
||||
awsCreds := credentials.NewStaticCredentials(authHeader.AccessKeyID, box.Gate.SecretKey, "")
|
||||
signer := v4.NewSigner(awsCreds)
|
||||
signer.DisableURIPathEscaping = true
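The constructor no longer builds a credentials source from a FrostFS client and key; it takes a ready `tokens.Credentials`, and `Authenticate` now returns `*middleware.Box`. A minimal usage sketch under those signatures (the surrounding function and package name are illustrative):

```go
package sketch

import (
	"net/http"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
)

// authenticate wires the refactored center into request handling: New takes a
// ready tokens.Credentials (plus optional access key ID prefixes) and
// Authenticate yields a *middleware.Box with the resolved access box.
func authenticate(creds tokens.Credentials, r *http.Request) (*middleware.Box, error) {
	center := auth.New(creds, nil) // nil: no access key ID prefix restriction

	box, err := center.Authenticate(r)
	if err != nil {
		// middleware.ErrNoAuthorizationHeader marks an anonymous request.
		return nil, err
	}

	return box, nil
}
```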
@ -12,7 +12,7 @@ import (
|
|||
func TestAuthHeaderParse(t *testing.T) {
|
||||
defaultHeader := "AWS4-HMAC-SHA256 Credential=oid0cid/20210809/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=2811ccb9e242f41426738fb1f"
|
||||
|
||||
center := &center{
|
||||
center := &Center{
|
||||
reg: NewRegexpMatcher(authorizationFieldRegexp),
|
||||
}
|
||||
|
||||
|
@ -99,3 +99,49 @@ func TestSignature(t *testing.T) {
|
|||
signature := signStr(secret, "s3", "us-east-1", signTime, strToSign)
|
||||
require.Equal(t, "dfbe886241d9e369cf4b329ca0f15eb27306c97aa1022cc0bb5a914c4ef87634", signature)
|
||||
}
|
||||
|
||||
func TestCheckFormatContentSHA256(t *testing.T) {
|
||||
defaultErr := errors.GetAPIError(errors.ErrContentSHA256Mismatch)
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
hash string
|
||||
error error
|
||||
}{
|
||||
{
|
||||
name: "invalid hash format: length and character",
|
||||
hash: "invalid-hash",
|
||||
error: defaultErr,
|
||||
},
|
||||
{
|
||||
name: "invalid hash format: length (63 characters)",
|
||||
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f7",
|
||||
error: defaultErr,
|
||||
},
|
||||
{
|
||||
name: "invalid hash format: character",
|
||||
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f7s",
|
||||
error: defaultErr,
|
||||
},
|
||||
{
|
||||
name: "unsigned payload",
|
||||
hash: "UNSIGNED-PAYLOAD",
|
||||
error: nil,
|
||||
},
|
||||
{
|
||||
name: "no hash",
|
||||
hash: "",
|
||||
error: nil,
|
||||
},
|
||||
{
|
||||
name: "correct hash format",
|
||||
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
|
||||
error: nil,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := checkFormatHashContentSHA256(tc.hash)
|
||||
require.Equal(t, tc.error, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -11,9 +11,7 @@ import (
|
|||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -42,11 +40,11 @@ func (m credentialsMock) GetBox(_ context.Context, addr oid.Address) (*accessbox
|
|||
return box, nil
|
||||
}
|
||||
|
||||
func (m credentialsMock) Put(context.Context, cid.ID, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error) {
|
||||
func (m credentialsMock) Put(context.Context, cid.ID, tokens.CredentialsParam) (oid.Address, error) {
|
||||
return oid.Address{}, nil
|
||||
}
|
||||
|
||||
func (m credentialsMock) Update(context.Context, oid.Address, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error) {
|
||||
func (m credentialsMock) Update(context.Context, oid.Address, tokens.CredentialsParam) (oid.Address, error) {
|
||||
return oid.Address{}, nil
|
||||
}
|
||||
|
||||
|
@ -84,7 +82,7 @@ func TestCheckSign(t *testing.T) {
|
|||
mock := newTokensFrostfsMock()
|
||||
mock.addBox(accessKeyAddr, expBox)
|
||||
|
||||
c := &center{
|
||||
c := &Center{
|
||||
cli: mock,
|
||||
reg: NewRegexpMatcher(authorizationFieldRegexp),
|
||||
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
||||
|
|
api/cache/accessbox.go (vendored, 26 changes)
@@ -24,6 +24,11 @@ type (
		Lifetime time.Duration
		Logger   *zap.Logger
	}

	AccessBoxCacheValue struct {
		Box     *accessbox.Box
		PutTime time.Time
	}
)

const (

@@ -42,21 +47,21 @@ func DefaultAccessBoxConfig(logger *zap.Logger) *Config {
	}
}

// NewAccessBoxCache creates an object of BucketCache.
// NewAccessBoxCache creates an object of AccessBoxCache.
func NewAccessBoxCache(config *Config) *AccessBoxCache {
	gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()

	return &AccessBoxCache{cache: gc, logger: config.Logger}
}

// Get returns a cached object.
func (o *AccessBoxCache) Get(address oid.Address) *accessbox.Box {
// Get returns a cached accessbox.
func (o *AccessBoxCache) Get(address oid.Address) *AccessBoxCacheValue {
	entry, err := o.cache.Get(address)
	if err != nil {
		return nil
	}

	result, ok := entry.(*accessbox.Box)
	result, ok := entry.(*AccessBoxCacheValue)
	if !ok {
		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
			zap.String("expected", fmt.Sprintf("%T", result)))

@@ -66,7 +71,16 @@ func (o *AccessBoxCache) Get(address oid.Address) *accessbox.Box {
	return result
}

// Put stores an object to cache.
// Put stores an accessbox to cache.
func (o *AccessBoxCache) Put(address oid.Address, box *accessbox.Box) error {
	return o.cache.Set(address, box)
	val := &AccessBoxCacheValue{
		Box:     box,
		PutTime: time.Now(),
	}

	return o.cache.Set(address, val)
}

// Delete removes an accessbox from cache.
func (o *AccessBoxCache) Delete(address oid.Address) {
	o.cache.Remove(address)
}
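The cached value now carries the time it was stored. A plausible consumer is the `cache.accessbox.removing_check_interval` setting mentioned in the changelog; the helper below is only a sketch of how a periodic check could use `PutTime` (the function and its wiring are assumptions, not gateway code):

```go
package sketch

import (
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// removeStale drops cached access boxes whose PutTime is older than maxAge;
// this is roughly what a periodic check driven by
// cache.accessbox.removing_check_interval would need the new field for.
func removeStale(c *cache.AccessBoxCache, addresses []oid.Address, maxAge time.Duration) {
	for _, addr := range addresses {
		if val := c.Get(addr); val != nil && time.Since(val.PutTime) > maxAge {
			c.Delete(addr)
		}
	}
}
```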
api/cache/buckets.go (vendored, 14 changes)
@@ -39,8 +39,8 @@ func NewBucketCache(config *Config) *BucketCache {
}

// Get returns a cached object.
func (o *BucketCache) Get(key string) *data.BucketInfo {
	entry, err := o.cache.Get(key)
func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
	entry, err := o.cache.Get(formKey(ns, bktName))
	if err != nil {
		return nil
	}

@@ -57,10 +57,14 @@ func (o *BucketCache) Get(key string) *data.BucketInfo {

// Put puts an object to cache.
func (o *BucketCache) Put(bkt *data.BucketInfo) error {
	return o.cache.Set(bkt.Name, bkt)
	return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
}

// Delete deletes an object from cache.
func (o *BucketCache) Delete(key string) bool {
	return o.cache.Remove(key)
func (o *BucketCache) Delete(bkt *data.BucketInfo) bool {
	return o.cache.Remove(formKey(bkt.Zone, bkt.Name))
}

func formKey(ns, name string) string {
	return name + "." + ns
}
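Because `formKey` mixes the namespace ("zone") into the key, identically named buckets from different namespaces no longer collide in the cache. A small illustrative test (not from the repository) of that behaviour:

```go
package cache_test

import (
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
)

// Illustrative only: the same bucket name under two namespaces maps to two
// distinct entries, because formKey appends the namespace to the bucket name.
func TestBucketCacheNamespaceKeys(t *testing.T) {
	c := cache.NewBucketCache(&cache.Config{Size: 10, Lifetime: time.Minute, Logger: zap.NewNop()})

	require.NoError(t, c.Put(&data.BucketInfo{Name: "bkt", Zone: "ns1"}))
	require.NoError(t, c.Put(&data.BucketInfo{Name: "bkt", Zone: "ns2"}))

	require.Equal(t, "ns1", c.Get("ns1", "bkt").Zone)
	require.Equal(t, "ns2", c.Get("ns2", "bkt").Zone)
}
```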
api/cache/cache_test.go (vendored, 8 changes)
@@ -22,7 +22,7 @@ func TestAccessBoxCacheType(t *testing.T) {
	err := cache.Put(addr, box)
	require.NoError(t, err)
	val := cache.Get(addr)
	require.Equal(t, box, val)
	require.Equal(t, box, val.Box)
	require.Equal(t, 0, observedLog.Len())

	err = cache.cache.Set(addr, "tmp")

@@ -38,13 +38,13 @@ func TestBucketsCacheType(t *testing.T) {

	err := cache.Put(bktInfo)
	require.NoError(t, err)
	val := cache.Get(bktInfo.Name)
	val := cache.Get("", bktInfo.Name)
	require.Equal(t, bktInfo, val)
	require.Equal(t, 0, observedLog.Len())

	err = cache.cache.Set(bktInfo.Name, "tmp")
	err = cache.cache.Set(bktInfo.Name+"."+bktInfo.Zone, "tmp")
	require.NoError(t, err)
	assertInvalidCacheEntry(t, cache.Get(bktInfo.Name), observedLog)
	assertInvalidCacheEntry(t, cache.Get(bktInfo.Zone, bktInfo.Name), observedLog)
}

func TestObjectNamesCacheType(t *testing.T) {
api/cache/listsession.go (vendored, new file, 107 lines)
@ -0,0 +1,107 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"github.com/bluele/gcache"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
|
||||
// ListSessionCache contains cache for list session (during pagination).
|
||||
ListSessionCache struct {
|
||||
cache gcache.Cache
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
// ListSessionKey is a key to find a ListSessionCache's entry.
|
||||
ListSessionKey struct {
|
||||
cid cid.ID
|
||||
prefix string
|
||||
token string
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultListSessionCacheLifetime is a default lifetime of entries in cache of ListObjects.
|
||||
DefaultListSessionCacheLifetime = time.Second * 60
|
||||
// DefaultListSessionCacheSize is a default size of cache of ListObjects.
|
||||
DefaultListSessionCacheSize = 100
|
||||
)
|
||||
|
||||
// DefaultListSessionConfig returns new default cache expiration values.
|
||||
func DefaultListSessionConfig(logger *zap.Logger) *Config {
|
||||
return &Config{
|
||||
Size: DefaultListSessionCacheSize,
|
||||
Lifetime: DefaultListSessionCacheLifetime,
|
||||
Logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
func (k *ListSessionKey) String() string {
|
||||
return k.cid.EncodeToString() + k.prefix + k.token
|
||||
}
|
||||
|
||||
// NewListSessionCache is a constructor which creates an object of ListObjectsCache with the given lifetime of entries.
|
||||
func NewListSessionCache(config *Config) *ListSessionCache {
|
||||
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).EvictedFunc(func(key interface{}, val interface{}) {
|
||||
session, ok := val.(*data.ListSession)
|
||||
if !ok {
|
||||
config.Logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", val)),
|
||||
zap.String("expected", fmt.Sprintf("%T", session)))
|
||||
}
|
||||
|
||||
if !session.Acquired.Load() {
|
||||
session.Cancel()
|
||||
}
|
||||
}).Build()
|
||||
return &ListSessionCache{cache: gc, logger: config.Logger}
|
||||
}
|
||||
|
||||
// GetListSession returns a list of ObjectInfo.
|
||||
func (l *ListSessionCache) GetListSession(key ListSessionKey) *data.ListSession {
|
||||
entry, err := l.cache.Get(key)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
result, ok := entry.(*data.ListSession)
|
||||
if !ok {
|
||||
l.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||
return nil
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// PutListSession puts a list of object versions to cache.
|
||||
func (l *ListSessionCache) PutListSession(key ListSessionKey, session *data.ListSession) error {
|
||||
s := l.GetListSession(key)
|
||||
if s != nil && s != session {
|
||||
if !s.Acquired.Load() {
|
||||
s.Cancel()
|
||||
}
|
||||
}
|
||||
return l.cache.Set(key, session)
|
||||
}
|
||||
|
||||
// DeleteListSession removes key from cache.
|
||||
func (l *ListSessionCache) DeleteListSession(key ListSessionKey) {
|
||||
l.cache.Remove(key)
|
||||
}
|
||||
|
||||
// CreateListSessionCacheKey returns ListSessionKey with the given CID, prefix and token.
|
||||
func CreateListSessionCacheKey(cnr cid.ID, prefix, token string) ListSessionKey {
|
||||
p := ListSessionKey{
|
||||
cid: cnr,
|
||||
prefix: prefix,
|
||||
token: token,
|
||||
}
|
||||
|
||||
return p
|
||||
}
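A sketch of how a caller might resume a cached listing session between paginated requests; the acquire step mirrors the `Acquired`/`Cancel` handling in the eviction callback above, but the helper itself is illustrative:

```go
package sketch

import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// resumeListing looks up the session cached for (container, prefix, continuation
// token) and marks it acquired so the eviction callback does not cancel its
// context while a page is being served.
func resumeListing(c *cache.ListSessionCache, cnr cid.ID, prefix, token string) *data.ListSession {
	key := cache.CreateListSessionCacheKey(cnr, prefix, token)

	session := c.GetListSession(key)
	if session == nil {
		return nil // no cached stream: the caller starts a new listing
	}
	session.Acquired.Store(true)

	return session
}
```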
api/cache/policy.go (vendored, new file, 72 lines)
@ -0,0 +1,72 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
|
||||
"github.com/bluele/gcache"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// MorphPolicyCache provides lru cache for listing policies stored in policy contract.
|
||||
type MorphPolicyCache struct {
|
||||
cache gcache.Cache
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
type MorphPolicyCacheKey struct {
|
||||
Target engine.Target
|
||||
Name chain.Name
|
||||
}
|
||||
|
||||
const (
|
||||
// DefaultMorphPolicyCacheSize is a default maximum number of entries in cache.
|
||||
DefaultMorphPolicyCacheSize = 1e4
|
||||
// DefaultMorphPolicyCacheLifetime is a default lifetime of entries in cache.
|
||||
DefaultMorphPolicyCacheLifetime = time.Minute
|
||||
)
|
||||
|
||||
// DefaultMorphPolicyConfig returns new default cache expiration values.
|
||||
func DefaultMorphPolicyConfig(logger *zap.Logger) *Config {
|
||||
return &Config{
|
||||
Size: DefaultMorphPolicyCacheSize,
|
||||
Lifetime: DefaultMorphPolicyCacheLifetime,
|
||||
Logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// NewMorphPolicyCache creates an object of MorphPolicyCache.
|
||||
func NewMorphPolicyCache(config *Config) *MorphPolicyCache {
|
||||
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
||||
return &MorphPolicyCache{cache: gc, logger: config.Logger}
|
||||
}
|
||||
|
||||
// Get returns a cached object. Returns nil if value is missing.
|
||||
func (o *MorphPolicyCache) Get(key MorphPolicyCacheKey) []*chain.Chain {
|
||||
entry, err := o.cache.Get(key)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
result, ok := entry.([]*chain.Chain)
|
||||
if !ok {
|
||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||
return nil
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Put puts an object to cache.
|
||||
func (o *MorphPolicyCache) Put(key MorphPolicyCacheKey, list []*chain.Chain) error {
|
||||
return o.cache.Set(key, list)
|
||||
}
|
||||
|
||||
// Delete deletes an object from cache.
|
||||
func (o *MorphPolicyCache) Delete(key MorphPolicyCacheKey) bool {
|
||||
return o.cache.Remove(key)
|
||||
}
|
|
@ -2,6 +2,7 @@ package data
|
|||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
|
@ -36,8 +37,6 @@ type (
|
|||
ObjectInfo struct {
|
||||
ID oid.ID
|
||||
CID cid.ID
|
||||
IsDir bool
|
||||
IsDeleteMarker bool
|
||||
|
||||
Bucket string
|
||||
Name string
|
||||
|
@ -83,12 +82,12 @@ type (
|
|||
)
|
||||
|
||||
// NotificationInfoFromObject creates new NotificationInfo from ObjectInfo.
|
||||
func NotificationInfoFromObject(objInfo *ObjectInfo) *NotificationInfo {
|
||||
func NotificationInfoFromObject(objInfo *ObjectInfo, md5Enabled bool) *NotificationInfo {
|
||||
return &NotificationInfo{
|
||||
Name: objInfo.Name,
|
||||
Version: objInfo.VersionID(),
|
||||
Size: objInfo.Size,
|
||||
HashSum: objInfo.HashSum,
|
||||
HashSum: Quote(objInfo.ETag(md5Enabled)),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -135,3 +134,11 @@ func (b BucketSettings) VersioningEnabled() bool {
|
|||
func (b BucketSettings) VersioningSuspended() bool {
|
||||
return b.Versioning == VersioningSuspended
|
||||
}
|
||||
|
||||
func Quote(val string) string {
|
||||
return "\"" + val + "\""
|
||||
}
|
||||
|
||||
func UnQuote(val string) string {
|
||||
return strings.Trim(val, "\"")
|
||||
}
|
||||
|
|
api/data/listsession.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package data

import (
	"context"
	"sync/atomic"
)

type VersionsStream interface {
	Next(ctx context.Context) (*NodeVersion, error)
}

type ListSession struct {
	Next     []*ExtendedNodeVersion
	Stream   VersionsStream
	NamesMap map[string]struct{}
	Context  context.Context
	Cancel   context.CancelFunc
	Acquired atomic.Bool
}
@ -16,20 +16,31 @@ const (
|
|||
// NodeVersion represent node from tree service.
|
||||
type NodeVersion struct {
|
||||
BaseNodeVersion
|
||||
DeleteMarker *DeleteMarkerInfo
|
||||
IsUnversioned bool
|
||||
IsCombined bool
|
||||
}
|
||||
|
||||
func (v NodeVersion) IsDeleteMarker() bool {
|
||||
return v.DeleteMarker != nil
|
||||
// ExtendedNodeVersion contains additional node info to be able to sort versions by timestamp.
|
||||
type ExtendedNodeVersion struct {
|
||||
NodeVersion *NodeVersion
|
||||
IsLatest bool
|
||||
DirName string
|
||||
}
|
||||
|
||||
// DeleteMarkerInfo is used to save object info if node in the tree service is delete marker.
|
||||
// We need this information because the "delete marker" object is no longer stored in FrostFS.
|
||||
type DeleteMarkerInfo struct {
|
||||
Created time.Time
|
||||
Owner user.ID
|
||||
func (e ExtendedNodeVersion) Version() string {
|
||||
if e.NodeVersion.IsUnversioned {
|
||||
return UnversionedObjectVersionID
|
||||
}
|
||||
|
||||
return e.NodeVersion.OID.EncodeToString()
|
||||
}
|
||||
|
||||
func (e ExtendedNodeVersion) Name() string {
|
||||
if e.DirName != "" {
|
||||
return e.DirName
|
||||
}
|
||||
|
||||
return e.NodeVersion.FilePath
|
||||
}
|
||||
|
||||
// ExtendedObjectInfo contains additional node info to be able to sort versions by timestamp.
|
||||
|
@ -58,6 +69,27 @@ type BaseNodeVersion struct {
|
|||
ETag string
|
||||
MD5 string
|
||||
FilePath string
|
||||
Created *time.Time
|
||||
Owner *user.ID
|
||||
IsDeleteMarker bool
|
||||
}
|
||||
|
||||
func (v *BaseNodeVersion) GetETag(md5Enabled bool) string {
|
||||
if md5Enabled && len(v.MD5) > 0 {
|
||||
return v.MD5
|
||||
}
|
||||
return v.ETag
|
||||
}
|
||||
|
||||
// IsFilledExtra returns true is node was created by version of gate v0.29.x and later.
|
||||
func (v BaseNodeVersion) IsFilledExtra() bool {
|
||||
return v.Created != nil && v.Owner != nil
|
||||
}
|
||||
|
||||
func (v *BaseNodeVersion) FillExtra(owner *user.ID, created *time.Time, realSize uint64) {
|
||||
v.Owner = owner
|
||||
v.Created = created
|
||||
v.Size = realSize
|
||||
}
|
||||
|
||||
type ObjectTaggingInfo struct {
|
||||
|
@ -77,6 +109,7 @@ type MultipartInfo struct {
|
|||
Created time.Time
|
||||
Meta map[string]string
|
||||
CopiesNumbers []uint32
|
||||
Finished bool
|
||||
}
|
||||
|
||||
// PartInfo is upload information about part.
|
||||
|
@ -93,9 +126,17 @@ type PartInfo struct {
|
|||
|
||||
// ToHeaderString form short part representation to use in S3-Completed-Parts header.
|
||||
func (p *PartInfo) ToHeaderString() string {
|
||||
// ETag value contains SHA256 checksum which is used while getting object parts attributes.
|
||||
return strconv.Itoa(p.Number) + "-" + strconv.FormatUint(p.Size, 10) + "-" + p.ETag
|
||||
}
|
||||
|
||||
func (p *PartInfo) GetETag(md5Enabled bool) string {
|
||||
if md5Enabled && len(p.MD5) > 0 {
|
||||
return p.MD5
|
||||
}
|
||||
return p.ETag
|
||||
}
|
||||
|
||||
// LockInfo is lock information to create appropriate tree node.
|
||||
type LockInfo struct {
|
||||
id uint64
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"encoding/json"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
@ -24,6 +25,9 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||
engineiam "git.frostfs.info/TrueCloudLab/policy-engine/iam"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
@ -465,7 +469,7 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
|
|||
if updated {
|
||||
s := &SendNotificationParams{
|
||||
Event: EventObjectACLPut,
|
||||
NotificationInfo: data.NotificationInfoFromObject(objInfo),
|
||||
NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.MD5Enabled()),
|
||||
BktInfo: bktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
|
@ -485,19 +489,45 @@ func (h *handler) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request)
|
|||
return
|
||||
}
|
||||
|
||||
bucketACL, err := h.obj.GetBucketACL(r.Context(), bktInfo)
|
||||
resolvedNamespace := h.cfg.ResolveNamespaceAlias(reqInfo.Namespace)
|
||||
jsonPolicy, err := h.ape.GetPolicy(resolvedNamespace, bktInfo.CID)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
|
||||
if strings.Contains(err.Error(), "not found") {
|
||||
err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucketPolicy), err.Error())
|
||||
}
|
||||
h.logAndSendError(w, "failed to get policy from storage", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
ast := tableToAst(bucketACL.EACL, reqInfo.BucketName)
|
||||
bktPolicy := astToPolicy(ast)
|
||||
|
||||
w.Header().Set(api.ContentType, "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
if err = json.NewEncoder(w).Encode(bktPolicy); err != nil {
|
||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
||||
if _, err = w.Write(jsonPolicy); err != nil {
|
||||
h.logAndSendError(w, "write json policy to client", reqInfo, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *handler) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := middleware.GetReqInfo(r.Context())
|
||||
|
||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
resolvedNamespace := h.cfg.ResolveNamespaceAlias(reqInfo.Namespace)
|
||||
|
||||
target := engine.NamespaceTarget(resolvedNamespace)
|
||||
chainID := getBucketChainID(bktInfo)
|
||||
if err = h.ape.RemoveChain(target, chainID); err != nil {
|
||||
h.logAndSendError(w, "failed to remove morph rule chain", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = h.ape.DeletePolicy(resolvedNamespace, bktInfo.CID); err != nil {
|
||||
h.logAndSendError(w, "failed to delete policy from storage", reqInfo, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -523,30 +553,52 @@ func (h *handler) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request)
|
|||
return
|
||||
}
|
||||
|
||||
token, err := getSessionTokenSetEACL(r.Context())
|
||||
jsonPolicy, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "couldn't get eacl token", reqInfo, err)
|
||||
h.logAndSendError(w, "read body", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
bktPolicy := &bucketPolicy{Bucket: reqInfo.BucketName}
|
||||
if err = json.NewDecoder(r.Body).Decode(bktPolicy); err != nil {
|
||||
var bktPolicy engineiam.Policy
|
||||
if err = json.Unmarshal(jsonPolicy, &bktPolicy); err != nil {
|
||||
h.logAndSendError(w, "could not parse bucket policy", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
astPolicy, err := policyToAst(bktPolicy)
|
||||
s3Chain, err := engineiam.ConvertToS3Chain(bktPolicy, h.frostfsid)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not translate policy to ast", reqInfo, err)
|
||||
h.logAndSendError(w, "could not convert s3 policy to chain policy", reqInfo, err)
|
||||
return
|
||||
}
|
||||
s3Chain.ID = getBucketChainID(bktInfo)
|
||||
|
||||
for _, rule := range s3Chain.Rules {
|
||||
for _, resource := range rule.Resources.Names {
|
||||
if reqInfo.BucketName != strings.Split(strings.TrimPrefix(resource, arnAwsPrefix), "/")[0] {
|
||||
h.logAndSendError(w, "policy resource mismatched bucket", reqInfo, errors.GetAPIError(errors.ErrMalformedPolicy))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resolvedNamespace := h.cfg.ResolveNamespaceAlias(reqInfo.Namespace)
|
||||
|
||||
target := engine.NamespaceTarget(resolvedNamespace)
|
||||
if err = h.ape.AddChain(target, s3Chain); err != nil {
|
||||
h.logAndSendError(w, "failed to add morph rule chain", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = h.updateBucketACL(r, astPolicy, bktInfo, token); err != nil {
|
||||
h.logAndSendError(w, "could not update bucket acl", reqInfo, err)
|
||||
if err = h.ape.PutPolicy(resolvedNamespace, bktInfo.CID, jsonPolicy); err != nil {
|
||||
h.logAndSendError(w, "failed to save policy to storage", reqInfo, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func getBucketChainID(bktInfo *data.BucketInfo) chain.ID {
|
||||
return chain.ID("bkt" + string(bktInfo.CID[:]))
|
||||
}
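The loop above accepts a statement only when every resource ARN points at the bucket being configured. A minimal sketch of that extraction (the constant value matches the ARNs used elsewhere in this diff; the helper is illustrative, not repository code):

```go
package sketch

import "strings"

const arnAwsPrefix = "arn:aws:s3:::" // same prefix the handler package uses

// bucketFromResource mirrors the check in PutBucketPolicyHandler: strip the ARN
// prefix and keep everything before the first "/".
func bucketFromResource(resource string) string {
	return strings.Split(strings.TrimPrefix(resource, arnAwsPrefix), "/")[0]
}

// bucketFromResource("arn:aws:s3:::bucket-for-policy/*") == "bucket-for-policy",
// so a statement whose resources point at any other bucket is rejected with
// ErrMalformedPolicy before the chain is stored.
```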
|
||||
|
||||
func parseACLHeaders(header http.Header, key *keys.PublicKey) (*AccessControlPolicy, error) {
|
||||
var err error
|
||||
acp := &AccessControlPolicy{Owner: Owner{
|
||||
|
@ -642,7 +694,7 @@ func parseGrantee(grantees string) ([]*Grantee, error) {
|
|||
}
|
||||
|
||||
func formGrantee(granteeType, value string) (*Grantee, error) {
|
||||
value = strings.Trim(value, "\"")
|
||||
value = data.UnQuote(value)
|
||||
switch granteeType {
|
||||
case "id":
|
||||
return &Grantee{
|
||||
|
@ -1134,73 +1186,6 @@ func resourceInfoFromName(name, bucketName string) resourceInfo {
|
|||
return resInfo
|
||||
}
|
||||
|
||||
func astToPolicy(ast *ast) *bucketPolicy {
|
||||
bktPolicy := &bucketPolicy{}
|
||||
|
||||
for _, resource := range ast.Resources {
|
||||
allowed, denied := triageOperations(resource.Operations)
|
||||
handleResourceOperations(bktPolicy, allowed, eacl.ActionAllow, resource.Name())
|
||||
handleResourceOperations(bktPolicy, denied, eacl.ActionDeny, resource.Name())
|
||||
}
|
||||
|
||||
return bktPolicy
|
||||
}
|
||||
|
||||
func handleResourceOperations(bktPolicy *bucketPolicy, list []*astOperation, eaclAction eacl.Action, resourceName string) {
|
||||
userOpsMap := make(map[string][]eacl.Operation)
|
||||
|
||||
for _, op := range list {
|
||||
if !op.IsGroupGrantee() {
|
||||
for _, user := range op.Users {
|
||||
userOps := userOpsMap[user]
|
||||
userOps = append(userOps, op.Op)
|
||||
userOpsMap[user] = userOps
|
||||
}
|
||||
} else {
|
||||
userOps := userOpsMap[allUsersGroup]
|
||||
userOps = append(userOps, op.Op)
|
||||
userOpsMap[allUsersGroup] = userOps
|
||||
}
|
||||
}
|
||||
|
||||
for user, userOps := range userOpsMap {
|
||||
var actions []string
|
||||
LOOP:
|
||||
for action, ops := range actionToOpMap {
|
||||
for _, op := range ops {
|
||||
if !contains(userOps, op) {
|
||||
continue LOOP
|
||||
}
|
||||
}
|
||||
actions = append(actions, action)
|
||||
}
|
||||
if len(actions) != 0 {
|
||||
state := statement{
|
||||
Effect: actionToEffect(eaclAction),
|
||||
Principal: principal{CanonicalUser: user},
|
||||
Action: actions,
|
||||
Resource: []string{arnAwsPrefix + resourceName},
|
||||
}
|
||||
if user == allUsersGroup {
|
||||
state.Principal = principal{AWS: allUsersWildcard}
|
||||
}
|
||||
bktPolicy.Statement = append(bktPolicy.Statement, state)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func triageOperations(operations []*astOperation) ([]*astOperation, []*astOperation) {
|
||||
var allowed, denied []*astOperation
|
||||
for _, op := range operations {
|
||||
if op.Action == eacl.ActionAllow {
|
||||
allowed = append(allowed, op)
|
||||
} else {
|
||||
denied = append(denied, op)
|
||||
}
|
||||
}
|
||||
return allowed, denied
|
||||
}
|
||||
|
||||
func addTo(list []*astOperation, userID string, op eacl.Operation, groupGrantee bool, action eacl.Action) []*astOperation {
|
||||
var found *astOperation
|
||||
for _, astop := range list {
|
||||
|
@ -1387,17 +1372,6 @@ func effectToAction(effect string) eacl.Action {
|
|||
return eacl.ActionUnknown
|
||||
}
|
||||
|
||||
func actionToEffect(action eacl.Action) string {
|
||||
switch action {
|
||||
case eacl.ActionAllow:
|
||||
return "Allow"
|
||||
case eacl.ActionDeny:
|
||||
return "Deny"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func permissionToOperations(permission AWSACL) []eacl.Operation {
|
||||
switch permission {
|
||||
case aclFullControl:
|
||||
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||
engineiam "git.frostfs.info/TrueCloudLab/policy-engine/iam"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
@ -1316,42 +1317,26 @@ func TestBucketPolicy(t *testing.T) {
|
|||
hc := prepareHandlerContext(t)
|
||||
bktName := "bucket-for-policy"
|
||||
|
||||
box, key := createAccessBox(t)
|
||||
createBucket(t, hc, bktName, box)
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
bktPolicy := getBucketPolicy(hc, bktName)
|
||||
for _, st := range bktPolicy.Statement {
|
||||
if st.Effect == "Allow" {
|
||||
require.Equal(t, hex.EncodeToString(key.PublicKey().Bytes()), st.Principal.CanonicalUser)
|
||||
require.Equal(t, []string{arnAwsPrefix + bktName}, st.Resource)
|
||||
} else {
|
||||
require.Equal(t, allUsersWildcard, st.Principal.AWS)
|
||||
require.Equal(t, "Deny", st.Effect)
|
||||
require.Equal(t, []string{arnAwsPrefix + bktName}, st.Resource)
|
||||
}
|
||||
}
|
||||
getBucketPolicy(hc, bktName, s3errors.ErrNoSuchBucketPolicy)
|
||||
|
||||
newPolicy := &bucketPolicy{
|
||||
Statement: []statement{{
|
||||
Effect: "Allow",
|
||||
Principal: principal{AWS: allUsersWildcard},
|
||||
Action: []string{s3GetObject},
|
||||
Resource: []string{arnAwsPrefix + "dummy"},
|
||||
newPolicy := engineiam.Policy{
|
||||
Statement: []engineiam.Statement{{
|
||||
Principal: map[engineiam.PrincipalType][]string{engineiam.Wildcard: {}},
|
||||
Effect: engineiam.DenyEffect,
|
||||
Action: engineiam.Action{"s3:PutObject"},
|
||||
Resource: engineiam.Resource{"arn:aws:s3:::test/*"},
|
||||
}},
|
||||
}
|
||||
|
||||
putBucketPolicy(hc, bktName, newPolicy, box, http.StatusInternalServerError)
|
||||
putBucketPolicy(hc, bktName, newPolicy, s3errors.ErrMalformedPolicy)
|
||||
|
||||
newPolicy.Statement[0].Resource[0] = arnAwsPrefix + bktName
|
||||
putBucketPolicy(hc, bktName, newPolicy, box, http.StatusOK)
|
||||
newPolicy.Statement[0].Resource[0] = arnAwsPrefix + bktName + "/*"
|
||||
putBucketPolicy(hc, bktName, newPolicy)
|
||||
|
||||
bktPolicy = getBucketPolicy(hc, bktName)
|
||||
for _, st := range bktPolicy.Statement {
|
||||
if st.Effect == "Allow" && st.Principal.AWS == allUsersWildcard {
|
||||
require.Equal(t, []string{arnAwsPrefix + bktName}, st.Resource)
|
||||
require.ElementsMatch(t, []string{s3GetObject, s3ListBucket}, st.Action)
|
||||
}
|
||||
}
|
||||
bktPolicy := getBucketPolicy(hc, bktName)
|
||||
require.Equal(t, newPolicy, bktPolicy)
|
||||
}
|
||||
|
||||
func TestBucketPolicyUnmarshal(t *testing.T) {
|
||||
|
@ -1411,9 +1396,7 @@ func TestPutBucketPolicy(t *testing.T) {
|
|||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [{
|
||||
"Principal": {
|
||||
"AWS": "*"
|
||||
},
|
||||
"Principal": "*",
|
||||
"Effect": "Deny",
|
||||
"Action": "s3:GetObject",
|
||||
"Resource": "arn:aws:s3:::bucket-for-policy/*"
|
||||
|
@ -1423,36 +1406,41 @@ func TestPutBucketPolicy(t *testing.T) {
|
|||
hc := prepareHandlerContext(t)
|
||||
bktName := "bucket-for-policy"
|
||||
|
||||
box, _ := createAccessBox(t)
|
||||
createBucket(t, hc, bktName, box)
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
w, r := prepareTestPayloadRequest(hc, bktName, "", bytes.NewReader([]byte(bktPolicy)))
|
||||
ctx := middleware.SetBoxData(r.Context(), box)
|
||||
r = r.WithContext(ctx)
|
||||
hc.Handler().PutBucketPolicyHandler(w, r)
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
}
|
||||
|
||||
func getBucketPolicy(hc *handlerContext, bktName string) *bucketPolicy {
|
||||
func getBucketPolicy(hc *handlerContext, bktName string, errCode ...s3errors.ErrorCode) engineiam.Policy {
|
||||
w, r := prepareTestRequest(hc, bktName, "", nil)
|
||||
hc.Handler().GetBucketPolicyHandler(w, r)
|
||||
|
||||
var policy engineiam.Policy
|
||||
if len(errCode) == 0 {
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
policy := &bucketPolicy{}
|
||||
err := json.NewDecoder(w.Result().Body).Decode(policy)
|
||||
err := json.NewDecoder(w.Result().Body).Decode(&policy)
|
||||
require.NoError(hc.t, err)
|
||||
} else {
|
||||
assertS3Error(hc.t, w, s3errors.GetAPIError(errCode[0]))
|
||||
}
|
||||
|
||||
return policy
|
||||
}
|
||||
|
||||
func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy *bucketPolicy, box *accessbox.Box, status int) {
|
||||
func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy engineiam.Policy, errCode ...s3errors.ErrorCode) {
|
||||
body, err := json.Marshal(bktPolicy)
|
||||
require.NoError(hc.t, err)
|
||||
|
||||
w, r := prepareTestPayloadRequest(hc, bktName, "", bytes.NewReader(body))
|
||||
ctx := middleware.SetBoxData(r.Context(), box)
|
||||
r = r.WithContext(ctx)
|
||||
hc.Handler().PutBucketPolicyHandler(w, r)
|
||||
assertStatus(hc.t, w, status)
|
||||
|
||||
if len(errCode) == 0 {
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
} else {
|
||||
assertS3Error(hc.t, w, s3errors.GetAPIError(errCode[0]))
|
||||
}
|
||||
}
|
||||
|
||||
func checkLastRecords(t *testing.T, tc *handlerContext, bktInfo *data.BucketInfo, action eacl.Action) {
|
||||
|
|
|
@ -12,7 +12,10 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
@ -22,6 +25,8 @@ type (
|
|||
obj layer.Client
|
||||
notificator Notificator
|
||||
cfg Config
|
||||
ape APE
|
||||
frostfsid FrostFSID
|
||||
}
|
||||
|
||||
Notificator interface {
|
||||
|
@ -31,10 +36,10 @@ type (
|
|||
|
||||
// Config contains data which handler needs to keep.
|
||||
Config interface {
|
||||
DefaultPlacementPolicy() netmap.PlacementPolicy
|
||||
PlacementPolicy(string) (netmap.PlacementPolicy, bool)
|
||||
CopiesNumbers(string) ([]uint32, bool)
|
||||
DefaultCopiesNumbers() []uint32
|
||||
DefaultPlacementPolicy(namespace string) netmap.PlacementPolicy
|
||||
PlacementPolicy(namespace, constraint string) (netmap.PlacementPolicy, bool)
|
||||
CopiesNumbers(namespace, constraint string) ([]uint32, bool)
|
||||
DefaultCopiesNumbers(namespace string) []uint32
|
||||
NewXMLDecoder(io.Reader) *xml.Decoder
|
||||
DefaultMaxAge() int
|
||||
NotificatorEnabled() bool
|
||||
|
@ -42,18 +47,52 @@ type (
|
|||
IsResolveListAllow() bool
|
||||
BypassContentEncodingInChunks() bool
|
||||
MD5Enabled() bool
|
||||
ResolveNamespaceAlias(namespace string) string
|
||||
}
|
||||
|
||||
FrostFSID interface {
|
||||
GetUserAddress(account, user string) (string, error)
|
||||
}
|
||||
|
||||
// APE is Access Policy Engine that needs to save policy and acl info to different places.
|
||||
APE interface {
|
||||
MorphRuleChainStorage
|
||||
PolicyStorage
|
||||
}
|
||||
|
||||
// MorphRuleChainStorage is a similar to engine.MorphRuleChainStorage
|
||||
// but doesn't know anything about tx.
|
||||
MorphRuleChainStorage interface {
|
||||
AddChain(target engine.Target, c *chain.Chain) error
|
||||
RemoveChain(target engine.Target, chainID chain.ID) error
|
||||
ListChains(target engine.Target) ([]*chain.Chain, error)
|
||||
}
|
||||
|
||||
// PolicyStorage is interface to save intact initial user provided policy.
|
||||
PolicyStorage interface {
|
||||
PutPolicy(namespace string, cnrID cid.ID, policy []byte) error
|
||||
GetPolicy(namespace string, cnrID cid.ID) ([]byte, error)
|
||||
DeletePolicy(namespace string, cnrID cid.ID) error
|
||||
}
|
||||
|
||||
frostfsIDDisabled struct{}
|
||||
)
|
||||
|
||||
var _ api.Handler = (*handler)(nil)
|
||||
|
||||
// New creates new api.Handler using given logger and client.
|
||||
func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg Config) (api.Handler, error) {
|
||||
func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg Config, storage APE, ffsid FrostFSID) (api.Handler, error) {
|
||||
switch {
|
||||
case obj == nil:
|
||||
return nil, errors.New("empty FrostFS Object Layer")
|
||||
case log == nil:
|
||||
return nil, errors.New("empty logger")
|
||||
case storage == nil:
|
||||
return nil, errors.New("empty policy storage")
|
||||
}
|
||||
|
||||
if ffsid == nil {
|
||||
ffsid = frostfsIDDisabled{}
|
||||
}
|
||||
|
||||
if !cfg.NotificatorEnabled() {
|
||||
|
@ -66,15 +105,21 @@ func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg Config)
|
|||
log: log,
|
||||
obj: obj,
|
||||
cfg: cfg,
|
||||
ape: storage,
|
||||
notificator: notificator,
|
||||
frostfsid: ffsid,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f frostfsIDDisabled) GetUserAddress(_, _ string) (string, error) {
|
||||
return "", errors.New("frostfsid disabled")
|
||||
}
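The `APE` dependency combines `MorphRuleChainStorage` and `PolicyStorage`. For tests or local experiments it could be satisfied by a trivial in-memory implementation; the sketch below is an assumption-laden illustration that relies only on the method signatures above, not on real policy-engine behaviour:

```go
package sketch

import (
	"bytes"
	"errors"
	"fmt"

	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
)

// memAPE is an illustrative in-memory stand-in for the handler's APE interface.
// Keys are formed with fmt.Sprint to avoid relying on engine.Target internals.
type memAPE struct {
	chains   map[string][]*chain.Chain
	policies map[string][]byte
}

func newMemAPE() *memAPE {
	return &memAPE{chains: map[string][]*chain.Chain{}, policies: map[string][]byte{}}
}

func (m *memAPE) AddChain(target engine.Target, c *chain.Chain) error {
	k := fmt.Sprint(target)
	m.chains[k] = append(m.chains[k], c)
	return nil
}

func (m *memAPE) RemoveChain(target engine.Target, chainID chain.ID) error {
	k := fmt.Sprint(target)
	kept := m.chains[k][:0]
	for _, c := range m.chains[k] {
		if !bytes.Equal([]byte(c.ID), []byte(chainID)) {
			kept = append(kept, c)
		}
	}
	m.chains[k] = kept
	return nil
}

func (m *memAPE) ListChains(target engine.Target) ([]*chain.Chain, error) {
	return m.chains[fmt.Sprint(target)], nil
}

func (m *memAPE) PutPolicy(namespace string, cnrID cid.ID, policy []byte) error {
	m.policies[namespace+"/"+cnrID.EncodeToString()] = policy
	return nil
}

func (m *memAPE) GetPolicy(namespace string, cnrID cid.ID) ([]byte, error) {
	p, ok := m.policies[namespace+"/"+cnrID.EncodeToString()]
	if !ok {
		// GetBucketPolicyHandler maps errors containing "not found" to ErrNoSuchBucketPolicy.
		return nil, errors.New("policy not found")
	}
	return p, nil
}

func (m *memAPE) DeletePolicy(namespace string, cnrID cid.ID) error {
	delete(m.policies, namespace+"/"+cnrID.EncodeToString())
	return nil
}
```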
|
||||
|
||||
// pickCopiesNumbers chooses the return values following this logic:
|
||||
// 1) array of copies numbers sent in request's header has the highest priority.
|
||||
// 2) array of copies numbers with corresponding location constraint provided in the config file.
|
||||
// 3) default copies number from the config file wrapped into array.
|
||||
func (h *handler) pickCopiesNumbers(metadata map[string]string, locationConstraint string) ([]uint32, error) {
|
||||
func (h *handler) pickCopiesNumbers(metadata map[string]string, namespace, locationConstraint string) ([]uint32, error) {
|
||||
copiesNumbersStr, ok := metadata[layer.AttributeFrostfsCopiesNumber]
|
||||
if ok {
|
||||
result, err := parseCopiesNumbers(copiesNumbersStr)
|
||||
|
@ -84,12 +129,12 @@ func (h *handler) pickCopiesNumbers(metadata map[string]string, locationConstrai
|
|||
return result, nil
|
||||
}
|
||||
|
||||
copiesNumbers, ok := h.cfg.CopiesNumbers(locationConstraint)
|
||||
copiesNumbers, ok := h.cfg.CopiesNumbers(namespace, locationConstraint)
|
||||
if ok {
|
||||
return copiesNumbers, nil
|
||||
}
|
||||
|
||||
return h.cfg.DefaultCopiesNumbers(), nil
|
||||
return h.cfg.DefaultCopiesNumbers(namespace), nil
|
||||
}
|
||||
|
||||
func parseCopiesNumbers(copiesNumbersStr string) ([]uint32, error) {
|
||||
|
|
|
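For reference, a minimal standalone sketch of the priority order described in the pickCopiesNumbers comment above: request header first, then the per-location-constraint config, then the default. The parsing mirrors the trailing-comma case exercised by the tests below; the helper names and config shape here are illustrative, not the gateway's actual API.

package copiessketch

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCopies parses a header value such as "11, 12, 13, " into []uint32,
// skipping empty items produced by a trailing comma.
func parseCopies(s string) ([]uint32, error) {
	var result []uint32
	for _, item := range strings.Split(s, ",") {
		item = strings.TrimSpace(item)
		if item == "" {
			continue
		}
		n, err := strconv.ParseUint(item, 10, 32)
		if err != nil {
			return nil, fmt.Errorf("invalid copies number %q: %w", item, err)
		}
		result = append(result, uint32(n))
	}
	return result, nil
}

// pickCopies applies the three-level priority: header value, per-constraint config, default.
func pickCopies(header string, perConstraint map[string][]uint32, constraint string, def []uint32) ([]uint32, error) {
	if header != "" {
		return parseCopies(header)
	}
	if cn, ok := perConstraint[constraint]; ok {
		return cn, nil
	}
	return def, nil
}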
@ -26,7 +26,7 @@ func TestCopiesNumberPicker(t *testing.T) {
		metadata["somekey1"] = "5, 6, 7"
		expectedCopiesNumbers := []uint32{1}

		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
		require.NoError(t, err)
		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
	})

@ -35,7 +35,7 @@ func TestCopiesNumberPicker(t *testing.T) {
		metadata["somekey2"] = "6, 7, 8"
		expectedCopiesNumbers := []uint32{2, 3, 4}

		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint1)
		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint1)
		require.NoError(t, err)
		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
	})

@ -44,7 +44,7 @@ func TestCopiesNumberPicker(t *testing.T) {
		metadata["frostfs-copies-number"] = "7, 8, 9"
		expectedCopiesNumbers := []uint32{7, 8, 9}

		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
		require.NoError(t, err)
		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
	})

@ -53,7 +53,7 @@ func TestCopiesNumberPicker(t *testing.T) {
		metadata["frostfs-copies-number"] = "7,8,9"
		expectedCopiesNumbers := []uint32{7, 8, 9}

		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
		require.NoError(t, err)
		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
	})

@ -62,7 +62,7 @@ func TestCopiesNumberPicker(t *testing.T) {
		metadata["frostfs-copies-number"] = "11, 12, 13, "
		expectedCopiesNumbers := []uint32{11, 12, 13}

		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
		actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
		require.NoError(t, err)
		require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
	})
@ -1,6 +1,8 @@
package handler

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"net/http"
	"strconv"

@ -106,7 +108,7 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
		return
	}

	if err = checkPreconditions(info, params.Conditional); err != nil {
	if err = checkPreconditions(info, params.Conditional, h.cfg.MD5Enabled()); err != nil {
		h.logAndSendError(w, "precondition failed", reqInfo, err)
		return
	}

@ -117,7 +119,7 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
		return
	}

	response, err := encodeToObjectAttributesResponse(info, params)
	response, err := encodeToObjectAttributesResponse(info, params, h.cfg.MD5Enabled())
	if err != nil {
		h.logAndSendError(w, "couldn't encode object info to response", reqInfo, err)
		return

@ -135,7 +137,7 @@ func writeAttributesHeaders(h http.Header, info *data.ExtendedObjectInfo, isBuck
		h.Set(api.AmzVersionID, info.Version())
	}

	if info.NodeVersion.IsDeleteMarker() {
	if info.NodeVersion.IsDeleteMarker {
		h.Set(api.AmzDeleteMarker, strconv.FormatBool(true))
	}

@ -179,19 +181,23 @@ func parseGetObjectAttributeArgs(r *http.Request) (*GetObjectAttributesArgs, err
	return res, err
}

func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttributesArgs) (*GetObjectAttributesResponse, error) {
func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttributesArgs, md5Enabled bool) (*GetObjectAttributesResponse, error) {
	resp := &GetObjectAttributesResponse{}

	for _, attr := range p.Attributes {
		switch attr {
		case eTag:
			resp.ETag = info.HashSum
			resp.ETag = data.Quote(info.ETag(md5Enabled))
		case storageClass:
			resp.StorageClass = api.DefaultStorageClass
		case objectSize:
			resp.ObjectSize = info.Size
		case checksum:
			resp.Checksum = &Checksum{ChecksumSHA256: info.HashSum}
			checksumBytes, err := hex.DecodeString(info.HashSum)
			if err != nil {
				return nil, fmt.Errorf("form upload attributes: %w", err)
			}
			resp.Checksum = &Checksum{ChecksumSHA256: base64.StdEncoding.EncodeToString(checksumBytes)}
		case objectParts:
			parts, err := formUploadAttributes(info, p.MaxParts, p.PartNumberMarker)
			if err != nil {

@ -219,10 +225,15 @@ func formUploadAttributes(info *data.ObjectInfo, maxParts, marker int) (*ObjectP
		if err != nil {
			return nil, fmt.Errorf("invalid completed part: %w", err)
		}
		// ETag value contains SHA256 checksum.
		checksumBytes, err := hex.DecodeString(part.ETag)
		if err != nil {
			return nil, fmt.Errorf("invalid sha256 checksum in completed part: %w", err)
		}
		parts[i] = Part{
			PartNumber:     part.PartNumber,
			Size:           int(part.Size),
			ChecksumSHA256: part.ETag,
			ChecksumSHA256: base64.StdEncoding.EncodeToString(checksumBytes),
		}
	}
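The checksum changes above swap the hex-encoded SHA256 (as stored in HashSum and completed-part ETags) for its base64 form in GetObjectAttributes responses. A small self-contained sketch of that conversion:

package checksumsketch

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

// toBase64Checksum converts a hex-encoded SHA256 sum into the base64 form
// that the ChecksumSHA256 attribute expects.
func toBase64Checksum(hexSum string) (string, error) {
	raw, err := hex.DecodeString(hexSum)
	if err != nil {
		return "", fmt.Errorf("invalid sha256 checksum: %w", err)
	}
	return base64.StdEncoding.EncodeToString(raw), nil
}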
@ -1,6 +1,8 @@
package handler

import (
	"encoding/base64"
	"encoding/hex"
	"strings"
	"testing"

@ -24,11 +26,13 @@ func TestGetObjectPartsAttributes(t *testing.T) {
	multipartUpload := createMultipartUpload(hc, bktName, objMultipartName, map[string]string{})
	etag, _ := uploadPart(hc, bktName, objMultipartName, multipartUpload.UploadID, 1, partSize)
	completeMultipartUpload(hc, bktName, objMultipartName, multipartUpload.UploadID, []string{etag})
	etagBytes, err := hex.DecodeString(etag[1 : len(etag)-1])
	require.NoError(t, err)

	result = getObjectAttributes(hc, bktName, objMultipartName, objectParts)
	require.NotNil(t, result.ObjectParts)
	require.Len(t, result.ObjectParts.Parts, 1)
	require.Equal(t, etag, result.ObjectParts.Parts[0].ChecksumSHA256)
	require.Equal(t, base64.StdEncoding.EncodeToString(etagBytes), result.ObjectParts.Parts[0].ChecksumSHA256)
	require.Equal(t, partSize, result.ObjectParts.Parts[0].Size)
	require.Equal(t, 1, result.ObjectParts.PartsCount)
}
@ -177,7 +177,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
		}
	}

	if err = checkPreconditions(srcObjInfo, args.Conditional); err != nil {
	if err = checkPreconditions(srcObjInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil {
		h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
		return
	}

@ -204,7 +204,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
		DstEncryption: dstEncryptionParams,
	}

	params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, dstBktInfo.LocationConstraint)
	params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, dstBktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return

@ -224,7 +224,10 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
	}
	dstObjInfo := extendedDstObjInfo.ObjectInfo

	if err = middleware.EncodeToResponse(w, &CopyObjectResponse{LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339), ETag: dstObjInfo.HashSum}); err != nil {
	if err = middleware.EncodeToResponse(w, &CopyObjectResponse{
		LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339),
		ETag:         data.Quote(dstObjInfo.ETag(h.cfg.MD5Enabled())),
	}); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
		return
	}

@ -268,7 +271,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {

		s := &SendNotificationParams{
			Event:            EventObjectCreatedCopy,
			NotificationInfo: data.NotificationInfoFromObject(dstObjInfo),
			NotificationInfo: data.NotificationInfoFromObject(dstObjInfo, h.cfg.MD5Enabled()),
			BktInfo:          dstBktInfo,
			ReqInfo:          reqInfo,
		}

@ -311,8 +314,8 @@ func isCopyingToItselfForbidden(reqInfo *middleware.ReqInfo, srcBucket string, s
func parseCopyObjectArgs(headers http.Header) (*copyObjectArgs, error) {
	var err error
	args := &conditionalArgs{
		IfMatch:     headers.Get(api.AmzCopyIfMatch),
		IfNoneMatch: headers.Get(api.AmzCopyIfNoneMatch),
		IfMatch:     data.UnQuote(headers.Get(api.AmzCopyIfMatch)),
		IfNoneMatch: data.UnQuote(headers.Get(api.AmzCopyIfNoneMatch)),
	}

	if args.IfModifiedSince, err = parseHTTPTime(headers.Get(api.AmzCopyIfModifiedSince)); err != nil {
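The copy handler above now returns the ETag wrapped by data.Quote and reads conditional copy headers through data.UnQuote. The bodies of those helpers are not part of this diff; the following is only a plausible minimal sketch of such quoting/unquoting, not the actual data package code:

package etagsketch

import "strings"

// quote wraps an ETag value in double quotes, as S3 responses expect.
func quote(s string) string { return `"` + s + `"` }

// unQuote strips surrounding double quotes from a header value if present.
func unQuote(s string) string {
	if len(s) >= 2 && strings.HasPrefix(s, `"`) && strings.HasSuffix(s, `"`) {
		return s[1 : len(s)-1]
	}
	return s
}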
@ -55,7 +55,7 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
		NewDecoder: h.cfg.NewXMLDecoder,
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return
@ -243,6 +243,7 @@ func TestDeleteMarkerVersioned(t *testing.T) {
	deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
	require.True(t, isDeleteMarker)
	versions := listVersions(t, tc, bktName)
	require.Len(t, versions.DeleteMarker, 1)
	require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)

	_, isDeleteMarker = deleteObject(t, tc, bktName, objName, emptyVersion)

@ -433,17 +434,20 @@ func createBucketAndObject(tc *handlerContext, bktName, objName string) (*data.B
	return bktInfo, objInfo
}

func createVersionedBucketAndObject(t *testing.T, tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
	createTestBucket(tc, bktName)
	bktInfo, err := tc.Layer().GetBucketInfo(tc.Context(), bktName)
	require.NoError(t, err)
	putBucketVersioning(t, tc, bktName, true)

func createVersionedBucketAndObject(_ *testing.T, tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
	bktInfo := createVersionedBucket(tc, bktName)
	objInfo := createTestObject(tc, bktInfo, objName, encryption.Params{})

	return bktInfo, objInfo
}

func createVersionedBucket(hc *handlerContext, bktName string) *data.BucketInfo {
	bktInfo := createTestBucket(hc, bktName)
	putBucketVersioning(hc.t, hc, bktName, true)

	return bktInfo
}

func putBucketVersioning(t *testing.T, tc *handlerContext, bktName string, enabled bool) {
	cfg := &VersioningConfiguration{Status: "Suspended"}
	if enabled {
@ -95,7 +95,7 @@ func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.E
		h.Set(api.ContentLength, strconv.FormatUint(info.Size, 10))
	}

	h.Set(api.ETag, info.ETag(md5Enabled))
	h.Set(api.ETag, data.Quote(info.ETag(md5Enabled)))

	h.Set(api.AmzTaggingCount, strconv.Itoa(tagSetLength))
	h.Set(api.AmzStorageClass, api.DefaultStorageClass)

@ -157,7 +157,7 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
	}
	info := extendedInfo.ObjectInfo

	if err = checkPreconditions(info, conditional); err != nil {
	if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
		h.logAndSendError(w, "precondition failed", reqInfo, err)
		return
	}

@ -238,12 +238,13 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
	}
}

func checkPreconditions(info *data.ObjectInfo, args *conditionalArgs) error {
	if len(args.IfMatch) > 0 && args.IfMatch != info.HashSum {
		return fmt.Errorf("%w: etag mismatched: '%s', '%s'", errors.GetAPIError(errors.ErrPreconditionFailed), args.IfMatch, info.HashSum)
func checkPreconditions(info *data.ObjectInfo, args *conditionalArgs, md5Enabled bool) error {
	etag := info.ETag(md5Enabled)
	if len(args.IfMatch) > 0 && args.IfMatch != etag {
		return fmt.Errorf("%w: etag mismatched: '%s', '%s'", errors.GetAPIError(errors.ErrPreconditionFailed), args.IfMatch, etag)
	}
	if len(args.IfNoneMatch) > 0 && args.IfNoneMatch == info.HashSum {
		return fmt.Errorf("%w: etag matched: '%s', '%s'", errors.GetAPIError(errors.ErrNotModified), args.IfNoneMatch, info.HashSum)
	if len(args.IfNoneMatch) > 0 && args.IfNoneMatch == etag {
		return fmt.Errorf("%w: etag matched: '%s', '%s'", errors.GetAPIError(errors.ErrNotModified), args.IfNoneMatch, etag)
	}
	if args.IfModifiedSince != nil && info.Created.Before(*args.IfModifiedSince) {
		return fmt.Errorf("%w: not modified since '%s', last modified '%s'", errors.GetAPIError(errors.ErrNotModified),

@ -267,8 +268,8 @@ func checkPreconditions(info *data.ObjectInfo, args *conditionalArgs) error {
func parseConditionalHeaders(headers http.Header) (*conditionalArgs, error) {
	var err error
	args := &conditionalArgs{
		IfMatch:     strings.Trim(headers.Get(api.IfMatch), "\""),
		IfNoneMatch: strings.Trim(headers.Get(api.IfNoneMatch), "\""),
		IfMatch:     data.UnQuote(headers.Get(api.IfMatch)),
		IfNoneMatch: data.UnQuote(headers.Get(api.IfNoneMatch)),
	}

	if args.IfModifiedSince, err = parseHTTPTime(headers.Get(api.IfModifiedSince)); err != nil {
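A simplified standalone version of the precondition logic above, with the ETag source reduced to a plain string and the S3 error-code wrapping omitted; it is a sketch of the same checks, not the handler's actual code:

package preconditionsketch

import (
	"fmt"
	"time"
)

// checkConditions mirrors the If-Match / If-None-Match / If-Modified-Since checks:
// If-Match must equal the current ETag, If-None-Match must differ from it, and the
// object must not predate an If-Modified-Since timestamp.
func checkConditions(etag string, created time.Time, ifMatch, ifNoneMatch string, ifModifiedSince *time.Time) error {
	if ifMatch != "" && ifMatch != etag {
		return fmt.Errorf("precondition failed: etag mismatched: %q, %q", ifMatch, etag)
	}
	if ifNoneMatch != "" && ifNoneMatch == etag {
		return fmt.Errorf("not modified: etag matched: %q", ifNoneMatch)
	}
	if ifModifiedSince != nil && created.Before(*ifModifiedSince) {
		return fmt.Errorf("not modified since %s", ifModifiedSince.Format(time.RFC3339))
	}
	return nil
}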
@ -147,7 +147,7 @@ func TestPreconditions(t *testing.T) {
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			actual := checkPreconditions(tc.info, tc.args)
			actual := checkPreconditions(tc.info, tc.args, false)
			if tc.expected == nil {
				require.NoError(t, actual)
			} else {

@ -203,18 +203,19 @@ func TestGetObjectEnabledMD5(t *testing.T) {
	_, objInfo := createBucketAndObject(hc, bktName, objName)

	_, headers := getObject(hc, bktName, objName)
	require.Equal(t, objInfo.HashSum, headers.Get(api.ETag))
	require.Equal(t, data.Quote(objInfo.HashSum), headers.Get(api.ETag))

	hc.config.md5Enabled = true
	_, headers = getObject(hc, bktName, objName)
	require.Equal(t, objInfo.MD5Sum, headers.Get(api.ETag))
	require.Equal(t, data.Quote(objInfo.MD5Sum), headers.Get(api.ETag))
}

func putObjectContent(hc *handlerContext, bktName, objName, content string) {
func putObjectContent(hc *handlerContext, bktName, objName, content string) http.Header {
	body := bytes.NewReader([]byte(content))
	w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
	hc.Handler().PutObjectHandler(w, r)
	assertStatus(hc.t, w, http.StatusOK)
	return w.Result().Header
}

func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, start, end int) []byte {
@ -5,6 +5,7 @@ import (
	"context"
	"crypto/rand"
	"encoding/xml"
	"errors"
	"io"
	"net/http"
	"net/http/httptest"

@ -26,9 +27,12 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"golang.org/x/exp/slices"
)

type handlerContext struct {

@ -41,6 +45,8 @@ type handlerContext struct {
	config *configMock

	layerFeatures *layer.FeatureSettingsMock
	treeMock      *tree.ServiceClientMemory
	cache         *layer.Cache
}

func (hc *handlerContext) Handler() *handler {

@ -67,20 +73,20 @@ type configMock struct {
	md5Enabled bool
}

func (c *configMock) DefaultPlacementPolicy() netmap.PlacementPolicy {
func (c *configMock) DefaultPlacementPolicy(_ string) netmap.PlacementPolicy {
	return c.defaultPolicy
}

func (c *configMock) PlacementPolicy(string) (netmap.PlacementPolicy, bool) {
func (c *configMock) PlacementPolicy(_, _ string) (netmap.PlacementPolicy, bool) {
	return netmap.PlacementPolicy{}, false
}

func (c *configMock) CopiesNumbers(locationConstraint string) ([]uint32, bool) {
func (c *configMock) CopiesNumbers(_, locationConstraint string) ([]uint32, bool) {
	result, ok := c.copiesNumbers[locationConstraint]
	return result, ok
}

func (c *configMock) DefaultCopiesNumbers() []uint32 {
func (c *configMock) DefaultCopiesNumbers(_ string) []uint32 {
	return c.defaultCopiesNumbers
}

@ -116,15 +122,19 @@ func (c *configMock) MD5Enabled() bool {
	return c.md5Enabled
}

func (c *configMock) ResolveNamespaceAlias(ns string) string {
	return ns
}

func prepareHandlerContext(t *testing.T) *handlerContext {
	return prepareHandlerContextBase(t, false)
	return prepareHandlerContextBase(t, layer.DefaultCachesConfigs(zap.NewExample()))
}

func prepareHandlerContextWithMinCache(t *testing.T) *handlerContext {
	return prepareHandlerContextBase(t, true)
	return prepareHandlerContextBase(t, getMinCacheConfig(zap.NewExample()))
}

func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *handlerContext {
	key, err := keys.NewPrivateKey()
	require.NoError(t, err)

@ -139,21 +149,20 @@ func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
	var owner user.ID
	user.IDFromKey(&owner, key.PrivateKey.PublicKey)

	treeMock := NewTreeServiceMock(t)
	memCli, err := tree.NewTreeServiceClientMemory()
	require.NoError(t, err)

	cacheCfg := layer.DefaultCachesConfigs(l)
	if minCache {
		cacheCfg = getMinCacheConfig(l)
	}
	treeMock := tree.NewTree(memCli, zap.NewExample())

	features := &layer.FeatureSettingsMock{}

	layerCfg := &layer.Config{
		Caches:      cacheCfg,
		Cache:       layer.NewCache(cacheCfg),
		AnonKey:     layer.AnonymousKey{Key: key},
		Resolver:    testResolver,
		TreeService: treeMock,
		Features:    features,
		GateOwner:   owner,
	}

	var pp netmap.PlacementPolicy

@ -167,6 +176,7 @@ func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
		log: l,
		obj: layer.NewLayer(l, tp, layerCfg),
		cfg: cfg,
		ape: newAPEMock(),
	}

	return &handlerContext{

@ -179,6 +189,8 @@ func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
		config: cfg,

		layerFeatures: features,
		treeMock:      memCli,
		cache:         layerCfg.Cache,
	}
}

@ -192,6 +204,7 @@ func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
		Logger:      logger,
		Objects:     minCacheCfg,
		ObjectsList: minCacheCfg,
		SessionList: minCacheCfg,
		Names:       minCacheCfg,
		Buckets:     minCacheCfg,
		System:      minCacheCfg,

@ -199,10 +212,58 @@ func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
	}
}

func NewTreeServiceMock(t *testing.T) *tree.Tree {
	memCli, err := tree.NewTreeServiceClientMemory()
	require.NoError(t, err)
	return tree.NewTree(memCli, zap.NewExample())
type apeMock struct {
	chainMap  map[engine.Target][]*chain.Chain
	policyMap map[string][]byte
}

func newAPEMock() *apeMock {
	return &apeMock{
		chainMap:  map[engine.Target][]*chain.Chain{},
		policyMap: map[string][]byte{},
	}
}

func (a *apeMock) AddChain(target engine.Target, c *chain.Chain) error {
	list := a.chainMap[target]

	ind := slices.IndexFunc(list, func(item *chain.Chain) bool { return bytes.Equal(item.ID, c.ID) })
	if ind != -1 {
		list[ind] = c
	} else {
		list = append(list, c)
	}

	a.chainMap[target] = list
	return nil
}

func (a *apeMock) RemoveChain(target engine.Target, chainID chain.ID) error {
	a.chainMap[target] = slices.DeleteFunc(a.chainMap[target], func(item *chain.Chain) bool { return bytes.Equal(item.ID, chainID) })
	return nil
}

func (a *apeMock) ListChains(target engine.Target) ([]*chain.Chain, error) {
	return a.chainMap[target], nil
}

func (a *apeMock) PutPolicy(namespace string, cnrID cid.ID, policy []byte) error {
	a.policyMap[namespace+cnrID.EncodeToString()] = policy
	return nil
}

func (a *apeMock) GetPolicy(namespace string, cnrID cid.ID) ([]byte, error) {
	policy, ok := a.policyMap[namespace+cnrID.EncodeToString()]
	if !ok {
		return nil, errors.New("not found")
	}

	return policy, nil
}

func (a *apeMock) DeletePolicy(namespace string, cnrID cid.ID) error {
	delete(a.policyMap, namespace+cnrID.EncodeToString())
	return nil
}

func createTestBucket(hc *handlerContext, bktName string) *data.BucketInfo {
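A short usage sketch for the in-memory APE mock defined above: AddChain upserts by chain ID, so re-adding a chain with the same ID replaces the stored entry instead of appending a duplicate. The zero-value target and the chain literal are only illustrative; field names follow the mock code above.

func exampleAPEMockUpsert() (int, error) {
	m := newAPEMock()

	var target engine.Target // illustrative zero-value target

	// First insert stores the chain.
	if err := m.AddChain(target, &chain.Chain{ID: chain.ID("chain-1")}); err != nil {
		return 0, err
	}
	// Re-adding with the same ID overwrites the stored chain rather than appending.
	if err := m.AddChain(target, &chain.Chain{ID: chain.ID("chain-1")}); err != nil {
		return 0, err
	}

	chains, err := m.ListChains(target)
	if err != nil {
		return 0, err
	}
	return len(chains), nil // expected: 1
}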
@ -65,7 +65,7 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	if err = checkPreconditions(info, conditional); err != nil {
	if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
		h.logAndSendError(w, "precondition failed", reqInfo, err)
		return
	}
@ -145,7 +145,7 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
		},
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return

@ -229,7 +229,7 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
		NewLock: lock,
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return
@ -157,7 +157,7 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
		p.Header[api.ContentLanguage] = contentLanguage
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, bktInfo.LocationConstraint)
	p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, reqInfo.Namespace, bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err, additional...)
		return

@ -246,6 +246,7 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
		Size:              size,
		Reader:            body,
		ContentMD5:        r.Header.Get(api.ContentMD5),
		ContentSHA256Hash: r.Header.Get(api.AmzContentSha256),
	}

	p.Info.Encryption, err = formEncryptionParams(r)

@ -264,7 +265,7 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
		addSSECHeaders(w.Header(), r.Header)
	}

	w.Header().Set(api.ETag, hash)
	w.Header().Set(api.ETag, data.Quote(hash))
	middleware.WriteSuccessResponseHeadersOnly(w)
}

@ -339,7 +340,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
		return
	}

	if err = checkPreconditions(srcInfo, args.Conditional); err != nil {
	if err = checkPreconditions(srcInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil {
		h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed),
			additional...)
		return

@ -384,7 +385,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {

	response := UploadPartCopyResponse{
		LastModified: info.Created.UTC().Format(time.RFC3339),
		ETag:         info.ETag(h.cfg.MD5Enabled()),
		ETag:         data.Quote(info.ETag(h.cfg.MD5Enabled())),
	}

	if p.Info.Encryption.Enabled() {

@ -449,7 +450,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
	response := CompleteMultipartUploadResponse{
		Bucket: objInfo.Bucket,
		Key:    objInfo.Name,
		ETag:   objInfo.ETag(h.cfg.MD5Enabled()),
		ETag:   data.Quote(objInfo.ETag(h.cfg.MD5Enabled())),
	}

	if settings.VersioningEnabled() {

@ -513,7 +514,7 @@ func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMult

	s := &SendNotificationParams{
		Event:            EventObjectCreatedCompleteMultipartUpload,
		NotificationInfo: data.NotificationInfoFromObject(objInfo),
		NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.MD5Enabled()),
		BktInfo:          bktInfo,
		ReqInfo:          reqInfo,
	}
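The ContentSHA256Hash field added to the upload-part parameters above carries the X-Amz-Content-Sha256 header value. A minimal sketch of the check that the test cases in the next file exercise: "UNSIGNED-PAYLOAD" (or an absent header) skips validation, otherwise the hex-encoded SHA256 of the payload must match the declared value. This is a standalone illustration, not the layer's actual implementation.

package shasketch

import (
	"crypto/sha256"
	"encoding/hex"
	"errors"
)

// validateContentSHA256 returns an error when the declared payload hash does not
// match the actual payload; "UNSIGNED-PAYLOAD" and an empty header skip the check.
func validateContentSHA256(payload []byte, headerValue string) error {
	if headerValue == "" || headerValue == "UNSIGNED-PAYLOAD" {
		return nil
	}
	sum := sha256.Sum256(payload)
	if hex.EncodeToString(sum[:]) != headerValue {
		return errors.New("content sha256 mismatch")
	}
	return nil
}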
@ -13,6 +13,7 @@ import (
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	s3Errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"

@ -298,11 +299,11 @@ func TestMultipartUploadEnabledMD5(t *testing.T) {
	multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
	etag1, partBody1 := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
	md5Sum1 := md5.Sum(partBody1)
	require.Equal(t, hex.EncodeToString(md5Sum1[:]), etag1)
	require.Equal(t, data.Quote(hex.EncodeToString(md5Sum1[:])), etag1)

	etag2, partBody2 := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
	md5Sum2 := md5.Sum(partBody2)
	require.Equal(t, hex.EncodeToString(md5Sum2[:]), etag2)
	require.Equal(t, data.Quote(hex.EncodeToString(md5Sum2[:])), etag2)

	w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
	assertStatus(t, w, http.StatusOK)

@ -310,7 +311,85 @@ func TestMultipartUploadEnabledMD5(t *testing.T) {
	err := xml.NewDecoder(w.Result().Body).Decode(resp)
	require.NoError(t, err)
	completeMD5Sum := md5.Sum(append(md5Sum1[:], md5Sum2[:]...))
	require.Equal(t, hex.EncodeToString(completeMD5Sum[:])+"-2", resp.ETag)
	require.Equal(t, data.Quote(hex.EncodeToString(completeMD5Sum[:])+"-2"), resp.ETag)
}

func TestUploadPartCheckContentSHA256(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-1", "object-1"
	createTestBucket(hc, bktName)
	partSize := 5 * 1024 * 1024

	for _, tc := range []struct {
		name    string
		hash    string
		content []byte
		error   bool
	}{
		{
			name:    "invalid hash value",
			hash:    "d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8",
			content: []byte("content"),
			error:   true,
		},
		{
			name:    "correct hash for empty payload",
			hash:    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			content: []byte(""),
			error:   false,
		},
		{
			name:    "unsigned payload",
			hash:    "UNSIGNED-PAYLOAD",
			content: []byte("content"),
			error:   false,
		},
		{
			name:    "correct hash",
			hash:    "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
			content: []byte("content"),
			error:   false,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})

			etag1, data1 := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)

			query := make(url.Values)
			query.Set(uploadIDQuery, multipartUpload.UploadID)
			query.Set(partNumberQuery, strconv.Itoa(2))

			w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, tc.content)
			r.Header.Set(api.AmzContentSha256, tc.hash)
			hc.Handler().UploadPartHandler(w, r)
			if tc.error {
				assertS3Error(t, w, s3Errors.GetAPIError(s3Errors.ErrContentSHA256Mismatch))

				list := listParts(hc, bktName, objName, multipartUpload.UploadID, "0", http.StatusOK)
				require.Len(t, list.Parts, 1)

				w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1})
				assertStatus(t, w, http.StatusOK)

				data, _ := getObject(hc, bktName, objName)
				equalDataSlices(t, data1, data)
				return
			}
			assertStatus(t, w, http.StatusOK)

			list := listParts(hc, bktName, objName, multipartUpload.UploadID, "0", http.StatusOK)
			require.Len(t, list.Parts, 2)

			etag2 := w.Header().Get(api.ETag)
			w = completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
			assertStatus(t, w, http.StatusOK)

			data, _ := getObject(hc, bktName, objName)
			equalDataSlices(t, append(data1, tc.content...), data)
		})
	}
}

func uploadPartCopy(hc *handlerContext, bktName, objName, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse {
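TestMultipartUploadEnabledMD5 above asserts the usual S3 multipart ETag shape when MD5 is enabled. A compact sketch of that computation, as checked by the test: MD5 over the concatenated binary part digests, suffixed with the part count.

package etagmultipart

import (
	"crypto/md5"
	"encoding/hex"
	"strconv"
)

// multipartETag computes hex(md5(md5(part1) || md5(part2) || ...)) + "-<N>".
func multipartETag(parts [][]byte) string {
	var digests []byte
	for _, p := range parts {
		sum := md5.Sum(p)
		digests = append(digests, sum[:]...)
	}
	complete := md5.Sum(digests)
	return hex.EncodeToString(complete[:]) + "-" + strconv.Itoa(len(parts))
}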
@ -7,10 +7,6 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)

func (h *handler) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
	h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}

func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}
@ -115,7 +115,7 @@ func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Re
		Configuration: conf,
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return
@ -34,12 +34,12 @@ func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
		return
	}

	if err = middleware.EncodeToResponse(w, encodeV1(params, list)); err != nil {
	if err = middleware.EncodeToResponse(w, h.encodeV1(params, list)); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}
}

func encodeV1(p *layer.ListObjectsParamsV1, list *layer.ListObjectsInfoV1) *ListObjectsV1Response {
func (h *handler) encodeV1(p *layer.ListObjectsParamsV1, list *layer.ListObjectsInfoV1) *ListObjectsV1Response {
	res := &ListObjectsV1Response{
		Name:         p.BktInfo.Name,
		EncodingType: p.Encode,

@ -53,7 +53,7 @@ func encodeV1(p *layer.ListObjectsParamsV1, list *layer.ListObjectsInfoV1) *List

	res.CommonPrefixes = fillPrefixes(list.Prefixes, p.Encode)

	res.Contents = fillContentsWithOwner(list.Objects, p.Encode)
	res.Contents = fillContentsWithOwner(list.Objects, p.Encode, h.cfg.MD5Enabled())

	return res
}

@ -78,12 +78,12 @@ func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
		return
	}

	if err = middleware.EncodeToResponse(w, encodeV2(params, list)); err != nil {
	if err = middleware.EncodeToResponse(w, h.encodeV2(params, list)); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}
}

func encodeV2(p *layer.ListObjectsParamsV2, list *layer.ListObjectsInfoV2) *ListObjectsV2Response {
func (h *handler) encodeV2(p *layer.ListObjectsParamsV2, list *layer.ListObjectsInfoV2) *ListObjectsV2Response {
	res := &ListObjectsV2Response{
		Name:         p.BktInfo.Name,
		EncodingType: p.Encode,

@ -99,7 +99,7 @@ func encodeV2(p *layer.ListObjectsParamsV2, list *layer.ListObjectsInfoV2) *List

	res.CommonPrefixes = fillPrefixes(list.Prefixes, p.Encode)

	res.Contents = fillContents(list.Objects, p.Encode, p.FetchOwner)
	res.Contents = fillContents(list.Objects, p.Encode, p.FetchOwner, h.cfg.MD5Enabled())

	return res
}

@ -185,29 +185,26 @@ func fillPrefixes(src []string, encode string) []CommonPrefix {
	return dst
}

func fillContentsWithOwner(src []*data.ObjectInfo, encode string) []Object {
	return fillContents(src, encode, true)
func fillContentsWithOwner(src []*data.ExtendedNodeVersion, encode string, md5Enabled bool) []Object {
	return fillContents(src, encode, true, md5Enabled)
}

func fillContents(src []*data.ObjectInfo, encode string, fetchOwner bool) []Object {
func fillContents(src []*data.ExtendedNodeVersion, encode string, fetchOwner, md5Enabled bool) []Object {
	var dst []Object
	for _, obj := range src {
		res := Object{
			Key:          s3PathEncode(obj.Name, encode),
			Size:         obj.Size,
			LastModified: obj.Created.UTC().Format(time.RFC3339),
			ETag:         obj.HashSum,
			Key:          s3PathEncode(obj.NodeVersion.FilePath, encode),
			Size:         obj.NodeVersion.Size,
			LastModified: obj.NodeVersion.Created.UTC().Format(time.RFC3339),
			ETag:         data.Quote(obj.NodeVersion.GetETag(md5Enabled)),
			StorageClass: api.DefaultStorageClass,
		}

		if size, err := layer.GetObjectSize(obj); err == nil {
			res.Size = size
		}

		if fetchOwner {
			owner := obj.NodeVersion.Owner.String()
			res.Owner = &Owner{
				ID:          obj.Owner.String(),
				DisplayName: obj.Owner.String(),
				ID:          owner,
				DisplayName: owner,
			}
		}

@ -284,15 +281,15 @@ func encodeListObjectVersionsToResponse(info *layer.ListObjectVersionsInfo, buck
	for _, ver := range info.Version {
		res.Version = append(res.Version, ObjectVersionResponse{
			IsLatest:     ver.IsLatest,
			Key:          ver.ObjectInfo.Name,
			LastModified: ver.ObjectInfo.Created.UTC().Format(time.RFC3339),
			Key:          ver.NodeVersion.FilePath,
			LastModified: ver.NodeVersion.Created.UTC().Format(time.RFC3339),
			Owner: Owner{
				ID:          ver.ObjectInfo.Owner.String(),
				DisplayName: ver.ObjectInfo.Owner.String(),
				ID:          ver.NodeVersion.Owner.String(),
				DisplayName: ver.NodeVersion.Owner.String(),
			},
			Size:         ver.ObjectInfo.Size,
			Size:         ver.NodeVersion.Size,
			VersionID:    ver.Version(),
			ETag:         ver.ObjectInfo.ETag(md5Enabled),
			ETag:         data.Quote(ver.NodeVersion.GetETag(md5Enabled)),
			StorageClass: api.DefaultStorageClass,
		})
	}

@ -300,11 +297,11 @@ func encodeListObjectVersionsToResponse(info *layer.ListObjectVersionsInfo, buck
	for _, del := range info.DeleteMarker {
		res.DeleteMarker = append(res.DeleteMarker, DeleteMarkerEntry{
			IsLatest:     del.IsLatest,
			Key:          del.ObjectInfo.Name,
			LastModified: del.ObjectInfo.Created.UTC().Format(time.RFC3339),
			Key:          del.NodeVersion.FilePath,
			LastModified: del.NodeVersion.Created.UTC().Format(time.RFC3339),
			Owner: Owner{
				ID:          del.ObjectInfo.Owner.String(),
				DisplayName: del.ObjectInfo.Owner.String(),
				ID:          del.NodeVersion.Owner.String(),
				DisplayName: del.NodeVersion.Owner.String(),
			},
			VersionID: del.Version(),
		})
@ -1,15 +1,22 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap/zaptest"
|
||||
)
|
||||
|
||||
func TestParseContinuationToken(t *testing.T) {
|
||||
|
@ -58,13 +65,164 @@ func TestListObjectNullVersions(t *testing.T) {
|
|||
require.Equal(t, data.UnversionedObjectVersionID, result.Version[1].VersionID)
|
||||
}
|
||||
|
||||
func TestListObjectsPaging(t *testing.T) {
|
||||
func TestListObjectsWithOldTreeNodes(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-versioning-enabled", "object"
|
||||
bktInfo := createTestBucket(hc, bktName)
|
||||
|
||||
srcEnc, err := encryption.NewParams([]byte("1234567890qwertyuiopasdfghjklzxc"))
|
||||
require.NoError(t, err)
|
||||
|
||||
n := 10
|
||||
objInfos := make([]*data.ObjectInfo, n)
|
||||
for i := 0; i < n; i++ {
|
||||
objInfos[i] = createTestObject(hc, bktInfo, objName+strconv.Itoa(i), *srcEnc)
|
||||
}
|
||||
sort.Slice(objInfos, func(i, j int) bool { return objInfos[i].Name < objInfos[j].Name })
|
||||
|
||||
makeAllTreeObjectsOld(hc, bktInfo)
|
||||
|
||||
listV1 := listObjectsV1(hc, bktName, "", "", "", -1)
|
||||
checkListOldNodes(hc, listV1.Contents, objInfos)
|
||||
|
||||
listV2 := listObjectsV2(hc, bktName, "", "", "", "", -1)
|
||||
checkListOldNodes(hc, listV2.Contents, objInfos)
|
||||
|
||||
listVers := listObjectsVersions(hc, bktName, "", "", "", "", -1)
|
||||
checkListVersionsOldNodes(hc, listVers.Version, objInfos)
|
||||
}
|
||||
|
||||
func makeAllTreeObjectsOld(hc *handlerContext, bktInfo *data.BucketInfo) {
|
||||
nodes, err := hc.treeMock.GetSubTree(hc.Context(), bktInfo, "version", 0, 0)
|
||||
require.NoError(hc.t, err)
|
||||
|
||||
for _, node := range nodes {
|
||||
if node.GetNodeID() == 0 {
|
||||
continue
|
||||
}
|
||||
meta := make(map[string]string, len(node.GetMeta()))
|
||||
for _, m := range node.GetMeta() {
|
||||
if m.GetKey() != "Created" && m.GetKey() != "Owner" {
|
||||
meta[m.GetKey()] = string(m.GetValue())
|
||||
}
|
||||
}
|
||||
|
||||
err = hc.treeMock.MoveNode(hc.Context(), bktInfo, "version", node.GetNodeID(), node.GetParentID(), meta)
|
||||
require.NoError(hc.t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func checkListOldNodes(hc *handlerContext, list []Object, objInfos []*data.ObjectInfo) {
|
||||
require.Len(hc.t, list, len(objInfos))
|
||||
for i := range list {
|
||||
require.Equal(hc.t, objInfos[i].Name, list[i].Key)
|
||||
realSize, err := layer.GetObjectSize(objInfos[i])
|
||||
require.NoError(hc.t, err)
|
||||
require.Equal(hc.t, objInfos[i].Owner.EncodeToString(), list[i].Owner.ID)
|
||||
require.Equal(hc.t, objInfos[i].Created.UTC().Format(time.RFC3339), list[i].LastModified)
|
||||
require.Equal(hc.t, realSize, list[i].Size)
|
||||
}
|
||||
}
|
||||
|
||||
func checkListVersionsOldNodes(hc *handlerContext, list []ObjectVersionResponse, objInfos []*data.ObjectInfo) {
|
||||
require.Len(hc.t, list, len(objInfos))
|
||||
for i := range list {
|
||||
require.Equal(hc.t, objInfos[i].Name, list[i].Key)
|
||||
realSize, err := layer.GetObjectSize(objInfos[i])
|
||||
require.NoError(hc.t, err)
|
||||
require.Equal(hc.t, objInfos[i].Owner.EncodeToString(), list[i].Owner.ID)
|
||||
require.Equal(hc.t, objInfos[i].Created.UTC().Format(time.RFC3339), list[i].LastModified)
|
||||
require.Equal(hc.t, realSize, list[i].Size)
|
||||
}
|
||||
}
|
||||
|
||||
func TestListObjectsContextCanceled(t *testing.T) {
|
||||
layerCfg := layer.DefaultCachesConfigs(zaptest.NewLogger(t))
|
||||
layerCfg.SessionList.Lifetime = time.Hour
|
||||
layerCfg.SessionList.Size = 1
|
||||
|
||||
hc := prepareHandlerContextBase(t, layerCfg)
|
||||
|
||||
bktName := "bucket-versioning-enabled"
|
||||
bktInfo := createTestBucket(hc, bktName)
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
putObject(hc, bktName, "object"+strconv.Itoa(i))
|
||||
}
|
||||
|
||||
result := listObjectsV1(hc, bktName, "", "", "", 2)
|
||||
session := hc.cache.GetListSession(hc.owner, cache.CreateListSessionCacheKey(bktInfo.CID, "", result.NextMarker))
|
||||
// invoke list again to trigger cache eviction
|
||||
// (use empty prefix to check that context canceled on replace)
|
||||
listObjectsV1(hc, bktName, "", "", "", 2)
|
||||
checkContextCanceled(session.Context, t)
|
||||
|
||||
result2 := listObjectsV2(hc, bktName, "", "", "", "", 2)
|
||||
session2 := hc.cache.GetListSession(hc.owner, cache.CreateListSessionCacheKey(bktInfo.CID, "", result2.NextContinuationToken))
|
||||
// invoke list again to trigger cache eviction
|
||||
// (use non-empty prefix to check that context canceled on cache eviction)
|
||||
listObjectsV2(hc, bktName, "o", "", "", "", 2)
|
||||
checkContextCanceled(session2.Context, t)
|
||||
|
||||
result3 := listObjectsVersions(hc, bktName, "", "", "", "", 2)
|
||||
session3 := hc.cache.GetListSession(hc.owner, cache.CreateListSessionCacheKey(bktInfo.CID, "", result3.NextVersionIDMarker))
|
||||
// invoke list again to trigger cache eviction
|
||||
listObjectsVersions(hc, bktName, "o", "", "", "", 2)
|
||||
checkContextCanceled(session3.Context, t)
|
||||
}
|
||||
|
||||
func checkContextCanceled(ctx context.Context, t *testing.T) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-time.After(10 * time.Second):
|
||||
}
|
||||
require.ErrorIs(t, ctx.Err(), context.Canceled)
|
||||
}
|
||||
|
||||
func TestListObjectsLatestVersions(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-versioning-enabled"
|
||||
createTestBucket(hc, bktName)
|
||||
putBucketVersioning(t, hc, bktName, true)
|
||||
|
||||
objName1, objName2 := "object1", "object2"
|
||||
objContent1, objContent2 := "content1", "content2"
|
||||
|
||||
putObjectContent(hc, bktName, objName1, objContent1)
|
||||
hdr1 := putObjectContent(hc, bktName, objName1, objContent2)
|
||||
putObjectContent(hc, bktName, objName2, objContent1)
|
||||
hdr2 := putObjectContent(hc, bktName, objName2, objContent2)
|
||||
|
||||
t.Run("listv1", func(t *testing.T) {
|
||||
result := listObjectsV1(hc, bktName, "", "", "", -1)
|
||||
|
||||
require.Len(t, result.Contents, 2)
|
||||
require.Equal(t, objName1, result.Contents[0].Key)
|
||||
require.Equal(t, hdr1.Get(api.ETag), result.Contents[0].ETag)
|
||||
require.Equal(t, objName2, result.Contents[1].Key)
|
||||
require.Equal(t, hdr2.Get(api.ETag), result.Contents[1].ETag)
|
||||
})
|
||||
|
||||
t.Run("listv2", func(t *testing.T) {
|
||||
result := listObjectsV2(hc, bktName, "", "", "", "", -1)
|
||||
|
||||
require.Len(t, result.Contents, 2)
|
||||
require.Equal(t, objName1, result.Contents[0].Key)
|
||||
require.Equal(t, hdr1.Get(api.ETag), result.Contents[0].ETag)
|
||||
require.Equal(t, objName2, result.Contents[1].Key)
|
||||
require.Equal(t, hdr2.Get(api.ETag), result.Contents[1].ETag)
|
||||
})
|
||||
}
|
||||
|
||||
func TestListObjectsVersionsPaging(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-versioning-enabled"
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
n := 10
|
||||
n := 12
|
||||
|
||||
var objects []string
|
||||
for i := 0; i < n; i++ {
|
||||
|
@ -88,6 +246,65 @@ func TestListObjectsPaging(t *testing.T) {
|
|||
require.Empty(t, objects)
|
||||
}
|
||||
|
||||
func TestListObjectsVersionsCorrectIsLatestFlag(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-versioning-enabled"
|
||||
createVersionedBucket(hc, bktName)
|
||||
|
||||
objName1, objName2 := "obj1", "obj2"
|
||||
|
||||
n := 9
|
||||
listSize := 3
|
||||
headers := make([]http.Header, n)
|
||||
// objects uploaded: ["obj1"-v1, "obj1"-v2, "obj1"-v3, "obj2"-v1, "obj2"-v2, "obj2"-v3, "obj2"-v4, "obj2"-v5, "obj2"-v6]
|
||||
for i := 0; i < n; i++ {
|
||||
objName := objName1
|
||||
if i >= listSize {
|
||||
objName = objName2
|
||||
}
|
||||
headers[i] = putObjectContent(hc, bktName, objName, fmt.Sprintf("content/%d", i))
|
||||
}
|
||||
|
||||
versions := listObjectsVersions(hc, bktName, "", "", "", "", listSize)
|
||||
// expected objects: ["obj1"-v3, "obj1"-v2, "obj1"-v1]
|
||||
checkListVersionsParts(t, versions, formReverseVersionResponse(objName1, headers[:listSize], true))
|
||||
|
||||
versions = listObjectsVersions(hc, bktName, "", "", versions.NextKeyMarker, versions.NextVersionIDMarker, listSize)
|
||||
// expected objects: ["obj2"-v6, "obj2"-v5, "obj2"-v4]
|
||||
checkListVersionsParts(t, versions, formReverseVersionResponse(objName2, headers[2*listSize:], true))
|
||||
|
||||
versions = listObjectsVersions(hc, bktName, "", "", versions.NextKeyMarker, versions.NextVersionIDMarker, listSize)
|
||||
// expected objects: ["obj2"-v3, "obj2"-v2, "obj2"-v1]
|
||||
checkListVersionsParts(t, versions, formReverseVersionResponse(objName2, headers[listSize:2*listSize], false))
|
||||
}
|
||||
|
||||
func formReverseVersionResponse(objName string, headers []http.Header, isLatest bool) []ObjectVersionResponse {
|
||||
res := make([]ObjectVersionResponse, len(headers))
|
||||
|
||||
for i, h := range headers {
|
||||
ind := len(headers) - 1 - i
|
||||
res[ind] = ObjectVersionResponse{
|
||||
ETag: h.Get(api.ETag),
|
||||
IsLatest: isLatest && ind == 0,
|
||||
Key: objName,
|
||||
VersionID: h.Get(api.AmzVersionID),
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func checkListVersionsParts(t *testing.T, versions *ListObjectsVersionsResponse, expected []ObjectVersionResponse) {
|
||||
require.Len(t, versions.Version, len(expected))
|
||||
for i, res := range versions.Version {
|
||||
require.Equal(t, expected[i].Key, res.Key)
|
||||
require.Equal(t, expected[i].ETag, res.ETag)
|
||||
require.Equal(t, expected[i].VersionID, res.VersionID)
|
||||
require.Equal(t, expected[i].IsLatest, res.IsLatest)
|
||||
}
|
||||
}
|
||||
|
||||
func TestS3CompatibilityBucketListV2BothContinuationTokenStartAfter(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
|
||||
|
@ -162,6 +379,132 @@ func TestS3BucketListDelimiterBasic(t *testing.T) {
|
|||
require.Equal(t, "quux/", listV1Response.CommonPrefixes[1].Prefix)
|
||||
}
|
||||
|
||||
func TestS3BucketListEmpty(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-for-listing"
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
versions := listObjectsVersions(hc, bktName, "", "", "", "", -1)
|
||||
require.Empty(t, versions.Version)
|
||||
require.Empty(t, versions.DeleteMarker)
|
||||
require.Empty(t, versions.CommonPrefixes)
|
||||
}
|
||||
|
||||
func TestS3BucketListV2PrefixAlt(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-for-listing"
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
objects := []string{"bar", "baz", "foo"}
|
||||
for _, objName := range objects {
|
||||
putObject(hc, bktName, objName)
|
||||
}
|
||||
|
||||
response := listObjectsV2(hc, bktName, "ba", "", "", "", -1)
|
||||
|
||||
require.Equal(t, "ba", response.Prefix)
|
||||
require.Len(t, response.Contents, 2)
|
||||
require.Equal(t, "bar", response.Contents[0].Key)
|
||||
require.Equal(t, "baz", response.Contents[1].Key)
|
||||
require.Empty(t, response.CommonPrefixes)
|
||||
}
|
||||
|
||||
func TestS3BucketListV2PrefixNotExist(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-for-listing"
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
objects := []string{"foo/bar", "foo/baz", "quux"}
|
||||
for _, objName := range objects {
|
||||
putObject(hc, bktName, objName)
|
||||
}
|
||||
|
||||
response := listObjectsV2(hc, bktName, "d", "", "", "", -1)
|
||||
|
||||
require.Equal(t, "d", response.Prefix)
|
||||
require.Empty(t, response.Contents)
|
||||
require.Empty(t, response.CommonPrefixes)
|
||||
}
|
||||
|
||||
func TestS3BucketListV2PrefixUnreadable(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-for-listing"
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
objects := []string{"foo/bar", "foo/baz", "quux"}
|
||||
for _, objName := range objects {
|
||||
putObject(hc, bktName, objName)
|
||||
}
|
||||
|
||||
response := listObjectsV2(hc, bktName, "\x0a", "", "", "", -1)
|
||||
|
||||
require.Equal(t, "\x0a", response.Prefix)
|
||||
require.Empty(t, response.Contents)
|
||||
require.Empty(t, response.CommonPrefixes)
|
||||
}
|
||||
|
||||
func TestS3BucketListV2PrefixDelimiterAlt(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-for-listing"
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
objects := []string{"bar", "bazar", "cab", "foo"}
|
||||
for _, objName := range objects {
|
||||
putObject(hc, bktName, objName)
|
||||
}
|
||||
|
||||
response := listObjectsV2(hc, bktName, "ba", "a", "", "", -1)
|
||||
|
||||
require.Equal(t, "ba", response.Prefix)
|
||||
require.Equal(t, "a", response.Delimiter)
|
||||
require.Len(t, response.Contents, 1)
|
||||
require.Equal(t, "bar", response.Contents[0].Key)
|
||||
require.Len(t, response.CommonPrefixes, 1)
|
||||
require.Equal(t, "baza", response.CommonPrefixes[0].Prefix)
|
||||
}
|
||||
|
||||
func TestS3BucketListV2PrefixDelimiterDelimiterNotExist(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-for-listing"
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
objects := []string{"b/a/c", "b/a/g", "b/a/r", "g"}
|
||||
for _, objName := range objects {
|
||||
putObject(hc, bktName, objName)
|
||||
}
|
||||
|
||||
response := listObjectsV2(hc, bktName, "b", "z", "", "", -1)
|
||||
|
||||
require.Len(t, response.Contents, 3)
|
||||
require.Equal(t, "b/a/c", response.Contents[0].Key)
|
||||
require.Equal(t, "b/a/g", response.Contents[1].Key)
|
||||
require.Equal(t, "b/a/r", response.Contents[2].Key)
|
||||
require.Empty(t, response.CommonPrefixes)
|
||||
}
|
||||
|
||||
func TestS3BucketListV2PrefixDelimiterPrefixDelimiterNotExist(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-for-listing"
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
objects := []string{"b/a/c", "b/a/g", "b/a/r", "g"}
|
||||
for _, objName := range objects {
|
||||
putObject(hc, bktName, objName)
|
||||
}
|
||||
|
||||
response := listObjectsV2(hc, bktName, "y", "z", "", "", -1)
|
||||
|
||||
require.Empty(t, response.Contents)
|
||||
require.Empty(t, response.CommonPrefixes)
|
||||
}
|
||||
|
||||
func TestS3BucketListV2DelimiterPercentage(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
|
||||
|
@ -182,6 +525,35 @@ func TestS3BucketListV2DelimiterPercentage(t *testing.T) {
|
|||
require.Equal(t, "c%", listV2Response.CommonPrefixes[1].Prefix)
|
||||
}
|
||||
|
||||
func TestS3BucketListDelimiterPrefix(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-for-listing"
|
||||
bktInfo := createTestBucket(hc, bktName)
|
||||
|
||||
objects := []string{"asdf", "boo/bar", "boo/baz/xyzzy", "cquux/thud", "cquux/bla"}
|
||||
for _, objName := range objects {
|
||||
createTestObject(hc, bktInfo, objName, encryption.Params{})
|
||||
}
|
||||
|
||||
var empty []string
|
||||
delim := "/"
|
||||
prefix := ""
|
||||
|
||||
marker := validateListV1(t, hc, bktName, prefix, delim, "", 1, true, []string{"asdf"}, empty, "asdf")
|
||||
marker = validateListV1(t, hc, bktName, prefix, delim, marker, 1, true, empty, []string{"boo/"}, "boo/")
|
||||
validateListV1(t, hc, bktName, prefix, delim, marker, 1, false, empty, []string{"cquux/"}, "")
|
||||
|
||||
marker = validateListV1(t, hc, bktName, prefix, delim, "", 2, true, []string{"asdf"}, []string{"boo/"}, "boo/")
|
||||
validateListV1(t, hc, bktName, prefix, delim, marker, 2, false, empty, []string{"cquux/"}, "")
|
||||
|
||||
prefix = "boo/"
|
||||
marker = validateListV1(t, hc, bktName, prefix, delim, "", 1, true, []string{"boo/bar"}, empty, "boo/bar")
|
||||
validateListV1(t, hc, bktName, prefix, delim, marker, 1, false, empty, []string{"boo/baz/"}, "")
|
||||
|
||||
validateListV1(t, hc, bktName, prefix, delim, "", 2, false, []string{"boo/bar"}, []string{"boo/baz/"}, "")
|
||||
}
|
||||
|
||||
func TestS3BucketListV2DelimiterPrefix(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
|
||||
|
@ -211,6 +583,65 @@ func TestS3BucketListV2DelimiterPrefix(t *testing.T) {
|
|||
validateListV2(t, tc, bktName, prefix, delim, "", 2, false, true, []string{"boo/bar"}, []string{"boo/baz/"})
|
||||
}
|
||||
|
||||
func TestS3BucketListDelimiterPrefixUnderscore(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-for-listing"
|
||||
bktInfo := createTestBucket(hc, bktName)
|
||||
|
||||
objects := []string{"_obj1_", "_under1/bar", "_under1/baz/xyzzy", "_under2/thud", "_under2/bla"}
|
||||
for _, objName := range objects {
|
||||
createTestObject(hc, bktInfo, objName, encryption.Params{})
|
||||
}
|
||||
|
||||
var empty []string
|
||||
delim := "/"
|
||||
prefix := ""
|
||||
|
||||
marker := validateListV1(t, hc, bktName, prefix, delim, "", 1, true, []string{"_obj1_"}, empty, "_obj1_")
|
||||
marker = validateListV1(t, hc, bktName, prefix, delim, marker, 1, true, empty, []string{"_under1/"}, "_under1/")
|
||||
validateListV1(t, hc, bktName, prefix, delim, marker, 1, false, empty, []string{"_under2/"}, "")
|
||||
|
||||
marker = validateListV1(t, hc, bktName, prefix, delim, "", 2, true, []string{"_obj1_"}, []string{"_under1/"}, "_under1/")
|
||||
validateListV1(t, hc, bktName, prefix, delim, marker, 2, false, empty, []string{"_under2/"}, "")
|
||||
|
||||
prefix = "_under1/"
|
||||
marker = validateListV1(t, hc, bktName, prefix, delim, "", 1, true, []string{"_under1/bar"}, empty, "_under1/bar")
|
||||
validateListV1(t, hc, bktName, prefix, delim, marker, 1, false, empty, []string{"_under1/baz/"}, "")
|
||||
|
||||
validateListV1(t, hc, bktName, prefix, delim, "", 2, false, []string{"_under1/bar"}, []string{"_under1/baz/"}, "")
|
||||
}
|
||||
|
||||
func TestS3BucketListDelimiterNotSkipSpecial(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-for-listing"
|
||||
bktInfo := createTestBucket(hc, bktName)
|
||||
|
||||
objects := []string{"0/"}
|
||||
for i := 1000; i < 1999; i++ {
|
||||
objects = append(objects, fmt.Sprintf("0/%d", i))
|
||||
}
|
||||
|
||||
objects2 := []string{"1999", "1999#", "1999+", "2000"}
|
||||
objects = append(objects, objects2...)
|
||||
|
||||
for _, objName := range objects {
|
||||
createTestObject(hc, bktInfo, objName, encryption.Params{})
|
||||
}
|
||||
|
||||
delimiter := "/"
|
||||
list := listObjectsV1(hc, bktName, "", delimiter, "", -1)
|
||||
|
||||
require.Equal(t, delimiter, list.Delimiter)
|
||||
require.Equal(t, []CommonPrefix{{Prefix: "0/"}}, list.CommonPrefixes)
|
||||
|
||||
require.Len(t, list.Contents, len(objects2))
|
||||
for i := 0; i < len(list.Contents); i++ {
|
||||
require.Equal(t, objects2[i], list.Contents[i].Key)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMintVersioningListObjectVersionsVersionIDContinuation(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
|
@ -232,12 +663,16 @@ func TestMintVersioningListObjectVersionsVersionIDContinuation(t *testing.T) {
|
|||
checkVersionsNames(t, page1, objects)
|
||||
require.Equal(t, page1.Version[maxKeys-1].VersionID, page1.NextVersionIDMarker)
|
||||
require.True(t, page1.IsTruncated)
|
||||
require.Empty(t, page1.KeyMarker)
|
||||
require.Empty(t, page1.VersionIDMarker)
|
||||
|
||||
page2 := listObjectsVersions(hc, bktName, "", "", page1.NextKeyMarker, page1.NextVersionIDMarker, maxKeys)
|
||||
require.Len(t, page2.Version, maxKeys)
|
||||
checkVersionsNames(t, page1, objects)
|
||||
require.Empty(t, page2.NextVersionIDMarker)
|
||||
require.False(t, page2.IsTruncated)
|
||||
require.Equal(t, page1.NextKeyMarker, page2.KeyMarker)
|
||||
require.Equal(t, page1.NextVersionIDMarker, page2.VersionIDMarker)
|
||||
}
|
||||
|
||||
func checkVersionsNames(t *testing.T, versions *ListObjectsVersionsResponse, names []string) {
|
||||
|
@ -247,13 +682,21 @@ func checkVersionsNames(t *testing.T, versions *ListObjectsVersionsResponse, nam
|
|||
}
|
||||
|
||||
func listObjectsV2(hc *handlerContext, bktName, prefix, delimiter, startAfter, continuationToken string, maxKeys int) *ListObjectsV2Response {
|
||||
return listObjectsV2Ext(hc, bktName, prefix, delimiter, startAfter, continuationToken, "", maxKeys)
|
||||
}
|
||||
|
||||
func listObjectsV2Ext(hc *handlerContext, bktName, prefix, delimiter, startAfter, continuationToken, encodingType string, maxKeys int) *ListObjectsV2Response {
|
||||
query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
|
||||
query.Add("fetch-owner", "true")
|
||||
if len(startAfter) != 0 {
|
||||
query.Add("start-after", startAfter)
|
||||
}
|
||||
if len(continuationToken) != 0 {
|
||||
query.Add("continuation-token", continuationToken)
|
||||
}
|
||||
if len(encodingType) != 0 {
|
||||
query.Add("encoding-type", encodingType)
|
||||
}
|
||||
|
||||
w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
|
||||
hc.Handler().ListObjectsV2Handler(w, r)
|
||||
|
@ -263,6 +706,26 @@ func listObjectsV2(hc *handlerContext, bktName, prefix, delimiter, startAfter, c
|
|||
return res
|
||||
}
|
||||
|
||||
func validateListV1(t *testing.T, tc *handlerContext, bktName, prefix, delimiter, marker string, maxKeys int,
|
||||
isTruncated bool, checkObjects, checkPrefixes []string, nextMarker string) string {
|
||||
response := listObjectsV1(tc, bktName, prefix, delimiter, marker, maxKeys)
|
||||
|
||||
require.Equal(t, isTruncated, response.IsTruncated)
|
||||
require.Equal(t, nextMarker, response.NextMarker)
|
||||
|
||||
require.Len(t, response.Contents, len(checkObjects))
|
||||
for i := 0; i < len(checkObjects); i++ {
|
||||
require.Equal(t, checkObjects[i], response.Contents[i].Key)
|
||||
}
|
||||
|
||||
require.Len(t, response.CommonPrefixes, len(checkPrefixes))
|
||||
for i := 0; i < len(checkPrefixes); i++ {
|
||||
require.Equal(t, checkPrefixes[i], response.CommonPrefixes[i].Prefix)
|
||||
}
|
||||
|
||||
return response.NextMarker
|
||||
}
|
||||
|
||||
func validateListV2(t *testing.T, tc *handlerContext, bktName, prefix, delimiter, continuationToken string, maxKeys int,
|
||||
isTruncated, last bool, checkObjects, checkPrefixes []string) string {
|
||||
response := listObjectsV2(tc, bktName, prefix, delimiter, "", continuationToken, maxKeys)
@ -245,9 +245,10 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
Header: metadata,
|
||||
Encryption: encryptionParams,
|
||||
ContentMD5: r.Header.Get(api.ContentMD5),
|
||||
ContentSHA256Hash: r.Header.Get(api.AmzContentSha256),
|
||||
}
|
||||
|
||||
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, bktInfo.LocationConstraint)
|
||||
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, bktInfo.LocationConstraint)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "invalid copies number", reqInfo, err)
|
||||
return
|
||||
|
@ -276,7 +277,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
s := &SendNotificationParams{
|
||||
Event: EventObjectCreatedPut,
|
||||
NotificationInfo: data.NotificationInfoFromObject(objInfo),
|
||||
NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.MD5Enabled()),
|
||||
BktInfo: bktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
|
@ -327,7 +328,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
addSSECHeaders(w.Header(), r.Header)
|
||||
}
|
||||
|
||||
w.Header().Set(api.ETag, objInfo.ETag(h.cfg.MD5Enabled()))
|
||||
w.Header().Set(api.ETag, data.Quote(objInfo.ETag(h.cfg.MD5Enabled())))
|
||||
|
||||
middleware.WriteSuccessResponseHeadersOnly(w)
|
||||
}
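The handler now returns the ETag wrapped in double quotes via data.Quote, and the multipart code later in this diff compares incoming part ETags with data.UnQuote. A minimal sketch of what such helpers could look like, assuming they only add and strip the surrounding quotes (the real implementations live in the data package and may differ):

// Hypothetical quoting helpers; names mirror data.Quote/data.UnQuote used in the hunks above.
func Quote(s string) string {
	return `"` + s + `"`
}

func UnQuote(s string) string {
	if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
		return s[1 : len(s)-1]
	}
	return s
}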
|
||||
|
@ -522,7 +523,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
s := &SendNotificationParams{
|
||||
Event: EventObjectCreatedPost,
|
||||
NotificationInfo: data.NotificationInfoFromObject(objInfo),
|
||||
NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.MD5Enabled()),
|
||||
BktInfo: bktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
|
@ -591,7 +592,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
resp := &PostResponse{
|
||||
Bucket: objInfo.Bucket,
|
||||
Key: objInfo.Name,
|
||||
ETag: objInfo.ETag(h.cfg.MD5Enabled()),
|
||||
ETag: data.Quote(objInfo.ETag(h.cfg.MD5Enabled())),
|
||||
}
|
||||
w.WriteHeader(status)
|
||||
if _, err = w.Write(middleware.EncodeResponse(resp)); err != nil {
|
||||
|
@ -601,7 +602,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
}
|
||||
|
||||
w.Header().Set(api.ETag, objInfo.HashSum)
|
||||
w.Header().Set(api.ETag, data.Quote(objInfo.ETag(h.cfg.MD5Enabled())))
|
||||
w.WriteHeader(status)
|
||||
}
|
||||
|
||||
|
@ -748,6 +749,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
|||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
p := &layer.CreateBucketParams{
|
||||
Name: reqInfo.BucketName,
|
||||
Namespace: reqInfo.Namespace,
|
||||
}
|
||||
|
||||
if err := checkBucketName(reqInfo.BucketName); err != nil {
|
||||
|
@ -798,7 +800,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
if err = h.setPolicy(p, createParams.LocationConstraint, policies); err != nil {
|
||||
if err = h.setPolicy(p, reqInfo.Namespace, createParams.LocationConstraint, policies); err != nil {
|
||||
h.logAndSendError(w, "couldn't set placement policy", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
@ -828,8 +830,8 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
|||
middleware.WriteSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
func (h handler) setPolicy(prm *layer.CreateBucketParams, locationConstraint string, userPolicies []*accessbox.ContainerPolicy) error {
|
||||
prm.Policy = h.cfg.DefaultPlacementPolicy()
|
||||
func (h handler) setPolicy(prm *layer.CreateBucketParams, namespace, locationConstraint string, userPolicies []*accessbox.ContainerPolicy) error {
|
||||
prm.Policy = h.cfg.DefaultPlacementPolicy(namespace)
|
||||
prm.LocationConstraint = locationConstraint
|
||||
|
||||
if locationConstraint == "" {
|
||||
|
@ -843,7 +845,7 @@ func (h handler) setPolicy(prm *layer.CreateBucketParams, locationConstraint str
|
|||
}
|
||||
}
|
||||
|
||||
if policy, ok := h.cfg.PlacementPolicy(locationConstraint); ok {
|
||||
if policy, ok := h.cfg.PlacementPolicy(namespace, locationConstraint); ok {
|
||||
prm.Policy = policy
|
||||
return nil
|
||||
}
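setPolicy now resolves the placement policy per namespace as well as per location constraint. A minimal in-memory sketch of such a two-level lookup; the type and field names are assumptions for illustration, not the gateway's actual config types:

// Hypothetical two-level policy lookup: namespace -> location constraint -> policy.
type placementPolicies struct {
	defaults map[string]netmap.PlacementPolicy            // per-namespace default
	regions  map[string]map[string]netmap.PlacementPolicy // per-namespace, per-constraint
}

func (p *placementPolicies) DefaultPlacementPolicy(namespace string) netmap.PlacementPolicy {
	return p.defaults[namespace]
}

func (p *placementPolicies) PlacementPolicy(namespace, constraint string) (netmap.PlacementPolicy, bool) {
	policy, ok := p.regions[namespace][constraint]
	return policy, ok
}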
@ -17,8 +17,8 @@ import (
|
|||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
|
@ -204,7 +204,64 @@ func TestPutObjectWithEnabledMD5(t *testing.T) {
|
|||
md5Hash.Write(content)
|
||||
w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
|
||||
tc.Handler().PutObjectHandler(w, r)
|
||||
require.Equal(t, hex.EncodeToString(md5Hash.Sum(nil)), w.Header().Get(api.ETag))
|
||||
require.Equal(t, data.Quote(hex.EncodeToString(md5Hash.Sum(nil))), w.Header().Get(api.ETag))
|
||||
}
|
||||
|
||||
func TestPutObjectCheckContentSHA256(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-for-put", "object-for-put"
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
hash string
|
||||
content []byte
|
||||
error bool
|
||||
}{
|
||||
{
|
||||
name: "invalid hash value",
|
||||
hash: "d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8",
|
||||
content: []byte("content"),
|
||||
error: true,
|
||||
},
|
||||
{
|
||||
name: "correct hash for empty payload",
|
||||
hash: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
content: []byte(""),
|
||||
error: false,
|
||||
},
|
||||
{
|
||||
name: "unsigned payload",
|
||||
hash: "UNSIGNED-PAYLOAD",
|
||||
content: []byte("content"),
|
||||
error: false,
|
||||
},
|
||||
{
|
||||
name: "correct hash",
|
||||
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
|
||||
content: []byte("content"),
|
||||
error: false,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
w, r := prepareTestPayloadRequest(hc, bktName, objName, bytes.NewReader(tc.content))
|
||||
r.Header.Set("X-Amz-Content-Sha256", tc.hash)
|
||||
hc.Handler().PutObjectHandler(w, r)
|
||||
|
||||
if tc.error {
|
||||
assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch))
|
||||
|
||||
w, r := prepareTestRequest(hc, bktName, objName, nil)
|
||||
hc.Handler().GetObjectHandler(w, r)
|
||||
|
||||
assertStatus(t, w, http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
assertStatus(t, w, http.StatusOK)
|
||||
})
|
||||
}
|
||||
}
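For reference, the "correct hash" case in the table above is simply the hex-encoded SHA-256 digest of the raw payload, which is what the X-Amz-Content-Sha256 header carries for a non-chunked signed payload. A self-contained check:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha256.Sum256([]byte("content"))
	// Prints ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73,
	// matching the "correct hash" test case above.
	fmt.Println(hex.EncodeToString(sum[:]))
}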
|
||||
|
||||
func TestPutObjectWithStreamBodyAWSExample(t *testing.T) {
|
||||
|
@ -297,10 +354,9 @@ func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName strin
|
|||
reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName})
|
||||
req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
|
||||
req = req.WithContext(middleware.SetClientTime(req.Context(), signTime))
|
||||
req = req.WithContext(middleware.SetAuthHeaders(req.Context(), &auth.AuthHeader{
|
||||
req = req.WithContext(middleware.SetAuthHeaders(req.Context(), &middleware.AuthHeader{
|
||||
AccessKeyID: AWSAccessKeyID,
|
||||
SignatureV4: "4f232c4386841ef735655705268965c44a0e4690baa4adea153f7db9fa80a0a9",
|
||||
Service: "s3",
|
||||
Region: "us-east-1",
|
||||
}))
|
||||
req = req.WithContext(middleware.SetBoxData(req.Context(), &accessbox.Box{
|
||||
|
@ -324,6 +380,26 @@ func TestCreateBucket(t *testing.T) {
|
|||
createBucketAssertS3Error(hc, bktName, box2, s3errors.ErrBucketAlreadyExists)
|
||||
}
|
||||
|
||||
func TestCreateNamespacedBucket(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
bktName := "bkt-name"
|
||||
namespace := "yabloko"
|
||||
|
||||
box, _ := createAccessBox(t)
|
||||
w, r := prepareTestRequest(hc, bktName, "", nil)
|
||||
ctx := middleware.SetBoxData(r.Context(), box)
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
reqInfo.Namespace = namespace
|
||||
r = r.WithContext(middleware.SetReqInfo(ctx, reqInfo))
|
||||
hc.Handler().CreateBucketHandler(w, r)
|
||||
assertStatus(t, w, http.StatusOK)
|
||||
|
||||
bktInfo, err := hc.Layer().GetBucketInfo(middleware.SetReqInfo(hc.Context(), reqInfo), bktName)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, namespace+".ns", bktInfo.Zone)
|
||||
}
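The assertion above relies on the namespace-to-zone mapping used when the container is created (FeatureSettingsMock further down in this diff implements the same rule). A standalone sketch of that mapping:

// Sketch of the zone-formation rule: an empty namespace maps to the default
// zone, any other namespace gets a ".ns" suffix (so "yabloko" -> "yabloko.ns").
func formContainerZone(ns, defaultZone string) (zone string, isDefault bool) {
	if ns == "" {
		return defaultZone, true
	}
	return ns + ".ns", false
}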
|
||||
|
||||
func TestPutObjectClientCut(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
bktName, objName1, objName2 := "bkt-name", "obj-name1", "obj-name2"
@ -13,6 +13,7 @@ import (
|
|||
type Cache struct {
|
||||
logger *zap.Logger
|
||||
listsCache *cache.ObjectsListCache
|
||||
sessionListCache *cache.ListSessionCache
|
||||
objCache *cache.ObjectsCache
|
||||
namesCache *cache.ObjectsNameCache
|
||||
bucketCache *cache.BucketCache
|
||||
|
@ -25,6 +26,7 @@ type CachesConfig struct {
|
|||
Logger *zap.Logger
|
||||
Objects *cache.Config
|
||||
ObjectsList *cache.Config
|
||||
SessionList *cache.Config
|
||||
Names *cache.Config
|
||||
Buckets *cache.Config
|
||||
System *cache.Config
|
||||
|
@ -37,6 +39,7 @@ func DefaultCachesConfigs(logger *zap.Logger) *CachesConfig {
|
|||
Logger: logger,
|
||||
Objects: cache.DefaultObjectsConfig(logger),
|
||||
ObjectsList: cache.DefaultObjectsListConfig(logger),
|
||||
SessionList: cache.DefaultListSessionConfig(logger),
|
||||
Names: cache.DefaultObjectsNameConfig(logger),
|
||||
Buckets: cache.DefaultBucketConfig(logger),
|
||||
System: cache.DefaultSystemConfig(logger),
|
||||
|
@ -48,6 +51,7 @@ func NewCache(cfg *CachesConfig) *Cache {
|
|||
return &Cache{
|
||||
logger: cfg.Logger,
|
||||
listsCache: cache.NewObjectsListCache(cfg.ObjectsList),
|
||||
sessionListCache: cache.NewListSessionCache(cfg.SessionList),
|
||||
objCache: cache.New(cfg.Objects),
|
||||
namesCache: cache.NewObjectsNameCache(cfg.Names),
|
||||
bucketCache: cache.NewBucketCache(cfg.Buckets),
|
||||
|
@ -56,21 +60,22 @@ func NewCache(cfg *CachesConfig) *Cache {
|
|||
}
|
||||
}
|
||||
|
||||
func (c *Cache) GetBucket(name string) *data.BucketInfo {
|
||||
return c.bucketCache.Get(name)
|
||||
func (c *Cache) GetBucket(ns, name string) *data.BucketInfo {
|
||||
return c.bucketCache.Get(ns, name)
|
||||
}
|
||||
|
||||
func (c *Cache) PutBucket(bktInfo *data.BucketInfo) {
|
||||
if err := c.bucketCache.Put(bktInfo); err != nil {
|
||||
c.logger.Warn(logs.CouldntPutBucketInfoIntoCache,
|
||||
zap.String("zone", bktInfo.Zone),
|
||||
zap.String("bucket name", bktInfo.Name),
|
||||
zap.Stringer("bucket cid", bktInfo.CID),
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) DeleteBucket(name string) {
|
||||
c.bucketCache.Delete(name)
|
||||
func (c *Cache) DeleteBucket(bktInfo *data.BucketInfo) {
|
||||
c.bucketCache.Delete(bktInfo)
|
||||
}
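Bucket cache entries are now addressed by namespace as well as bucket name, which is why GetBucket takes both values and DeleteBucket takes the whole BucketInfo. A hypothetical composite key, purely to illustrate why the single name is no longer enough (the real cache may key entries differently):

// Hypothetical composite cache key combining namespace and bucket name.
func bucketCacheKey(ns, name string) string {
	return ns + "/" + name
}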
|
||||
|
||||
func (c *Cache) CleanListCacheEntriesContainingObject(objectName string, cnrID cid.ID) {
|
||||
|
@ -143,6 +148,29 @@ func (c *Cache) PutList(owner user.ID, key cache.ObjectsListKey, list []*data.No
|
|||
}
|
||||
}
|
||||
|
||||
func (c *Cache) GetListSession(owner user.ID, key cache.ListSessionKey) *data.ListSession {
|
||||
if !c.accessCache.Get(owner, key.String()) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return c.sessionListCache.GetListSession(key)
|
||||
}
|
||||
|
||||
func (c *Cache) PutListSession(owner user.ID, key cache.ListSessionKey, session *data.ListSession) {
|
||||
if err := c.sessionListCache.PutListSession(key, session); err != nil {
|
||||
c.logger.Warn(logs.CouldntCacheListSession, zap.Error(err))
|
||||
}
|
||||
|
||||
if err := c.accessCache.Put(owner, key.String()); err != nil {
|
||||
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) DeleteListSession(owner user.ID, key cache.ListSessionKey) {
|
||||
c.sessionListCache.DeleteListSession(key)
|
||||
c.accessCache.Delete(owner, key.String())
|
||||
}
|
||||
|
||||
func (c *Cache) GetTagging(owner user.ID, key string) map[string]string {
|
||||
if !c.accessCache.Get(owner, key) {
|
||||
return nil
@ -5,10 +5,10 @@ import (
|
|||
"fmt"
|
||||
"strconv"
|
||||
|
||||
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||
|
@ -41,6 +41,8 @@ func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketIn
|
|||
CID: idCnr,
|
||||
Name: idCnr.EncodeToString(),
|
||||
}
|
||||
|
||||
reqInfo = middleware.GetReqInfo(ctx)
|
||||
)
|
||||
res, err = n.frostFS.Container(ctx, idCnr)
|
||||
if err != nil {
|
||||
|
@ -72,6 +74,11 @@ func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketIn
|
|||
}
|
||||
}
|
||||
|
||||
zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
|
||||
if zone != info.Zone {
|
||||
return nil, fmt.Errorf("ns '%s' and zone '%s' are mismatched for container '%s'", zone, info.Zone, idCnr)
|
||||
}
|
||||
|
||||
n.cache.PutBucket(info)
|
||||
|
||||
return info, nil
|
||||
|
@ -102,9 +109,12 @@ func (n *layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
|
|||
if p.LocationConstraint == "" {
|
||||
p.LocationConstraint = api.DefaultLocationConstraint // s3tests_boto3.functional.test_s3:test_bucket_get_location
|
||||
}
|
||||
|
||||
zone, _ := n.features.FormContainerZone(p.Namespace)
|
||||
|
||||
bktInfo := &data.BucketInfo{
|
||||
Name: p.Name,
|
||||
Zone: v2container.SysAttributeZoneDefault,
|
||||
Zone: zone,
|
||||
Owner: n.BearerOwner(ctx),
|
||||
Created: TimeNow(ctx),
|
||||
LocationConstraint: p.LocationConstraint,
|
||||
|
@ -127,6 +137,7 @@ func (n *layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
|
|||
Creator: bktInfo.Owner,
|
||||
Policy: p.Policy,
|
||||
Name: p.Name,
|
||||
Zone: zone,
|
||||
SessionToken: p.SessionContainerCreation,
|
||||
CreationTime: bktInfo.Created,
|
||||
AdditionalAttributes: attributes,
|
||||
|
|
|
@ -30,6 +30,9 @@ type PrmContainerCreate struct {
|
|||
// Name for the container.
|
||||
Name string
|
||||
|
||||
// Zone for container registration.
|
||||
Zone string
|
||||
|
||||
// CreationTime value for Timestamp attribute
|
||||
CreationTime time.Time
@ -10,6 +10,7 @@ import (
|
|||
"io"
|
||||
"time"
|
||||
|
||||
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
|
||||
objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||
|
@ -50,6 +51,14 @@ func (k *FeatureSettingsMock) SetMD5Enabled(md5Enabled bool) {
|
|||
k.md5Enabled = md5Enabled
|
||||
}
|
||||
|
||||
func (k *FeatureSettingsMock) FormContainerZone(ns string) (zone string, isDefault bool) {
|
||||
if ns == "" {
|
||||
return v2container.SysAttributeZoneDefault, true
|
||||
}
|
||||
|
||||
return ns + ".ns", false
|
||||
}
|
||||
|
||||
type TestFrostFS struct {
|
||||
FrostFS
|
||||
|
||||
|
@ -143,6 +152,7 @@ func (t *TestFrostFS) CreateContainer(_ context.Context, prm PrmContainerCreate)
|
|||
if prm.Name != "" {
|
||||
var d container.Domain
|
||||
d.SetName(prm.Name)
|
||||
d.SetZone(prm.Zone)
|
||||
|
||||
container.WriteDomain(&cnr, d)
|
||||
container.SetName(&cnr, prm.Name)
@ -51,6 +51,7 @@ type (
|
|||
ClientCut() bool
|
||||
BufferMaxSizeForPut() uint64
|
||||
MD5Enabled() bool
|
||||
FormContainerZone(ns string) (zone string, isDefault bool)
|
||||
}
|
||||
|
||||
layer struct {
|
||||
|
@ -68,7 +69,7 @@ type (
|
|||
Config struct {
|
||||
GateOwner user.ID
|
||||
ChainAddress string
|
||||
Caches *CachesConfig
|
||||
Cache *Cache
|
||||
AnonKey AnonymousKey
|
||||
Resolver BucketResolver
|
||||
TreeService TreeService
|
||||
|
@ -122,6 +123,7 @@ type (
|
|||
CopiesNumbers []uint32
|
||||
CompleteMD5Hash string
|
||||
ContentMD5 string
|
||||
ContentSHA256Hash string
|
||||
}
|
||||
|
||||
PutCombinedObjectParams struct {
|
||||
|
@ -171,6 +173,7 @@ type (
|
|||
// CreateBucketParams stores bucket create request parameters.
|
||||
CreateBucketParams struct {
|
||||
Name string
|
||||
Namespace string
|
||||
Policy netmap.PlacementPolicy
|
||||
EACL *eacl.Table
|
||||
SessionContainerCreation *session.Container
|
||||
|
@ -320,7 +323,7 @@ func NewLayer(log *zap.Logger, frostFS FrostFS, config *Config) Client {
|
|||
gateOwner: config.GateOwner,
|
||||
anonKey: config.AnonKey,
|
||||
resolver: config.Resolver,
|
||||
cache: NewCache(config.Caches),
|
||||
cache: config.Cache,
|
||||
treeService: config.TreeService,
|
||||
features: config.Features,
|
||||
}
|
||||
|
@ -400,7 +403,9 @@ func (n *layer) GetBucketInfo(ctx context.Context, name string) (*data.BucketInf
|
|||
return nil, fmt.Errorf("unescape bucket name: %w", err)
|
||||
}
|
||||
|
||||
if bktInfo := n.cache.GetBucket(name); bktInfo != nil {
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
|
||||
if bktInfo := n.cache.GetBucket(reqInfo.Namespace, name); bktInfo != nil {
|
||||
return bktInfo, nil
|
||||
}
|
||||
|
||||
|
@ -646,7 +651,7 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
|
|||
|
||||
var nullVersionToDelete *data.NodeVersion
|
||||
if lastVersion.IsUnversioned {
|
||||
if !lastVersion.IsDeleteMarker() {
|
||||
if !lastVersion.IsDeleteMarker {
|
||||
nullVersionToDelete = lastVersion
|
||||
}
|
||||
} else if nullVersionToDelete, obj.Error = n.getNodeVersionToDelete(ctx, bkt, obj); obj.Error != nil {
|
||||
|
@ -662,7 +667,7 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
|
|||
}
|
||||
}
|
||||
|
||||
if lastVersion.IsDeleteMarker() {
|
||||
if lastVersion.IsDeleteMarker {
|
||||
obj.DeleteMarkVersion = lastVersion.OID.EncodeToString()
|
||||
return obj
|
||||
}
|
||||
|
@ -674,15 +679,14 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
|
|||
}
|
||||
|
||||
obj.DeleteMarkVersion = randOID.EncodeToString()
|
||||
|
||||
now := TimeNow(ctx)
|
||||
newVersion := &data.NodeVersion{
|
||||
BaseNodeVersion: data.BaseNodeVersion{
|
||||
OID: randOID,
|
||||
FilePath: obj.Name,
|
||||
},
|
||||
DeleteMarker: &data.DeleteMarkerInfo{
|
||||
Created: TimeNow(ctx),
|
||||
Owner: n.gateOwner,
|
||||
Created: &now,
|
||||
Owner: &n.gateOwner,
|
||||
IsDeleteMarker: true,
|
||||
},
|
||||
IsUnversioned: settings.VersioningSuspended(),
|
||||
}
|
||||
|
@ -707,24 +711,15 @@ func (n *layer) handleNotFoundError(bkt *data.BucketInfo, obj *VersionedObject)
|
|||
}
|
||||
|
||||
func (n *layer) handleObjectDeleteErrors(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject, nodeID uint64) *VersionedObject {
|
||||
if client.IsErrObjectAlreadyRemoved(obj.Error) {
|
||||
n.reqLogger(ctx).Debug(logs.ObjectAlreadyRemoved,
|
||||
zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID))
|
||||
|
||||
obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeID)
|
||||
if obj.Error != nil {
|
||||
if !client.IsErrObjectAlreadyRemoved(obj.Error) && !client.IsErrObjectNotFound(obj.Error) {
|
||||
return obj
|
||||
}
|
||||
|
||||
n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
|
||||
}
|
||||
|
||||
if client.IsErrObjectNotFound(obj.Error) {
|
||||
n.reqLogger(ctx).Debug(logs.ObjectNotFound,
|
||||
zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID))
|
||||
|
||||
obj.Error = nil
|
||||
n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting,
|
||||
zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID), zap.Error(obj.Error))
|
||||
|
||||
obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeID)
|
||||
if obj.Error == nil {
|
||||
n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
|
||||
}
|
||||
|
||||
|
@ -759,7 +754,7 @@ func (n *layer) getLastNodeVersion(ctx context.Context, bkt *data.BucketInfo, ob
|
|||
}
|
||||
|
||||
func (n *layer) removeOldVersion(ctx context.Context, bkt *data.BucketInfo, nodeVersion *data.NodeVersion, obj *VersionedObject) (string, error) {
|
||||
if nodeVersion.IsDeleteMarker() {
|
||||
if nodeVersion.IsDeleteMarker {
|
||||
return obj.VersionID, nil
|
||||
}
|
||||
|
||||
|
@ -805,14 +800,18 @@ func (n *layer) ResolveBucket(ctx context.Context, name string) (cid.ID, error)
|
|||
}
|
||||
|
||||
func (n *layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
|
||||
nodeVersions, err := n.getAllObjectsVersions(ctx, p.BktInfo, "", "")
|
||||
res, _, err := n.getAllObjectsVersions(ctx, commonVersionsListingParams{
|
||||
BktInfo: p.BktInfo,
|
||||
MaxKeys: 1,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(nodeVersions) != 0 {
|
||||
if len(res) != 0 {
|
||||
return errors.GetAPIError(errors.ErrBucketNotEmpty)
|
||||
}
|
||||
|
||||
n.cache.DeleteBucket(p.BktInfo.Name)
|
||||
n.cache.DeleteBucket(p.BktInfo)
|
||||
return n.frostFS.DeleteContainer(ctx, p.BktInfo.CID, p.SessionToken)
|
||||
}
api/layer/listing.go (new file, 725 lines)
@ -0,0 +1,725 @@
|
|||
package layer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"github.com/panjf2000/ants/v2"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
|
||||
// ListObjectsParamsCommon contains common parameters for ListObjectsV1 and ListObjectsV2.
|
||||
ListObjectsParamsCommon struct {
|
||||
BktInfo *data.BucketInfo
|
||||
Delimiter string
|
||||
Encode string
|
||||
MaxKeys int
|
||||
Prefix string
|
||||
}
|
||||
|
||||
// ListObjectsParamsV1 contains params for ListObjectsV1.
|
||||
ListObjectsParamsV1 struct {
|
||||
ListObjectsParamsCommon
|
||||
Marker string
|
||||
}
|
||||
|
||||
// ListObjectsParamsV2 contains params for ListObjectsV2.
|
||||
ListObjectsParamsV2 struct {
|
||||
ListObjectsParamsCommon
|
||||
ContinuationToken string
|
||||
StartAfter string
|
||||
FetchOwner bool
|
||||
}
|
||||
|
||||
// ListObjectsInfo contains common fields of data for ListObjectsV1 and ListObjectsV2.
|
||||
ListObjectsInfo struct {
|
||||
Prefixes []string
|
||||
Objects []*data.ExtendedNodeVersion
|
||||
IsTruncated bool
|
||||
}
|
||||
|
||||
// ListObjectsInfoV1 holds data which ListObjectsV1 returns.
|
||||
ListObjectsInfoV1 struct {
|
||||
ListObjectsInfo
|
||||
NextMarker string
|
||||
}
|
||||
|
||||
// ListObjectsInfoV2 holds data which ListObjectsV2 returns.
|
||||
ListObjectsInfoV2 struct {
|
||||
ListObjectsInfo
|
||||
NextContinuationToken string
|
||||
}
|
||||
|
||||
// ListObjectVersionsInfo stores info and list of objects versions.
|
||||
ListObjectVersionsInfo struct {
|
||||
CommonPrefixes []string
|
||||
IsTruncated bool
|
||||
KeyMarker string
|
||||
NextKeyMarker string
|
||||
NextVersionIDMarker string
|
||||
Version []*data.ExtendedNodeVersion
|
||||
DeleteMarker []*data.ExtendedNodeVersion
|
||||
VersionIDMarker string
|
||||
}
|
||||
|
||||
commonVersionsListingParams struct {
|
||||
BktInfo *data.BucketInfo
|
||||
Delimiter string
|
||||
Prefix string
|
||||
MaxKeys int
|
||||
Marker string
|
||||
Bookmark string
|
||||
}
|
||||
|
||||
commonLatestVersionsListingParams struct {
|
||||
commonVersionsListingParams
|
||||
ListType ListType
|
||||
}
|
||||
)
|
||||
|
||||
type ListType int
|
||||
|
||||
const (
|
||||
ListObjectsV1Type ListType = iota + 1
|
||||
ListObjectsV2Type ListType = iota + 1
|
||||
)
|
||||
|
||||
// ListObjectsV1 returns objects in a bucket for requests of Version 1.
|
||||
func (n *layer) ListObjectsV1(ctx context.Context, p *ListObjectsParamsV1) (*ListObjectsInfoV1, error) {
|
||||
var result ListObjectsInfoV1
|
||||
|
||||
prm := commonLatestVersionsListingParams{
|
||||
commonVersionsListingParams: commonVersionsListingParams{
|
||||
BktInfo: p.BktInfo,
|
||||
Delimiter: p.Delimiter,
|
||||
Prefix: p.Prefix,
|
||||
MaxKeys: p.MaxKeys,
|
||||
Marker: p.Marker,
|
||||
Bookmark: p.Marker,
|
||||
},
|
||||
ListType: ListObjectsV1Type,
|
||||
}
|
||||
|
||||
objects, next, err := n.getLatestObjectsVersions(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if next != nil {
|
||||
result.IsTruncated = true
|
||||
result.NextMarker = objects[len(objects)-1].Name()
|
||||
}
|
||||
|
||||
result.Prefixes, result.Objects = triageExtendedObjects(objects)
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// ListObjectsV2 returns objects in a bucket for requests of Version 2.
|
||||
func (n *layer) ListObjectsV2(ctx context.Context, p *ListObjectsParamsV2) (*ListObjectsInfoV2, error) {
|
||||
var result ListObjectsInfoV2
|
||||
|
||||
prm := commonLatestVersionsListingParams{
|
||||
commonVersionsListingParams: commonVersionsListingParams{
|
||||
BktInfo: p.BktInfo,
|
||||
Delimiter: p.Delimiter,
|
||||
Prefix: p.Prefix,
|
||||
MaxKeys: p.MaxKeys,
|
||||
Marker: p.StartAfter,
|
||||
Bookmark: p.ContinuationToken,
|
||||
},
|
||||
ListType: ListObjectsV2Type,
|
||||
}
|
||||
|
||||
objects, next, err := n.getLatestObjectsVersions(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if next != nil {
|
||||
result.IsTruncated = true
|
||||
result.NextContinuationToken = next.NodeVersion.OID.EncodeToString()
|
||||
}
|
||||
|
||||
result.Prefixes, result.Objects = triageExtendedObjects(objects)
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (n *layer) ListObjectVersions(ctx context.Context, p *ListObjectVersionsParams) (*ListObjectVersionsInfo, error) {
|
||||
prm := commonVersionsListingParams{
|
||||
BktInfo: p.BktInfo,
|
||||
Delimiter: p.Delimiter,
|
||||
Prefix: p.Prefix,
|
||||
MaxKeys: p.MaxKeys,
|
||||
Marker: p.KeyMarker,
|
||||
Bookmark: p.VersionIDMarker,
|
||||
}
|
||||
|
||||
objects, isTruncated, err := n.getAllObjectsVersions(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := &ListObjectVersionsInfo{
|
||||
KeyMarker: p.KeyMarker,
|
||||
VersionIDMarker: p.VersionIDMarker,
|
||||
IsTruncated: isTruncated,
|
||||
}
|
||||
|
||||
if res.IsTruncated {
|
||||
res.NextKeyMarker = objects[p.MaxKeys-1].NodeVersion.FilePath
|
||||
res.NextVersionIDMarker = objects[p.MaxKeys-1].NodeVersion.OID.EncodeToString()
|
||||
}
|
||||
|
||||
res.CommonPrefixes, objects = triageExtendedObjects(objects)
|
||||
res.Version, res.DeleteMarker = triageVersions(objects)
|
||||
return res, nil
|
||||
}
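When the result is truncated, NextKeyMarker and NextVersionIDMarker point at the last returned version and the client feeds them back to fetch the next page, as the continuation test earlier in this diff does. The first-page call shape below is inferred from the second (shown verbatim in the test) and is illustrative only:

page1 := listObjectsVersions(hc, bktName, "", "", "", "", maxKeys)
page2 := listObjectsVersions(hc, bktName, "", "", page1.NextKeyMarker, page1.NextVersionIDMarker, maxKeys)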
|
||||
|
||||
func (n *layer) getLatestObjectsVersions(ctx context.Context, p commonLatestVersionsListingParams) (objects []*data.ExtendedNodeVersion, next *data.ExtendedNodeVersion, err error) {
|
||||
if p.MaxKeys == 0 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
session, err := n.getListLatestVersionsSession(ctx, p)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
generator, errorCh := nodesGeneratorStream(ctx, p.commonVersionsListingParams, session)
|
||||
objOutCh, err := n.initWorkerPool(ctx, 2, p.commonVersionsListingParams, generator)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to init worker pool: %w", err)
|
||||
}
|
||||
|
||||
objects = make([]*data.ExtendedNodeVersion, 0, p.MaxKeys+1)
|
||||
objects = append(objects, session.Next...)
|
||||
for obj := range objOutCh {
|
||||
objects = append(objects, obj)
|
||||
}
|
||||
|
||||
if err = <-errorCh; err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get next object from stream: %w", err)
|
||||
}
|
||||
|
||||
sort.Slice(objects, func(i, j int) bool { return objects[i].NodeVersion.FilePath < objects[j].NodeVersion.FilePath })
|
||||
|
||||
if len(objects) > p.MaxKeys {
|
||||
next = objects[p.MaxKeys]
|
||||
n.putListLatestVersionsSession(ctx, p, session, objects)
|
||||
objects = objects[:p.MaxKeys]
|
||||
}
|
||||
|
||||
return
|
||||
}
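getLatestObjectsVersions collects up to MaxKeys+1 entries so that the extra element, if present, becomes the next marker without another round trip. The same idea in a generic, self-contained form (a sketch, not gateway code):

// Generic sketch of the MaxKeys+1 pagination pattern used above.
func paginate[T any](items []T, maxKeys int) (page []T, next *T) {
	if len(items) > maxKeys {
		next = &items[maxKeys]
		return items[:maxKeys], next
	}
	return items, nil
}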
|
||||
|
||||
func (n *layer) getAllObjectsVersions(ctx context.Context, p commonVersionsListingParams) ([]*data.ExtendedNodeVersion, bool, error) {
|
||||
if p.MaxKeys == 0 {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
session, err := n.getListAllVersionsSession(ctx, p)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
generator, errorCh := nodesGeneratorVersions(ctx, p, session)
|
||||
objOutCh, err := n.initWorkerPool(ctx, 2, p, generator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
allObjects := handleGeneratedVersions(objOutCh, p, session)
|
||||
|
||||
sort.SliceStable(allObjects, func(i, j int) bool { return allObjects[i].NodeVersion.FilePath < allObjects[j].NodeVersion.FilePath })
|
||||
|
||||
if err = <-errorCh; err != nil {
|
||||
return nil, false, fmt.Errorf("failed to get next object from stream: %w", err)
|
||||
}
|
||||
|
||||
var isTruncated bool
|
||||
if len(allObjects) > p.MaxKeys {
|
||||
isTruncated = true
|
||||
n.putListAllVersionsSession(ctx, p, session, allObjects)
|
||||
allObjects = allObjects[:p.MaxKeys]
|
||||
}
|
||||
|
||||
return allObjects, isTruncated, nil
|
||||
}
|
||||
|
||||
func handleGeneratedVersions(objOutCh <-chan *data.ExtendedNodeVersion, p commonVersionsListingParams, session *data.ListSession) []*data.ExtendedNodeVersion {
|
||||
var lastName string
|
||||
var listRowStartIndex int
|
||||
allObjects := make([]*data.ExtendedNodeVersion, 0, p.MaxKeys)
|
||||
for eoi := range objOutCh {
|
||||
name := eoi.NodeVersion.FilePath
|
||||
if eoi.DirName != "" {
|
||||
name = eoi.DirName
|
||||
}
|
||||
|
||||
if lastName != name {
|
||||
formVersionsListRow(allObjects, listRowStartIndex, session)
|
||||
listRowStartIndex = len(allObjects)
|
||||
allObjects = append(allObjects, eoi)
|
||||
} else if eoi.DirName == "" {
|
||||
allObjects = append(allObjects, eoi)
|
||||
}
|
||||
lastName = name
|
||||
}
|
||||
|
||||
formVersionsListRow(allObjects, listRowStartIndex, session)
|
||||
|
||||
return allObjects
|
||||
}
|
||||
|
||||
func formVersionsListRow(objects []*data.ExtendedNodeVersion, rowStartIndex int, session *data.ListSession) {
|
||||
if len(objects) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
prevVersions := objects[rowStartIndex:]
|
||||
sort.Slice(prevVersions, func(i, j int) bool {
|
||||
return prevVersions[j].NodeVersion.Timestamp < prevVersions[i].NodeVersion.Timestamp // sort in reverse order to have last added first
|
||||
})
|
||||
|
||||
prevVersions[0].IsLatest = len(session.Next) == 0 || session.Next[0].NodeVersion.FilePath != prevVersions[0].NodeVersion.FilePath
|
||||
|
||||
for _, version := range prevVersions[1:] {
|
||||
version.IsLatest = false
|
||||
}
|
||||
}
|
||||
|
||||
func (n *layer) getListLatestVersionsSession(ctx context.Context, p commonLatestVersionsListingParams) (*data.ListSession, error) {
|
||||
return n.getListVersionsSession(ctx, p.commonVersionsListingParams, true)
|
||||
}
|
||||
|
||||
func (n *layer) getListAllVersionsSession(ctx context.Context, p commonVersionsListingParams) (*data.ListSession, error) {
|
||||
return n.getListVersionsSession(ctx, p, false)
|
||||
}
|
||||
|
||||
func (n *layer) getListVersionsSession(ctx context.Context, p commonVersionsListingParams, latestOnly bool) (*data.ListSession, error) {
|
||||
owner := n.BearerOwner(ctx)
|
||||
|
||||
cacheKey := cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, p.Bookmark)
|
||||
session := n.cache.GetListSession(owner, cacheKey)
|
||||
if session == nil {
|
||||
return n.initNewVersionsByPrefixSession(ctx, p, latestOnly)
|
||||
}
|
||||
|
||||
if session.Acquired.Swap(true) {
|
||||
return n.initNewVersionsByPrefixSession(ctx, p, latestOnly)
|
||||
}
|
||||
|
||||
// After reading the next object from the session's stream, the cached value
// no longer matches the continuation token in the cache key, so drop it.
|
||||
n.cache.DeleteListSession(owner, cacheKey)
|
||||
|
||||
return session, nil
|
||||
}
|
||||
|
||||
func (n *layer) initNewVersionsByPrefixSession(ctx context.Context, p commonVersionsListingParams, latestOnly bool) (session *data.ListSession, err error) {
|
||||
session = &data.ListSession{NamesMap: make(map[string]struct{})}
|
||||
session.Context, session.Cancel = context.WithCancel(context.Background())
|
||||
|
||||
if bd, err := middleware.GetBoxData(ctx); err == nil {
|
||||
session.Context = middleware.SetBoxData(session.Context, bd)
|
||||
}
|
||||
|
||||
session.Stream, err = n.treeService.InitVersionsByPrefixStream(session.Context, p.BktInfo, p.Prefix, latestOnly)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return session, nil
|
||||
}
|
||||
|
||||
func (n *layer) putListLatestVersionsSession(ctx context.Context, p commonLatestVersionsListingParams, session *data.ListSession, allObjects []*data.ExtendedNodeVersion) {
|
||||
if len(allObjects) <= p.MaxKeys {
|
||||
return
|
||||
}
|
||||
|
||||
var cacheKey cache.ListSessionKey
|
||||
switch p.ListType {
|
||||
case ListObjectsV1Type:
|
||||
cacheKey = cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, allObjects[p.MaxKeys-1].Name())
|
||||
case ListObjectsV2Type:
|
||||
cacheKey = cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, allObjects[p.MaxKeys].NodeVersion.OID.EncodeToString())
|
||||
default:
|
||||
// should never happen
|
||||
panic("invalid list type")
|
||||
}
|
||||
|
||||
session.Acquired.Store(false)
|
||||
session.Next = []*data.ExtendedNodeVersion{allObjects[p.MaxKeys]}
|
||||
n.cache.PutListSession(n.BearerOwner(ctx), cacheKey, session)
|
||||
}
|
||||
|
||||
func (n *layer) putListAllVersionsSession(ctx context.Context, p commonVersionsListingParams, session *data.ListSession, allObjects []*data.ExtendedNodeVersion) {
|
||||
if len(allObjects) <= p.MaxKeys {
|
||||
return
|
||||
}
|
||||
|
||||
session.Acquired.Store(false)
|
||||
|
||||
session.Next = make([]*data.ExtendedNodeVersion, len(allObjects)-p.MaxKeys+1)
|
||||
session.Next[0] = allObjects[p.MaxKeys-1]
|
||||
for i, node := range allObjects[p.MaxKeys:] {
|
||||
session.Next[i+1] = node
|
||||
}
|
||||
|
||||
cacheKey := cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, session.Next[0].NodeVersion.OID.EncodeToString())
|
||||
n.cache.PutListSession(n.BearerOwner(ctx), cacheKey, session)
|
||||
}
|
||||
|
||||
func nodesGeneratorStream(ctx context.Context, p commonVersionsListingParams, stream *data.ListSession) (<-chan *data.ExtendedNodeVersion, <-chan error) {
|
||||
nodeCh := make(chan *data.ExtendedNodeVersion, 1000)
|
||||
errCh := make(chan error, 1)
|
||||
existed := stream.NamesMap
|
||||
|
||||
if len(stream.Next) != 0 {
|
||||
existed[continuationToken] = struct{}{}
|
||||
}
|
||||
|
||||
limit := p.MaxKeys
|
||||
if len(stream.Next) == 0 {
|
||||
limit++
|
||||
}
|
||||
|
||||
go func() {
|
||||
var generated int
|
||||
var err error
|
||||
|
||||
LOOP:
|
||||
for err == nil {
|
||||
node, err := stream.Stream.Next(ctx)
|
||||
if err != nil {
|
||||
if !errors.Is(err, io.EOF) {
|
||||
errCh <- fmt.Errorf("stream next: %w", err)
|
||||
}
|
||||
break LOOP
|
||||
}
|
||||
|
||||
nodeExt := &data.ExtendedNodeVersion{
|
||||
NodeVersion: node,
|
||||
IsLatest: true,
|
||||
DirName: tryDirectoryName(node, p.Prefix, p.Delimiter),
|
||||
}
|
||||
|
||||
if shouldSkip(nodeExt, p, existed) {
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
break LOOP
|
||||
case nodeCh <- nodeExt:
|
||||
generated++
|
||||
|
||||
if generated == limit { // we use maxKeys+1 to be able to know nextMarker/nextContinuationToken
|
||||
break LOOP
|
||||
}
|
||||
}
|
||||
}
|
||||
close(nodeCh)
|
||||
close(errCh)
|
||||
}()
|
||||
|
||||
return nodeCh, errCh
|
||||
}
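nodesGeneratorStream is a bounded producer: it pushes tree-stream nodes into a buffered channel until the context is cancelled or the limit (MaxKeys, plus one when there is no cached tail) is reached, and reports a non-EOF stream error through a one-element error channel. A stripped-down, self-contained sketch of that shape; the element type and callback are placeholders:

// Generic bounded-generator sketch (assumes "context", "errors" and "io" are imported).
func generate(ctx context.Context, limit int, next func() (int, error)) (<-chan int, <-chan error) {
	out := make(chan int, limit)
	errCh := make(chan error, 1)
	go func() {
		defer close(out)
		defer close(errCh)
		for produced := 0; produced < limit; produced++ {
			v, err := next()
			if err != nil {
				if !errors.Is(err, io.EOF) {
					errCh <- err
				}
				return
			}
			select {
			case <-ctx.Done():
				return
			case out <- v:
			}
		}
	}()
	return out, errCh
}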
|
||||
|
||||
func nodesGeneratorVersions(ctx context.Context, p commonVersionsListingParams, stream *data.ListSession) (<-chan *data.ExtendedNodeVersion, <-chan error) {
|
||||
nodeCh := make(chan *data.ExtendedNodeVersion, 1000)
|
||||
errCh := make(chan error, 1)
|
||||
existed := stream.NamesMap
|
||||
|
||||
delete(existed, continuationToken)
|
||||
|
||||
go func() {
|
||||
var (
|
||||
generated int
|
||||
ind int
|
||||
err error
|
||||
lastName string
|
||||
node *data.NodeVersion
|
||||
nodeExt *data.ExtendedNodeVersion
|
||||
)
|
||||
|
||||
LOOP:
|
||||
for err == nil {
|
||||
if ind < len(stream.Next) {
|
||||
nodeExt = stream.Next[ind]
|
||||
ind++
|
||||
} else {
|
||||
node, err = stream.Stream.Next(ctx)
|
||||
if err != nil {
|
||||
if !errors.Is(err, io.EOF) {
|
||||
errCh <- fmt.Errorf("stream next: %w", err)
|
||||
}
|
||||
break LOOP
|
||||
}
|
||||
|
||||
nodeExt = &data.ExtendedNodeVersion{
|
||||
NodeVersion: node,
|
||||
DirName: tryDirectoryName(node, p.Prefix, p.Delimiter),
|
||||
}
|
||||
}
|
||||
|
||||
if shouldSkipVersions(nodeExt, p, existed) {
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
break LOOP
|
||||
case nodeCh <- nodeExt:
|
||||
generated++
|
||||
if generated > p.MaxKeys && nodeExt.NodeVersion.FilePath != lastName {
|
||||
break LOOP
|
||||
}
|
||||
lastName = nodeExt.NodeVersion.FilePath
|
||||
}
|
||||
}
|
||||
close(nodeCh)
|
||||
close(errCh)
|
||||
}()
|
||||
|
||||
return nodeCh, errCh
|
||||
}
|
||||
|
||||
func (n *layer) initWorkerPool(ctx context.Context, size int, p commonVersionsListingParams, input <-chan *data.ExtendedNodeVersion) (<-chan *data.ExtendedNodeVersion, error) {
|
||||
reqLog := n.reqLogger(ctx)
|
||||
pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("coudln't init go pool for listing: %w", err)
|
||||
}
|
||||
objCh := make(chan *data.ExtendedNodeVersion, size)
|
||||
|
||||
go func() {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
LOOP:
|
||||
for node := range input {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
break LOOP
|
||||
default:
|
||||
}
|
||||
|
||||
if node.DirName != "" || node.NodeVersion.IsFilledExtra() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case objCh <- node:
|
||||
}
|
||||
} else {
|
||||
// We have to make a copy of the pointer to data.NodeVersion
// to get the correct value inside the submitted task function.
|
||||
func(node *data.ExtendedNodeVersion) {
|
||||
wg.Add(1)
|
||||
err = pool.Submit(func() {
|
||||
defer wg.Done()
|
||||
|
||||
oi := n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.BktInfo, node.NodeVersion)
|
||||
if oi == nil {
|
||||
// try to get object again
|
||||
if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.BktInfo, node.NodeVersion); oi == nil {
|
||||
// do not process objects that are definitely missing in the object service
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
realSize, err := GetObjectSize(oi)
|
||||
if err != nil {
|
||||
reqLog.Debug(logs.FailedToGetRealObjectSize, zap.Error(err))
|
||||
realSize = oi.Size
|
||||
}
|
||||
|
||||
node.NodeVersion.FillExtra(&oi.Owner, &oi.Created, realSize)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case objCh <- node:
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
wg.Done()
|
||||
reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
|
||||
}
|
||||
}(node)
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
close(objCh)
|
||||
pool.Release()
|
||||
}()
|
||||
|
||||
return objCh, nil
|
||||
}
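initWorkerPool uses an ants goroutine pool of a fixed size to resolve object metadata concurrently, a WaitGroup to know when every submitted task has finished, and only then closes the output channel. A minimal usage sketch of the same pattern with the same library (fragment; assumes "github.com/panjf2000/ants/v2" and "sync" are imported):

// Minimal ants pool + WaitGroup pattern, illustrative only.
pool, err := ants.NewPool(2)
if err != nil {
	panic(err)
}
defer pool.Release()

var wg sync.WaitGroup
results := make(chan int, 10) // buffered for every task so workers never block on send
for i := 0; i < 10; i++ {
	i := i
	wg.Add(1)
	if submitErr := pool.Submit(func() {
		defer wg.Done()
		results <- i * i // stand-in for "head the object and fill its metadata"
	}); submitErr != nil {
		wg.Done() // the task was never scheduled
	}
}
wg.Wait()
close(results)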
|
||||
|
||||
func shouldSkip(node *data.ExtendedNodeVersion, p commonVersionsListingParams, existed map[string]struct{}) bool {
|
||||
if node.NodeVersion.IsDeleteMarker {
|
||||
return true
|
||||
}
|
||||
|
||||
filePath := node.NodeVersion.FilePath
|
||||
if node.DirName != "" {
|
||||
filePath = node.DirName
|
||||
}
|
||||
|
||||
if _, ok := existed[filePath]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
if filePath <= p.Marker {
|
||||
return true
|
||||
}
|
||||
|
||||
if p.Bookmark != "" {
|
||||
if _, ok := existed[continuationToken]; !ok {
|
||||
if p.Bookmark != node.NodeVersion.OID.EncodeToString() {
|
||||
return true
|
||||
}
|
||||
existed[continuationToken] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
existed[filePath] = struct{}{}
|
||||
return false
|
||||
}
|
||||
|
||||
func shouldSkipVersions(node *data.ExtendedNodeVersion, p commonVersionsListingParams, existed map[string]struct{}) bool {
|
||||
filePath := node.NodeVersion.FilePath
|
||||
if node.DirName != "" {
|
||||
filePath = node.DirName
|
||||
if _, ok := existed[filePath]; ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if filePath < p.Marker {
|
||||
return true
|
||||
}
|
||||
|
||||
if p.Bookmark != "" {
|
||||
if _, ok := existed[continuationToken]; !ok {
|
||||
if p.Bookmark != node.NodeVersion.OID.EncodeToString() {
|
||||
return true
|
||||
}
|
||||
existed[continuationToken] = struct{}{}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
existed[filePath] = struct{}{}
|
||||
return false
|
||||
}
|
||||
|
||||
func triageExtendedObjects(allObjects []*data.ExtendedNodeVersion) (prefixes []string, objects []*data.ExtendedNodeVersion) {
|
||||
for _, ov := range allObjects {
|
||||
if ov.DirName != "" {
|
||||
prefixes = append(prefixes, ov.DirName)
|
||||
} else {
|
||||
objects = append(objects, ov)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (n *layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo *data.BucketInfo, node *data.NodeVersion) (oi *data.ObjectInfo) {
|
||||
owner := n.BearerOwner(ctx)
|
||||
if extInfo := n.cache.GetObject(owner, newAddress(bktInfo.CID, node.OID)); extInfo != nil {
|
||||
return extInfo.ObjectInfo
|
||||
}
|
||||
|
||||
meta, err := n.objectHead(ctx, bktInfo, node.OID)
|
||||
if err != nil {
|
||||
n.reqLogger(ctx).Warn(logs.CouldNotFetchObjectMeta, zap.Error(err))
|
||||
return nil
|
||||
}
|
||||
|
||||
oi = objectInfoFromMeta(bktInfo, meta)
|
||||
oi.MD5Sum = node.MD5
|
||||
n.cache.PutObject(owner, &data.ExtendedObjectInfo{ObjectInfo: oi, NodeVersion: node})
|
||||
|
||||
return oi
|
||||
}
|
||||
|
||||
// tryDirectoryName forms a directory name from the prefix and delimiter.
// If the node isn't a directory, an empty string is returned.
// This function doesn't check whether the node has the prefix; the caller must do that.
|
||||
func tryDirectoryName(node *data.NodeVersion, prefix, delimiter string) string {
|
||||
if len(delimiter) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
tail := strings.TrimPrefix(node.FilePath, prefix)
|
||||
index := strings.Index(tail, delimiter)
|
||||
if index >= 0 {
|
||||
return prefix + tail[:index+1]
|
||||
}
|
||||
|
||||
return ""
|
||||
}
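For example, with prefix "boo/" and delimiter "/", a node whose FilePath is "boo/baz/xyzzy" collapses into the common prefix "boo/baz/", which is exactly what the listing tests at the top of this diff assert. A standalone copy of the same logic, for illustration:

// Standalone illustration of tryDirectoryName's prefix/delimiter handling.
func dirName(filePath, prefix, delimiter string) string {
	if delimiter == "" {
		return ""
	}
	tail := strings.TrimPrefix(filePath, prefix)
	if i := strings.Index(tail, delimiter); i >= 0 {
		return prefix + tail[:i+1]
	}
	return ""
}

// dirName("boo/baz/xyzzy", "boo/", "/") == "boo/baz/"
// dirName("boo/bar", "boo/", "/")       == "" (a regular object, not a common prefix)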
|
||||
|
||||
func filterVersionsByMarker(objects []*data.ExtendedNodeVersion, p *ListObjectVersionsParams) ([]*data.ExtendedNodeVersion, error) {
|
||||
if p.KeyMarker == "" {
|
||||
return objects, nil
|
||||
}
|
||||
|
||||
for i, obj := range objects {
|
||||
if obj.NodeVersion.FilePath == p.KeyMarker {
|
||||
for j := i; j < len(objects); j++ {
|
||||
if objects[j].NodeVersion.FilePath != obj.NodeVersion.FilePath {
|
||||
if p.VersionIDMarker == "" {
|
||||
return objects[j:], nil
|
||||
}
|
||||
break
|
||||
}
|
||||
if objects[j].NodeVersion.OID.EncodeToString() == p.VersionIDMarker {
|
||||
return objects[j+1:], nil
|
||||
}
|
||||
}
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrInvalidVersion)
|
||||
} else if obj.NodeVersion.FilePath > p.KeyMarker {
|
||||
if p.VersionIDMarker != "" {
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrInvalidVersion)
|
||||
}
|
||||
return objects[i:], nil
|
||||
}
|
||||
}
|
||||
|
||||
// Don't use nil as the empty slice, to be consistent with `return objects[j+1:], nil`
// above, which can also be empty.
|
||||
return []*data.ExtendedNodeVersion{}, nil
|
||||
}
|
||||
|
||||
func triageVersions(objVersions []*data.ExtendedNodeVersion) ([]*data.ExtendedNodeVersion, []*data.ExtendedNodeVersion) {
|
||||
if len(objVersions) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var resVersion []*data.ExtendedNodeVersion
|
||||
var resDelMarkVersions []*data.ExtendedNodeVersion
|
||||
|
||||
for _, version := range objVersions {
|
||||
if version.NodeVersion.IsDeleteMarker {
|
||||
resDelMarkVersions = append(resDelMarkVersions, version)
|
||||
} else {
|
||||
resVersion = append(resVersion, version)
|
||||
}
|
||||
}
|
||||
|
||||
return resVersion, resDelMarkVersions
|
||||
}
@ -15,6 +15,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
||||
|
@ -71,6 +72,7 @@ type (
|
|||
Size uint64
|
||||
Reader io.Reader
|
||||
ContentMD5 string
|
||||
ContentSHA256Hash string
|
||||
}
|
||||
|
||||
UploadCopyParams struct {
|
||||
|
@ -260,6 +262,20 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
|
|||
size = decSize
|
||||
}
|
||||
|
||||
if !p.Info.Encryption.Enabled() && len(p.ContentSHA256Hash) > 0 && !auth.IsStandardContentSHA256(p.ContentSHA256Hash) {
|
||||
contentHashBytes, err := hex.DecodeString(p.ContentSHA256Hash)
|
||||
if err != nil {
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch)
|
||||
}
|
||||
if !bytes.Equal(contentHashBytes, hash) {
|
||||
err = n.objectDelete(ctx, bktInfo, id)
|
||||
if err != nil {
|
||||
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
|
||||
}
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch)
|
||||
}
|
||||
}
|
||||
|
||||
n.reqLogger(ctx).Debug(logs.UploadPart,
|
||||
zap.String("multipart upload", p.Info.UploadID), zap.Int("part number", p.PartNumber),
|
||||
zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
|
||||
|
@ -376,7 +392,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
md5Hash := md5.New()
|
||||
for i, part := range p.Parts {
|
||||
partInfo := partsInfo[part.PartNumber]
|
||||
if partInfo == nil || (part.ETag != partInfo.ETag && part.ETag != partInfo.MD5) {
|
||||
if partInfo == nil || data.UnQuote(part.ETag) != partInfo.GetETag(n.features.MD5Enabled()) {
|
||||
return nil, nil, fmt.Errorf("%w: unknown part %d or etag mismatched", s3errors.GetAPIError(s3errors.ErrInvalidPart), part.PartNumber)
|
||||
}
|
||||
delete(partsInfo, part.PartNumber)
|
||||
|
@ -473,7 +489,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
n.cache.DeleteObject(addr)
|
||||
}
|
||||
|
||||
return uploadData, extObjInfo, n.treeService.DeleteMultipartUpload(ctx, p.Info.Bkt, multipartInfo.ID)
|
||||
return uploadData, extObjInfo, n.treeService.DeleteMultipartUpload(ctx, p.Info.Bkt, multipartInfo)
|
||||
}
|
||||
|
||||
func (n *layer) ListMultipartUploads(ctx context.Context, p *ListMultipartUploadsParams) (*ListMultipartUploadsInfo, error) {
|
||||
|
@ -549,7 +565,7 @@ func (n *layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) e
|
|||
}
|
||||
}
|
||||
|
||||
return n.treeService.DeleteMultipartUpload(ctx, p.Bkt, multipartInfo.ID)
|
||||
return n.treeService.DeleteMultipartUpload(ctx, p.Bkt, multipartInfo)
|
||||
}
|
||||
|
||||
func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsInfo, error) {
|
||||
|
@ -571,7 +587,7 @@ func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
|
|||
|
||||
for _, partInfo := range partsInfo {
|
||||
parts = append(parts, &Part{
|
||||
ETag: partInfo.ETag,
|
||||
ETag: data.Quote(partInfo.GetETag(n.features.MD5Enabled())),
|
||||
LastModified: partInfo.Created.UTC().Format(time.RFC3339),
|
||||
PartNumber: partInfo.Number,
|
||||
Size: partInfo.Size,
@ -13,13 +13,11 @@ import (
|
|||
"io"
|
||||
"mime"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
|
@ -28,7 +26,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/minio/sio"
|
||||
"github.com/panjf2000/ants/v2"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
@ -49,38 +46,6 @@ type (
|
|||
bktInfo *data.BucketInfo
|
||||
}
|
||||
|
||||
// ListObjectsParamsCommon contains common parameters for ListObjectsV1 and ListObjectsV2.
|
||||
ListObjectsParamsCommon struct {
|
||||
BktInfo *data.BucketInfo
|
||||
Delimiter string
|
||||
Encode string
|
||||
MaxKeys int
|
||||
Prefix string
|
||||
}
|
||||
|
||||
// ListObjectsParamsV1 contains params for ListObjectsV1.
|
||||
ListObjectsParamsV1 struct {
|
||||
ListObjectsParamsCommon
|
||||
Marker string
|
||||
}
|
||||
|
||||
// ListObjectsParamsV2 contains params for ListObjectsV2.
|
||||
ListObjectsParamsV2 struct {
|
||||
ListObjectsParamsCommon
|
||||
ContinuationToken string
|
||||
StartAfter string
|
||||
FetchOwner bool
|
||||
}
|
||||
|
||||
allObjectParams struct {
|
||||
Bucket *data.BucketInfo
|
||||
Delimiter string
|
||||
Prefix string
|
||||
MaxKeys int
|
||||
Marker string
|
||||
ContinuationToken string
|
||||
}
|
||||
|
||||
DeleteMarkerError struct {
|
||||
ErrorCode apiErrors.ErrorCode
|
||||
}
|
||||
|
@ -316,14 +281,30 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
}
|
||||
}
|
||||
|
||||
n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
|
||||
if !p.Encryption.Enabled() && len(p.ContentSHA256Hash) > 0 && !auth.IsStandardContentSHA256(p.ContentSHA256Hash) {
|
||||
contentHashBytes, err := hex.DecodeString(p.ContentSHA256Hash)
|
||||
if err != nil {
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
|
||||
}
|
||||
if !bytes.Equal(contentHashBytes, hash) {
|
||||
err = n.objectDelete(ctx, p.BktInfo, id)
|
||||
if err != nil {
|
||||
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
|
||||
}
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
|
||||
}
|
||||
}
|
||||
|
||||
n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
|
||||
now := TimeNow(ctx)
|
||||
newVersion := &data.NodeVersion{
|
||||
BaseNodeVersion: data.BaseNodeVersion{
|
||||
OID: id,
|
||||
ETag: hex.EncodeToString(hash),
|
||||
FilePath: p.Object,
|
||||
Size: size,
|
||||
Size: p.Size,
|
||||
Created: &now,
|
||||
Owner: &n.gateOwner,
|
||||
},
|
||||
IsUnversioned: !bktSettings.VersioningEnabled(),
|
||||
IsCombined: p.Header[MultipartObjectSize] != "",
|
||||
|
@ -396,7 +377,7 @@ func (n *layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.Bucke
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if node.IsDeleteMarker() {
|
||||
if node.IsDeleteMarker {
|
||||
return nil, DeleteMarkerError{ErrorCode: apiErrors.ErrNoSuchKey}
|
||||
}
|
||||
|
||||
|
@ -453,7 +434,7 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
|
|||
return extObjInfo, nil
|
||||
}
|
||||
|
||||
if foundVersion.IsDeleteMarker() {
|
||||
if foundVersion.IsDeleteMarker {
|
||||
return nil, DeleteMarkerError{ErrorCode: apiErrors.ErrMethodNotAllowed}
|
||||
}
|
||||
|
||||
|
@ -517,61 +498,6 @@ func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktIn
|
|||
return size, id, hash.Sum(nil), md5Hash.Sum(nil), nil
|
||||
}
|
||||
|
||||
// ListObjectsV1 returns objects in a bucket for requests of Version 1.
|
||||
func (n *layer) ListObjectsV1(ctx context.Context, p *ListObjectsParamsV1) (*ListObjectsInfoV1, error) {
|
||||
var result ListObjectsInfoV1
|
||||
|
||||
prm := allObjectParams{
|
||||
Bucket: p.BktInfo,
|
||||
Delimiter: p.Delimiter,
|
||||
Prefix: p.Prefix,
|
||||
MaxKeys: p.MaxKeys,
|
||||
Marker: p.Marker,
|
||||
}
|
||||
|
||||
objects, next, err := n.getLatestObjectsVersions(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if next != nil {
|
||||
result.IsTruncated = true
|
||||
result.NextMarker = objects[len(objects)-1].Name
|
||||
}
|
||||
|
||||
result.Prefixes, result.Objects = triageObjects(objects)
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// ListObjectsV2 returns objects in a bucket for requests of Version 2.
|
||||
func (n *layer) ListObjectsV2(ctx context.Context, p *ListObjectsParamsV2) (*ListObjectsInfoV2, error) {
|
||||
var result ListObjectsInfoV2
|
||||
|
||||
prm := allObjectParams{
|
||||
Bucket: p.BktInfo,
|
||||
Delimiter: p.Delimiter,
|
||||
Prefix: p.Prefix,
|
||||
MaxKeys: p.MaxKeys,
|
||||
Marker: p.StartAfter,
|
||||
ContinuationToken: p.ContinuationToken,
|
||||
}
|
||||
|
||||
objects, next, err := n.getLatestObjectsVersions(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if next != nil {
|
||||
result.IsTruncated = true
|
||||
result.NextContinuationToken = next.ID.EncodeToString()
|
||||
}
|
||||
|
||||
result.Prefixes, result.Objects = triageObjects(objects)
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
type logWrapper struct {
|
||||
log *zap.Logger
|
||||
}
|
||||
|
@ -580,310 +506,11 @@ func (l *logWrapper) Printf(format string, args ...interface{}) {
|
|||
l.log.Info(fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (n *layer) getLatestObjectsVersions(ctx context.Context, p allObjectParams) (objects []*data.ObjectInfo, next *data.ObjectInfo, err error) {
|
||||
if p.MaxKeys == 0 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
owner := n.BearerOwner(ctx)
|
||||
cacheKey := cache.CreateObjectsListCacheKey(p.Bucket.CID, p.Prefix, true)
|
||||
nodeVersions := n.cache.GetList(owner, cacheKey)
|
||||
|
||||
if nodeVersions == nil {
|
||||
nodeVersions, err = n.treeService.GetLatestVersionsByPrefix(ctx, p.Bucket, p.Prefix)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
n.cache.PutList(owner, cacheKey, nodeVersions)
|
||||
}
|
||||
|
||||
if len(nodeVersions) == 0 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
sort.Slice(nodeVersions, func(i, j int) bool {
|
||||
return nodeVersions[i].FilePath < nodeVersions[j].FilePath
|
||||
})
|
||||
|
||||
poolCtx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
objOutCh, err := n.initWorkerPool(poolCtx, 2, p, nodesGenerator(poolCtx, p, nodeVersions))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to init worker pool: %w", err)
|
||||
}
|
||||
|
||||
objects = make([]*data.ObjectInfo, 0, p.MaxKeys)
|
||||
|
||||
for obj := range objOutCh {
|
||||
objects = append(objects, obj)
|
||||
}
|
||||
|
||||
sort.Slice(objects, func(i, j int) bool {
|
||||
return objects[i].Name < objects[j].Name
|
||||
})
|
||||
|
||||
if len(objects) > p.MaxKeys {
|
||||
next = objects[p.MaxKeys]
|
||||
objects = objects[:p.MaxKeys]
|
||||
}
|
||||
|
||||
return
|
||||
}
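The listing above requests up to MaxKeys+1 latest versions so truncation can be detected and the next marker computed without a second tree query. A minimal stand-alone sketch of that pagination idea; plain strings stand in for *data.ObjectInfo and the paginate helper is illustrative, not gateway code:

```go
package main

import "fmt"

// paginate returns at most maxKeys items plus the name the next page would
// start from; fetching one extra element is what makes truncation detectable.
func paginate(sorted []string, maxKeys int) (page []string, next string, truncated bool) {
	if len(sorted) > maxKeys {
		return sorted[:maxKeys], sorted[maxKeys], true
	}
	return sorted, "", false
}

func main() {
	page, next, truncated := paginate([]string{"a", "b", "c"}, 2)
	fmt.Println(page, next, truncated) // [a b] c true
}
```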
|
||||
|
||||
func nodesGenerator(ctx context.Context, p allObjectParams, nodeVersions []*data.NodeVersion) <-chan *data.NodeVersion {
	nodeCh := make(chan *data.NodeVersion)
	existed := make(map[string]struct{}, len(nodeVersions)) // to squash the same directories

	go func() {
		var generated int
	LOOP:
		for _, node := range nodeVersions {
			if shouldSkip(node, p, existed) {
				continue
			}

			select {
			case <-ctx.Done():
				break LOOP
			case nodeCh <- node:
				generated++
				if generated == p.MaxKeys+1 { // we use maxKeys+1 to be able to know nextMarker/nextContinuationToken
					break LOOP
				}
			}
		}
		close(nodeCh)
	}()

	return nodeCh
}
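For illustration, a self-contained version of the same bounded-generator pattern: a goroutine streams at most limit items into an unbuffered channel and stops early when the context is cancelled. Types and names below are simplified stand-ins, not the gateway's.

```go
package main

import (
	"context"
	"fmt"
)

// generator streams at most limit items into an unbuffered channel and stops
// early when the context is cancelled, mirroring the pattern shown above.
func generator(ctx context.Context, items []int, limit int) <-chan int {
	out := make(chan int)
	go func() {
		defer close(out)
		var sent int
		for _, it := range items {
			select {
			case <-ctx.Done():
				return
			case out <- it:
				sent++
				if sent == limit {
					return
				}
			}
		}
	}()
	return out
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	for v := range generator(ctx, []int{1, 2, 3, 4, 5}, 3) {
		fmt.Println(v) // 1 2 3
	}
}
```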
|
||||
|
||||
func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams, input <-chan *data.NodeVersion) (<-chan *data.ObjectInfo, error) {
|
||||
reqLog := n.reqLogger(ctx)
|
||||
pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("coudln't init go pool for listing: %w", err)
|
||||
}
|
||||
objCh := make(chan *data.ObjectInfo)
|
||||
|
||||
go func() {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
LOOP:
|
||||
for node := range input {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
break LOOP
|
||||
default:
|
||||
}
|
||||
|
||||
// We have to make a copy of the pointer to data.NodeVersion
// to get the correct value in the submitted task function.
|
||||
func(node *data.NodeVersion) {
|
||||
wg.Add(1)
|
||||
err = pool.Submit(func() {
|
||||
defer wg.Done()
|
||||
oi := n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter)
|
||||
if oi == nil {
|
||||
// try to get object again
|
||||
if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter); oi == nil {
|
||||
// do not process objects which are definitely missing in the object service
|
||||
return
|
||||
}
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case objCh <- oi:
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
wg.Done()
|
||||
reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
|
||||
}
|
||||
}(node)
|
||||
}
|
||||
wg.Wait()
|
||||
close(objCh)
|
||||
pool.Release()
|
||||
}()
|
||||
|
||||
return objCh, nil
|
||||
}
|
||||
|
||||
func (n *layer) bucketNodeVersions(ctx context.Context, bkt *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
|
||||
var err error
|
||||
|
||||
owner := n.BearerOwner(ctx)
|
||||
cacheKey := cache.CreateObjectsListCacheKey(bkt.CID, prefix, false)
|
||||
nodeVersions := n.cache.GetList(owner, cacheKey)
|
||||
|
||||
if nodeVersions == nil {
|
||||
nodeVersions, err = n.treeService.GetAllVersionsByPrefix(ctx, bkt, prefix)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get all versions from tree service: %w", err)
|
||||
}
|
||||
|
||||
n.cache.PutList(owner, cacheKey, nodeVersions)
|
||||
}
|
||||
|
||||
return nodeVersions, nil
|
||||
}
|
||||
|
||||
func (n *layer) getAllObjectsVersions(ctx context.Context, bkt *data.BucketInfo, prefix, delimiter string) (map[string][]*data.ExtendedObjectInfo, error) {
|
||||
nodeVersions, err := n.bucketNodeVersions(ctx, bkt, prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
versions := make(map[string][]*data.ExtendedObjectInfo, len(nodeVersions))
|
||||
|
||||
for _, nodeVersion := range nodeVersions {
|
||||
oi := &data.ObjectInfo{}
|
||||
|
||||
if nodeVersion.IsDeleteMarker() { // delete marker does not match any object in FrostFS
|
||||
oi.ID = nodeVersion.OID
|
||||
oi.Name = nodeVersion.FilePath
|
||||
oi.Owner = nodeVersion.DeleteMarker.Owner
|
||||
oi.Created = nodeVersion.DeleteMarker.Created
|
||||
oi.IsDeleteMarker = true
|
||||
} else {
|
||||
if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, bkt, nodeVersion, prefix, delimiter); oi == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
eoi := &data.ExtendedObjectInfo{
|
||||
ObjectInfo: oi,
|
||||
NodeVersion: nodeVersion,
|
||||
}
|
||||
|
||||
objVersions, ok := versions[oi.Name]
|
||||
if !ok {
|
||||
objVersions = []*data.ExtendedObjectInfo{eoi}
|
||||
} else if !oi.IsDir {
|
||||
objVersions = append(objVersions, eoi)
|
||||
}
|
||||
versions[oi.Name] = objVersions
|
||||
}
|
||||
|
||||
return versions, nil
|
||||
}
|
||||
|
||||
func IsSystemHeader(key string) bool {
	_, ok := api.SystemMetadata[key]
	return ok || strings.HasPrefix(key, api.FrostFSSystemMetadataPrefix)
}
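A hedged usage sketch of such a header classifier: attributes the gateway treats as system ones are skipped when user metadata is copied. The metadata map and prefix below are local stand-ins for the api package values, chosen only for the example.

```go
package main

import (
	"fmt"
	"strings"
)

// Stand-ins for api.SystemMetadata and api.FrostFSSystemMetadataPrefix.
var systemMetadata = map[string]struct{}{"Content-Type": {}, "Timestamp": {}}

const systemMetadataPrefix = "S3-"

func isSystemHeader(key string) bool {
	_, ok := systemMetadata[key]
	return ok || strings.HasPrefix(key, systemMetadataPrefix)
}

func main() {
	userMeta := map[string]string{"Content-Type": "text/plain", "S3-Completed-Parts": "1", "color": "red"}
	copied := map[string]string{}
	for k, v := range userMeta {
		if isSystemHeader(k) {
			continue // skip attributes managed by the gateway itself
		}
		copied[k] = v
	}
	fmt.Println(copied) // map[color:red]
}
```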
|
||||
|
||||
func shouldSkip(node *data.NodeVersion, p allObjectParams, existed map[string]struct{}) bool {
	if node.IsDeleteMarker() {
		return true
	}

	filePath := node.FilePath
	if dirName := tryDirectoryName(node, p.Prefix, p.Delimiter); len(dirName) != 0 {
		filePath = dirName
	}
	if _, ok := existed[filePath]; ok {
		return true
	}

	if filePath <= p.Marker {
		return true
	}

	if p.ContinuationToken != "" {
		if _, ok := existed[continuationToken]; !ok {
			if p.ContinuationToken != node.OID.EncodeToString() {
				return true
			}
			existed[continuationToken] = struct{}{}
		}
	}

	existed[filePath] = struct{}{}
	return false
}
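The same filtering idea, reduced to the simple case (flat keys, no delimiter, no continuation token), as a runnable sketch: entries lexicographically at or before the marker are skipped and duplicates are collapsed. Names here are illustrative only.

```go
package main

import "fmt"

// filterAfterMarker keeps keys strictly after the marker and drops duplicates,
// mirroring the marker and existed-map checks in the function above.
func filterAfterMarker(keys []string, marker string) []string {
	existed := make(map[string]struct{}, len(keys))
	var out []string
	for _, k := range keys {
		if k <= marker {
			continue
		}
		if _, ok := existed[k]; ok {
			continue
		}
		existed[k] = struct{}{}
		out = append(out, k)
	}
	return out
}

func main() {
	fmt.Println(filterAfterMarker([]string{"a", "b", "b", "c"}, "a")) // [b c]
}
```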
|
||||
|
||||
func triageObjects(allObjects []*data.ObjectInfo) (prefixes []string, objects []*data.ObjectInfo) {
|
||||
for _, ov := range allObjects {
|
||||
if ov.IsDir {
|
||||
prefixes = append(prefixes, ov.Name)
|
||||
} else {
|
||||
objects = append(objects, ov)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func triageExtendedObjects(allObjects []*data.ExtendedObjectInfo) (prefixes []string, objects []*data.ExtendedObjectInfo) {
|
||||
for _, ov := range allObjects {
|
||||
if ov.ObjectInfo.IsDir {
|
||||
prefixes = append(prefixes, ov.ObjectInfo.Name)
|
||||
} else {
|
||||
objects = append(objects, ov)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (n *layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo *data.BucketInfo, node *data.NodeVersion, prefix, delimiter string) (oi *data.ObjectInfo) {
|
||||
if oiDir := tryDirectory(bktInfo, node, prefix, delimiter); oiDir != nil {
|
||||
return oiDir
|
||||
}
|
||||
|
||||
owner := n.BearerOwner(ctx)
|
||||
if extInfo := n.cache.GetObject(owner, newAddress(bktInfo.CID, node.OID)); extInfo != nil {
|
||||
return extInfo.ObjectInfo
|
||||
}
|
||||
|
||||
meta, err := n.objectHead(ctx, bktInfo, node.OID)
|
||||
if err != nil {
|
||||
n.reqLogger(ctx).Warn(logs.CouldNotFetchObjectMeta, zap.Error(err))
|
||||
return nil
|
||||
}
|
||||
|
||||
oi = objectInfoFromMeta(bktInfo, meta)
|
||||
oi.MD5Sum = node.MD5
|
||||
n.cache.PutObject(owner, &data.ExtendedObjectInfo{ObjectInfo: oi, NodeVersion: node})
|
||||
|
||||
return oi
|
||||
}
|
||||
|
||||
func tryDirectory(bktInfo *data.BucketInfo, node *data.NodeVersion, prefix, delimiter string) *data.ObjectInfo {
|
||||
dirName := tryDirectoryName(node, prefix, delimiter)
|
||||
if len(dirName) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &data.ObjectInfo{
|
||||
ID: node.OID, // to use it as continuation token
|
||||
CID: bktInfo.CID,
|
||||
IsDir: true,
|
||||
IsDeleteMarker: node.IsDeleteMarker(),
|
||||
Bucket: bktInfo.Name,
|
||||
Name: dirName,
|
||||
}
|
||||
}
|
||||
|
||||
// tryDirectoryName forms a directory name from the prefix and delimiter.
// If the node isn't a directory, an empty string is returned.
// This function doesn't check that the node has the prefix; the caller must do that.
func tryDirectoryName(node *data.NodeVersion, prefix, delimiter string) string {
	if len(delimiter) == 0 {
		return ""
	}

	tail := strings.TrimPrefix(node.FilePath, prefix)
	index := strings.Index(tail, delimiter)
	if index >= 0 {
		return prefix + tail[:index+1]
	}

	return ""
}
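A stand-alone illustration of the collapsing that tryDirectoryName performs: everything after the prefix up to and including the first delimiter becomes a common prefix. The helper below takes a plain path string instead of *data.NodeVersion, purely for the example.

```go
package main

import (
	"fmt"
	"strings"
)

// dirName mirrors tryDirectoryName but takes a plain file path.
func dirName(filePath, prefix, delimiter string) string {
	if len(delimiter) == 0 {
		return ""
	}
	tail := strings.TrimPrefix(filePath, prefix)
	if index := strings.Index(tail, delimiter); index >= 0 {
		return prefix + tail[:index+1]
	}
	return ""
}

func main() {
	fmt.Println(dirName("a/b/small.jpg", "a/", "/")) // "a/b/", collapsed into a common prefix
	fmt.Println(dirName("small.jpg", "", "/"))       // "", not a directory
}
```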
|
||||
|
||||
func wrapReader(input io.Reader, bufSize int, f func(buf []byte)) io.Reader {
|
||||
if input == nil {
|
||||
return nil
|
||||
|
|
|
@ -174,13 +174,13 @@ func (n *layer) getNodeVersion(ctx context.Context, objVersion *ObjectVersion) (
|
|||
}
|
||||
}
|
||||
|
||||
if err == nil && version.IsDeleteMarker() && !objVersion.NoErrorOnDeleteMarker {
|
||||
if err == nil && version.IsDeleteMarker && !objVersion.NoErrorOnDeleteMarker {
|
||||
return nil, fmt.Errorf("%w: found version is delete marker", s3errors.GetAPIError(s3errors.ErrNoSuchKey))
|
||||
} else if errors.Is(err, ErrNodeNotFound) {
|
||||
return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
|
||||
}
|
||||
|
||||
if err == nil && version != nil && !version.IsDeleteMarker() {
|
||||
if err == nil && version != nil && !version.IsDeleteMarker {
|
||||
n.reqLogger(ctx).Debug(logs.GetTreeNode,
|
||||
zap.Stringer("cid", objVersion.BktInfo.CID), zap.Stringer("oid", version.OID))
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package layer
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
|
@ -10,6 +11,21 @@ import (
|
|||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
)
|
||||
|
||||
type VersionsByPrefixStreamMock struct {
	result []*data.NodeVersion
	offset int
}

func (s *VersionsByPrefixStreamMock) Next(context.Context) (*data.NodeVersion, error) {
	if s.offset > len(s.result)-1 {
		return nil, io.EOF
	}

	res := s.result[s.offset]
	s.offset++
	return res, nil
}
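For reference, a data.VersionsStream-style iterator like the mock above is normally drained by calling Next until io.EOF. A self-contained sketch with local stand-in types (not the SDK ones):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"io"
)

type version struct{ FilePath string }

// stream mimics a versions-by-prefix stream: Next returns io.EOF when exhausted.
type stream struct {
	result []*version
	offset int
}

func (s *stream) Next(context.Context) (*version, error) {
	if s.offset > len(s.result)-1 {
		return nil, io.EOF
	}
	res := s.result[s.offset]
	s.offset++
	return res, nil
}

// drain collects every version until the stream reports io.EOF.
func drain(ctx context.Context, s *stream) ([]*version, error) {
	var out []*version
	for {
		v, err := s.Next(ctx)
		if errors.Is(err, io.EOF) {
			return out, nil
		}
		if err != nil {
			return nil, err
		}
		out = append(out, v)
	}
}

func main() {
	res, _ := drain(context.Background(), &stream{result: []*version{{FilePath: "a"}, {FilePath: "b"}}})
	fmt.Println(len(res)) // 2
}
```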
|
||||
|
||||
type TreeServiceMock struct {
|
||||
settings map[string]*data.BucketSettings
|
||||
versions map[string]map[string][]*data.NodeVersion
|
||||
|
@ -49,14 +65,8 @@ func (t *TreeServiceMock) PutObjectTagging(_ context.Context, bktInfo *data.Buck
|
|||
return nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) DeleteObjectTagging(_ context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) error {
|
||||
cnrTagsMap, ok := t.tags[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
delete(cnrTagsMap, objVersion.ID)
|
||||
return nil
|
||||
func (t *TreeServiceMock) DeleteObjectTagging(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) error {
|
||||
return t.PutObjectTagging(ctx, bktInfo, objVersion, nil)
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) GetBucketTagging(context.Context, *data.BucketInfo) (map[string]string, error) {
|
||||
|
@ -177,7 +187,7 @@ func (t *TreeServiceMock) GetLatestVersion(_ context.Context, bktInfo *data.Buck
|
|||
return nil, ErrNodeNotFound
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) GetLatestVersionsByPrefix(_ context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
|
||||
func (t *TreeServiceMock) InitVersionsByPrefixStream(_ context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) (data.VersionsStream, error) {
|
||||
cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return nil, ErrNodeNotFound
|
||||
|
@ -190,6 +200,11 @@ func (t *TreeServiceMock) GetLatestVersionsByPrefix(_ context.Context, bktInfo *
|
|||
continue
|
||||
}
|
||||
|
||||
if !latestOnly {
|
||||
result = append(result, versions...)
|
||||
continue
|
||||
}
|
||||
|
||||
sort.Slice(versions, func(i, j int) bool {
|
||||
return versions[i].ID < versions[j].ID
|
||||
})
|
||||
|
@ -199,7 +214,9 @@ func (t *TreeServiceMock) GetLatestVersionsByPrefix(_ context.Context, bktInfo *
|
|||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
return &VersionsByPrefixStreamMock{
|
||||
result: result,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) GetUnversioned(_ context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error) {
|
||||
|
@ -383,7 +400,7 @@ LOOP:
|
|||
return result, nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) DeleteMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) error {
|
||||
func (t *TreeServiceMock) DeleteMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, multipartInfo *data.MultipartInfo) error {
|
||||
cnrMultipartsMap := t.multiparts[bktInfo.CID.EncodeToString()]
|
||||
|
||||
var uploadID string
|
||||
|
@ -391,7 +408,7 @@ func (t *TreeServiceMock) DeleteMultipartUpload(_ context.Context, bktInfo *data
|
|||
LOOP:
|
||||
for key, multiparts := range cnrMultipartsMap {
|
||||
for i, multipart := range multiparts {
|
||||
if multipart.ID == multipartNodeID {
|
||||
if multipart.ID == multipartInfo.ID {
|
||||
uploadID = multipart.UploadID
|
||||
cnrMultipartsMap[key] = append(multiparts[:i], multiparts[i+1:]...)
|
||||
break LOOP
|
||||
|
|
|
@ -54,8 +54,7 @@ type TreeService interface {
|
|||
|
||||
GetVersions(ctx context.Context, bktInfo *data.BucketInfo, objectName string) ([]*data.NodeVersion, error)
|
||||
GetLatestVersion(ctx context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error)
|
||||
GetLatestVersionsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.NodeVersion, error)
|
||||
GetAllVersionsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.NodeVersion, error)
|
||||
InitVersionsByPrefixStream(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) (data.VersionsStream, error)
|
||||
GetUnversioned(ctx context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error)
|
||||
AddVersion(ctx context.Context, bktInfo *data.BucketInfo, newVersion *data.NodeVersion) (uint64, error)
|
||||
RemoveVersion(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64) error
|
||||
|
@ -64,7 +63,7 @@ type TreeService interface {
|
|||
GetLock(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64) (*data.LockInfo, error)
|
||||
|
||||
CreateMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, info *data.MultipartInfo) error
|
||||
DeleteMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) error
|
||||
DeleteMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, info *data.MultipartInfo) error
|
||||
GetMultipartUploadsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.MultipartInfo, error)
|
||||
GetMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error)
|
||||
|
||||
|
|
|
@ -13,39 +13,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
)
|
||||
|
||||
type (
|
||||
// ListObjectsInfo contains common fields of data for ListObjectsV1 and ListObjectsV2.
|
||||
ListObjectsInfo struct {
|
||||
Prefixes []string
|
||||
Objects []*data.ObjectInfo
|
||||
IsTruncated bool
|
||||
}
|
||||
|
||||
// ListObjectsInfoV1 holds data which ListObjectsV1 returns.
|
||||
ListObjectsInfoV1 struct {
|
||||
ListObjectsInfo
|
||||
NextMarker string
|
||||
}
|
||||
|
||||
// ListObjectsInfoV2 holds data which ListObjectsV2 returns.
|
||||
ListObjectsInfoV2 struct {
|
||||
ListObjectsInfo
|
||||
NextContinuationToken string
|
||||
}
|
||||
|
||||
// ListObjectVersionsInfo stores info and list of objects versions.
|
||||
ListObjectVersionsInfo struct {
|
||||
CommonPrefixes []string
|
||||
IsTruncated bool
|
||||
KeyMarker string
|
||||
NextKeyMarker string
|
||||
NextVersionIDMarker string
|
||||
Version []*data.ExtendedObjectInfo
|
||||
DeleteMarker []*data.ExtendedObjectInfo
|
||||
VersionIDMarker string
|
||||
}
|
||||
)
|
||||
|
||||
// PathSeparator is a path components separator string.
|
||||
const PathSeparator = string(os.PathSeparator)
|
||||
|
||||
|
@ -83,7 +50,6 @@ func objectInfoFromMeta(bkt *data.BucketInfo, meta *object.Object) *data.ObjectI
|
|||
return &data.ObjectInfo{
|
||||
ID: objID,
|
||||
CID: bkt.CID,
|
||||
IsDir: false,
|
||||
|
||||
Bucket: bkt.Name,
|
||||
Name: filepathFromObject(meta),
|
||||
|
|
|
@ -1,51 +1,13 @@
|
|||
package layer
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultTestCreated = time.Now()
|
||||
defaultTestPayload = []byte("test object payload")
|
||||
defaultTestPayloadLength = uint64(len(defaultTestPayload))
|
||||
defaultTestContentType = http.DetectContentType(defaultTestPayload)
|
||||
)
|
||||
|
||||
func newTestInfo(obj oid.ID, bkt *data.BucketInfo, name string, isDir bool) *data.ObjectInfo {
|
||||
var hashSum checksum.Checksum
|
||||
info := &data.ObjectInfo{
|
||||
ID: obj,
|
||||
Name: name,
|
||||
Bucket: bkt.Name,
|
||||
CID: bkt.CID,
|
||||
Size: defaultTestPayloadLength,
|
||||
ContentType: defaultTestContentType,
|
||||
Created: time.Unix(defaultTestCreated.Unix(), 0),
|
||||
Owner: bkt.Owner,
|
||||
Headers: make(map[string]string),
|
||||
HashSum: hex.EncodeToString(hashSum.Value()),
|
||||
}
|
||||
|
||||
if isDir {
|
||||
info.IsDir = true
|
||||
info.Size = 0
|
||||
info.ContentType = ""
|
||||
info.Headers = nil
|
||||
}
|
||||
|
||||
return info
|
||||
}
|
||||
|
||||
func newTestNodeVersion(id oid.ID, name string) *data.NodeVersion {
|
||||
return &data.NodeVersion{
|
||||
BaseNodeVersion: data.BaseNodeVersion{
|
||||
|
@ -56,98 +18,84 @@ func newTestNodeVersion(id oid.ID, name string) *data.NodeVersion {
|
|||
}
|
||||
|
||||
func TestTryDirectory(t *testing.T) {
|
||||
var uid user.ID
|
||||
var id oid.ID
|
||||
var containerID cid.ID
|
||||
|
||||
bkt := &data.BucketInfo{
|
||||
Name: "test-container",
|
||||
CID: containerID,
|
||||
Owner: uid,
|
||||
Created: time.Now(),
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
prefix string
|
||||
result *data.ObjectInfo
|
||||
result string
|
||||
node *data.NodeVersion
|
||||
delimiter string
|
||||
}{
|
||||
{
|
||||
name: "small.jpg",
|
||||
result: nil,
|
||||
result: "",
|
||||
node: newTestNodeVersion(id, "small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "small.jpg not matched prefix",
|
||||
prefix: "big",
|
||||
result: nil,
|
||||
result: "",
|
||||
node: newTestNodeVersion(id, "small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "small.jpg delimiter",
|
||||
delimiter: "/",
|
||||
result: nil,
|
||||
result: "",
|
||||
node: newTestNodeVersion(id, "small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "test/small.jpg",
|
||||
result: nil,
|
||||
result: "",
|
||||
node: newTestNodeVersion(id, "test/small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "test/small.jpg with prefix and delimiter",
|
||||
prefix: "test/",
|
||||
delimiter: "/",
|
||||
result: nil,
|
||||
result: "",
|
||||
node: newTestNodeVersion(id, "test/small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "a/b/small.jpg",
|
||||
prefix: "a",
|
||||
result: nil,
|
||||
result: "",
|
||||
node: newTestNodeVersion(id, "a/b/small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "a/b/small.jpg",
|
||||
prefix: "a/",
|
||||
delimiter: "/",
|
||||
result: newTestInfo(id, bkt, "a/b/", true),
|
||||
result: "a/b/",
|
||||
node: newTestNodeVersion(id, "a/b/small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "a/b/c/small.jpg",
|
||||
prefix: "a/",
|
||||
delimiter: "/",
|
||||
result: newTestInfo(id, bkt, "a/b/", true),
|
||||
result: "a/b/",
|
||||
node: newTestNodeVersion(id, "a/b/c/small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "a/b/c/small.jpg",
|
||||
prefix: "a/b/c/s",
|
||||
delimiter: "/",
|
||||
result: nil,
|
||||
result: "",
|
||||
node: newTestNodeVersion(id, "a/b/c/small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "a/b/c/big.jpg",
|
||||
prefix: "a/b/",
|
||||
delimiter: "/",
|
||||
result: newTestInfo(id, bkt, "a/b/c/", true),
|
||||
result: "a/b/c/",
|
||||
node: newTestNodeVersion(id, "a/b/c/big.jpg"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
info := tryDirectory(bkt, tc.node, tc.prefix, tc.delimiter)
|
||||
if tc.result != nil {
|
||||
tc.result.Created = time.Time{}
|
||||
tc.result.Owner = user.ID{}
|
||||
}
|
||||
|
||||
require.Equal(t, tc.result, info)
|
||||
dirName := tryDirectoryName(tc.node, tc.prefix, tc.delimiter)
|
||||
require.Equal(t, tc.result, dirName)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,109 +0,0 @@
|
|||
package layer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
)
|
||||
|
||||
func (n *layer) ListObjectVersions(ctx context.Context, p *ListObjectVersionsParams) (*ListObjectVersionsInfo, error) {
|
||||
var (
|
||||
allObjects = make([]*data.ExtendedObjectInfo, 0, p.MaxKeys)
|
||||
res = &ListObjectVersionsInfo{}
|
||||
)
|
||||
|
||||
versions, err := n.getAllObjectsVersions(ctx, p.BktInfo, p.Prefix, p.Delimiter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sortedNames := make([]string, 0, len(versions))
|
||||
for k := range versions {
|
||||
sortedNames = append(sortedNames, k)
|
||||
}
|
||||
sort.Strings(sortedNames)
|
||||
|
||||
for _, name := range sortedNames {
|
||||
sortedVersions := versions[name]
|
||||
sort.Slice(sortedVersions, func(i, j int) bool {
|
||||
return sortedVersions[j].NodeVersion.Timestamp < sortedVersions[i].NodeVersion.Timestamp // sort in reverse order
|
||||
})
|
||||
|
||||
for i, version := range sortedVersions {
|
||||
version.IsLatest = i == 0
|
||||
allObjects = append(allObjects, version)
|
||||
}
|
||||
}
|
||||
|
||||
if allObjects, err = filterVersionsByMarker(allObjects, p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res.CommonPrefixes, allObjects = triageExtendedObjects(allObjects)
|
||||
|
||||
if len(allObjects) > p.MaxKeys {
|
||||
res.IsTruncated = true
|
||||
res.NextKeyMarker = allObjects[p.MaxKeys-1].ObjectInfo.Name
|
||||
res.NextVersionIDMarker = allObjects[p.MaxKeys-1].ObjectInfo.VersionID()
|
||||
|
||||
allObjects = allObjects[:p.MaxKeys]
|
||||
res.KeyMarker = p.KeyMarker
|
||||
res.VersionIDMarker = p.VersionIDMarker
|
||||
}
|
||||
|
||||
res.Version, res.DeleteMarker = triageVersions(allObjects)
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func filterVersionsByMarker(objects []*data.ExtendedObjectInfo, p *ListObjectVersionsParams) ([]*data.ExtendedObjectInfo, error) {
	if p.KeyMarker == "" {
		return objects, nil
	}

	for i, obj := range objects {
		if obj.ObjectInfo.Name == p.KeyMarker {
			for j := i; j < len(objects); j++ {
				if objects[j].ObjectInfo.Name != obj.ObjectInfo.Name {
					if p.VersionIDMarker == "" {
						return objects[j:], nil
					}
					break
				}
				if objects[j].ObjectInfo.VersionID() == p.VersionIDMarker {
					return objects[j+1:], nil
				}
			}
			return nil, s3errors.GetAPIError(s3errors.ErrInvalidVersion)
		} else if obj.ObjectInfo.Name > p.KeyMarker {
			if p.VersionIDMarker != "" {
				return nil, s3errors.GetAPIError(s3errors.ErrInvalidVersion)
			}
			return objects[i:], nil
		}
	}

	// Don't return nil here: use an empty slice to be consistent with `return objects[j+1:], nil` above,
	// which can also be empty.
	return []*data.ExtendedObjectInfo{}, nil
}
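A simplified, self-contained rendering of the KeyMarker/VersionIDMarker semantics implemented above: listing resumes strictly after the (key, version) pair, and an unknown VersionIDMarker for an existing key is an error. The objVersion type is a local stand-in for *data.ExtendedObjectInfo.

```go
package main

import (
	"errors"
	"fmt"
)

type objVersion struct{ Name, VersionID string }

var errInvalidVersion = errors.New("invalid version id marker")

// filterByMarker mirrors the control flow above with simplified types:
// listing resumes strictly after the (keyMarker, versionIDMarker) pair.
func filterByMarker(objects []objVersion, keyMarker, versionIDMarker string) ([]objVersion, error) {
	if keyMarker == "" {
		return objects, nil
	}
	for i, obj := range objects {
		if obj.Name == keyMarker {
			for j := i; j < len(objects); j++ {
				if objects[j].Name != obj.Name {
					if versionIDMarker == "" {
						return objects[j:], nil
					}
					break
				}
				if objects[j].VersionID == versionIDMarker {
					return objects[j+1:], nil
				}
			}
			return nil, errInvalidVersion
		} else if obj.Name > keyMarker {
			if versionIDMarker != "" {
				return nil, errInvalidVersion
			}
			return objects[i:], nil
		}
	}
	return []objVersion{}, nil
}

func main() {
	objs := []objVersion{{"obj0", "v1"}, {"obj0", "v2"}, {"obj1", "v3"}}
	rest, _ := filterByMarker(objs, "obj0", "v1")
	fmt.Println(rest) // [{obj0 v2} {obj1 v3}]
}
```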
|
||||
|
||||
func triageVersions(objVersions []*data.ExtendedObjectInfo) ([]*data.ExtendedObjectInfo, []*data.ExtendedObjectInfo) {
|
||||
if len(objVersions) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var resVersion []*data.ExtendedObjectInfo
|
||||
var resDelMarkVersions []*data.ExtendedObjectInfo
|
||||
|
||||
for _, version := range objVersions {
|
||||
if version.NodeVersion.IsDeleteMarker() {
|
||||
resDelMarkVersions = append(resDelMarkVersions, version)
|
||||
} else {
|
||||
resVersion = append(resVersion, version)
|
||||
}
|
||||
}
|
||||
|
||||
return resVersion, resDelMarkVersions
|
||||
}
|
|
@ -72,7 +72,7 @@ func (tc *testContext) deleteObject(objectName, versionID string, settings *data
|
|||
}
|
||||
}
|
||||
|
||||
func (tc *testContext) listObjectsV1() []*data.ObjectInfo {
|
||||
func (tc *testContext) listObjectsV1() []*data.ExtendedNodeVersion {
|
||||
res, err := tc.layer.ListObjectsV1(tc.ctx, &ListObjectsParamsV1{
|
||||
ListObjectsParamsCommon: ListObjectsParamsCommon{
|
||||
BktInfo: tc.bktInfo,
|
||||
|
@ -83,7 +83,7 @@ func (tc *testContext) listObjectsV1() []*data.ObjectInfo {
|
|||
return res.Objects
|
||||
}
|
||||
|
||||
func (tc *testContext) listObjectsV2() []*data.ObjectInfo {
|
||||
func (tc *testContext) listObjectsV2() []*data.ExtendedNodeVersion {
|
||||
res, err := tc.layer.ListObjectsV2(tc.ctx, &ListObjectsParamsV2{
|
||||
ListObjectsParamsCommon: ListObjectsParamsCommon{
|
||||
BktInfo: tc.bktInfo,
|
||||
|
@ -168,10 +168,11 @@ func prepareContext(t *testing.T, cachesConfig ...*CachesConfig) *testContext {
|
|||
user.IDFromKey(&owner, key.PrivateKey.PublicKey)
|
||||
|
||||
layerCfg := &Config{
|
||||
Caches: config,
|
||||
Cache: NewCache(config),
|
||||
AnonKey: AnonymousKey{Key: key},
|
||||
TreeService: NewTreeService(),
|
||||
Features: &FeatureSettingsMock{},
|
||||
GateOwner: owner,
|
||||
}
|
||||
|
||||
return &testContext{
|
||||
|
@ -288,9 +289,10 @@ func TestVersioningDeleteSpecificObjectVersion(t *testing.T) {
|
|||
tc.getObject(tc.obj, "", true)
|
||||
|
||||
versions := tc.listVersions()
|
||||
require.Len(t, versions.DeleteMarker, 1)
|
||||
for _, ver := range versions.DeleteMarker {
|
||||
if ver.IsLatest {
|
||||
tc.deleteObject(tc.obj, ver.ObjectInfo.VersionID(), settings)
|
||||
tc.deleteObject(tc.obj, ver.NodeVersion.OID.EncodeToString(), settings)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -322,112 +324,112 @@ func TestFilterVersionsByMarker(t *testing.T) {
|
|||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
objects []*data.ExtendedObjectInfo
|
||||
objects []*data.ExtendedNodeVersion
|
||||
params *ListObjectVersionsParams
|
||||
expected []*data.ExtendedObjectInfo
|
||||
expected []*data.ExtendedNodeVersion
|
||||
error bool
|
||||
}{
|
||||
{
|
||||
name: "missed key marker",
|
||||
objects: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[0]}},
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[1]}},
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "", VersionIDMarker: "dummy"},
|
||||
expected: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[0]}},
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[1]}},
|
||||
expected: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "last version id",
|
||||
objects: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[0]}},
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[1]}},
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj0", VersionIDMarker: testOIDs[1].EncodeToString()},
|
||||
expected: []*data.ExtendedObjectInfo{},
|
||||
expected: []*data.ExtendedNodeVersion{},
|
||||
},
|
||||
{
|
||||
name: "same name, different versions",
|
||||
objects: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[0]}},
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[1]}},
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj0", VersionIDMarker: testOIDs[0].EncodeToString()},
|
||||
expected: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[1]}},
|
||||
expected: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "different name, different versions",
|
||||
objects: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[0]}},
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj1", ID: testOIDs[1]}},
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj0", VersionIDMarker: testOIDs[0].EncodeToString()},
|
||||
expected: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj1", ID: testOIDs[1]}},
|
||||
expected: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[1]}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "not matched name alphabetically less",
|
||||
objects: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[0]}},
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj1", ID: testOIDs[1]}},
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj", VersionIDMarker: ""},
|
||||
expected: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[0]}},
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj1", ID: testOIDs[1]}},
|
||||
expected: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[1]}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "not matched name alphabetically less with dummy version id",
|
||||
objects: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[0]}},
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj", VersionIDMarker: "dummy"},
|
||||
error: true,
|
||||
},
|
||||
{
|
||||
name: "not matched name alphabetically greater",
|
||||
objects: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[0]}},
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj1", ID: testOIDs[1]}},
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj2", VersionIDMarker: testOIDs[2].EncodeToString()},
|
||||
expected: []*data.ExtendedObjectInfo{},
|
||||
expected: []*data.ExtendedNodeVersion{},
|
||||
},
|
||||
{
|
||||
name: "not found version id",
|
||||
objects: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[0]}},
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[1]}},
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj1", ID: testOIDs[2]}},
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[2]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj0", VersionIDMarker: "dummy"},
|
||||
error: true,
|
||||
},
|
||||
{
|
||||
name: "not found version id, obj last",
|
||||
objects: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[0]}},
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[1]}},
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj0", VersionIDMarker: "dummy"},
|
||||
error: true,
|
||||
},
|
||||
{
|
||||
name: "not found version id, obj last",
|
||||
objects: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[0]}},
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj0", ID: testOIDs[1]}},
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj1", ID: testOIDs[2]}},
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[2]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj0", VersionIDMarker: ""},
|
||||
expected: []*data.ExtendedObjectInfo{
|
||||
{ObjectInfo: &data.ObjectInfo{Name: "obj1", ID: testOIDs[2]}},
|
||||
expected: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[2]}}},
|
||||
},
|
||||
},
|
||||
} {
|
||||
|
|
|
@ -1,21 +1,54 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"crypto/elliptic"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func Auth(center auth.Center, log *zap.Logger) Func {
|
||||
type (
|
||||
// Box contains access box and additional info.
|
||||
Box struct {
|
||||
AccessBox *accessbox.Box
|
||||
ClientTime time.Time
|
||||
AuthHeaders *AuthHeader
|
||||
}
|
||||
|
||||
// Center is a user authentication interface.
|
||||
Center interface {
|
||||
// Authenticate validates and authenticates the request.
// Must return ErrNoAuthorizationHeader if the Authorization header is missing.
|
||||
Authenticate(request *http.Request) (*Box, error)
|
||||
}
|
||||
|
||||
//nolint:revive
|
||||
AuthHeader struct {
|
||||
AccessKeyID string
|
||||
Region string
|
||||
SignatureV4 string
|
||||
}
|
||||
)
|
||||
|
||||
// ErrNoAuthorizationHeader is returned for unauthenticated requests.
|
||||
var ErrNoAuthorizationHeader = stderrors.New("no authorization header")
|
||||
|
||||
func Auth(center Center, log *zap.Logger) Func {
|
||||
return func(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
box, err := center.Authenticate(r)
|
||||
if err != nil {
|
||||
if err == auth.ErrNoAuthorizationHeader {
|
||||
if err == ErrNoAuthorizationHeader {
|
||||
reqLogOrDefault(ctx, log).Debug(logs.CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed)
|
||||
} else {
|
||||
reqLogOrDefault(ctx, log).Error(logs.FailedToPassAuthentication, zap.Error(err))
|
||||
|
@ -37,3 +70,45 @@ func Auth(center auth.Center, log *zap.Logger) Func {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
type FrostFSIDValidator interface {
|
||||
ValidatePublicKey(key *keys.PublicKey) error
|
||||
}
|
||||
|
||||
func FrostfsIDValidation(frostfsID FrostFSIDValidator, log *zap.Logger) Func {
|
||||
return func(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
bd, err := GetBoxData(ctx)
|
||||
if err != nil || bd.Gate.BearerToken == nil {
|
||||
reqLogOrDefault(ctx, log).Debug(logs.AnonRequestSkipFrostfsIDValidation)
|
||||
h.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
if err = validateBearerToken(frostfsID, bd.Gate.BearerToken); err != nil {
|
||||
reqLogOrDefault(ctx, log).Error(logs.FrostfsIDValidationFailed, zap.Error(err))
|
||||
WriteErrorResponse(w, GetReqInfo(r.Context()), err)
|
||||
return
|
||||
}
|
||||
|
||||
h.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func validateBearerToken(frostfsID FrostFSIDValidator, bt *bearer.Token) error {
	m := new(acl.BearerToken)
	bt.WriteToV2(m)

	pk, err := keys.NewPublicKeyFromBytes(m.GetSignature().GetKey(), elliptic.P256())
	if err != nil {
		return fmt.Errorf("invalid bearer token public key: %w", err)
	}

	if err = frostfsID.ValidatePublicKey(pk); err != nil {
		return fmt.Errorf("user key validation failed: %w", err)
	}

	return nil
}
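Middleware in this package follows the func(http.Handler) http.Handler shape (the Func type), so validators like FrostfsIDValidation compose into a chain before the S3 handlers. A minimal, self-contained sketch of that composition; the chain and logging helpers are illustrative, not gateway code.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type middleware func(http.Handler) http.Handler

// chain applies middleware so that the first element of mws runs first.
func chain(h http.Handler, mws ...middleware) http.Handler {
	for i := len(mws) - 1; i >= 0; i-- {
		h = mws[i](h)
	}
	return h
}

func logging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("request:", r.URL.Path)
		next.ServeHTTP(w, r)
	})
}

func main() {
	final := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) })
	rec := httptest.NewRecorder()
	chain(final, logging).ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/bucket/object", nil))
	fmt.Println(rec.Code) // 200
}
```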
|
||||
|
|
105
api/middleware/constants.go
Normal file
|
@ -0,0 +1,105 @@
|
|||
package middleware
|
||||
|
||||
const (
|
||||
ListBucketsOperation = "ListBuckets"
|
||||
|
||||
// bucket operations.
|
||||
|
||||
OptionsOperation = "Options"
|
||||
HeadBucketOperation = "HeadBucket"
|
||||
ListMultipartUploadsOperation = "ListMultipartUploads"
|
||||
GetBucketLocationOperation = "GetBucketLocation"
|
||||
GetBucketPolicyOperation = "GetBucketPolicy"
|
||||
GetBucketLifecycleOperation = "GetBucketLifecycle"
|
||||
GetBucketEncryptionOperation = "GetBucketEncryption"
|
||||
GetBucketCorsOperation = "GetBucketCors"
|
||||
GetBucketACLOperation = "GetBucketACL"
|
||||
GetBucketWebsiteOperation = "GetBucketWebsite"
|
||||
GetBucketAccelerateOperation = "GetBucketAccelerate"
|
||||
GetBucketRequestPaymentOperation = "GetBucketRequestPayment"
|
||||
GetBucketLoggingOperation = "GetBucketLogging"
|
||||
GetBucketReplicationOperation = "GetBucketReplication"
|
||||
GetBucketTaggingOperation = "GetBucketTagging"
|
||||
GetBucketObjectLockConfigOperation = "GetBucketObjectLockConfig"
|
||||
GetBucketVersioningOperation = "GetBucketVersioning"
|
||||
GetBucketNotificationOperation = "GetBucketNotification"
|
||||
ListenBucketNotificationOperation = "ListenBucketNotification"
|
||||
ListBucketObjectVersionsOperation = "ListBucketObjectVersions"
|
||||
ListObjectsV2MOperation = "ListObjectsV2M"
|
||||
ListObjectsV2Operation = "ListObjectsV2"
|
||||
ListObjectsV1Operation = "ListObjectsV1"
|
||||
PutBucketCorsOperation = "PutBucketCors"
|
||||
PutBucketACLOperation = "PutBucketACL"
|
||||
PutBucketLifecycleOperation = "PutBucketLifecycle"
|
||||
PutBucketEncryptionOperation = "PutBucketEncryption"
|
||||
PutBucketPolicyOperation = "PutBucketPolicy"
|
||||
PutBucketObjectLockConfigOperation = "PutBucketObjectLockConfig"
|
||||
PutBucketTaggingOperation = "PutBucketTagging"
|
||||
PutBucketVersioningOperation = "PutBucketVersioning"
|
||||
PutBucketNotificationOperation = "PutBucketNotification"
|
||||
CreateBucketOperation = "CreateBucket"
|
||||
DeleteMultipleObjectsOperation = "DeleteMultipleObjects"
|
||||
PostObjectOperation = "PostObject"
|
||||
DeleteBucketCorsOperation = "DeleteBucketCors"
|
||||
DeleteBucketWebsiteOperation = "DeleteBucketWebsite"
|
||||
DeleteBucketTaggingOperation = "DeleteBucketTagging"
|
||||
DeleteBucketPolicyOperation = "DeleteBucketPolicy"
|
||||
DeleteBucketLifecycleOperation = "DeleteBucketLifecycle"
|
||||
DeleteBucketEncryptionOperation = "DeleteBucketEncryption"
|
||||
DeleteBucketOperation = "DeleteBucket"
|
||||
|
||||
// object operations.
|
||||
|
||||
HeadObjectOperation = "HeadObject"
|
||||
ListPartsOperation = "ListParts"
|
||||
GetObjectACLOperation = "GetObjectACL"
|
||||
GetObjectTaggingOperation = "GetObjectTagging"
|
||||
GetObjectRetentionOperation = "GetObjectRetention"
|
||||
GetObjectLegalHoldOperation = "GetObjectLegalHold"
|
||||
GetObjectAttributesOperation = "GetObjectAttributes"
|
||||
GetObjectOperation = "GetObject"
|
||||
UploadPartCopyOperation = "UploadPartCopy"
|
||||
UploadPartOperation = "UploadPart"
|
||||
PutObjectACLOperation = "PutObjectACL"
|
||||
PutObjectTaggingOperation = "PutObjectTagging"
|
||||
CopyObjectOperation = "CopyObject"
|
||||
PutObjectRetentionOperation = "PutObjectRetention"
|
||||
PutObjectLegalHoldOperation = "PutObjectLegalHold"
|
||||
PutObjectOperation = "PutObject"
|
||||
CompleteMultipartUploadOperation = "CompleteMultipartUpload"
|
||||
CreateMultipartUploadOperation = "CreateMultipartUpload"
|
||||
SelectObjectContentOperation = "SelectObjectContent"
|
||||
AbortMultipartUploadOperation = "AbortMultipartUpload"
|
||||
DeleteObjectTaggingOperation = "DeleteObjectTagging"
|
||||
DeleteObjectOperation = "DeleteObject"
|
||||
)
|
||||
|
||||
const (
|
||||
UploadsQuery = "uploads"
|
||||
LocationQuery = "location"
|
||||
PolicyQuery = "policy"
|
||||
LifecycleQuery = "lifecycle"
|
||||
EncryptionQuery = "encryption"
|
||||
CorsQuery = "cors"
|
||||
ACLQuery = "acl"
|
||||
WebsiteQuery = "website"
|
||||
AccelerateQuery = "accelerate"
|
||||
RequestPaymentQuery = "requestPayment"
|
||||
LoggingQuery = "logging"
|
||||
ReplicationQuery = "replication"
|
||||
TaggingQuery = "tagging"
|
||||
ObjectLockQuery = "object-lock"
|
||||
VersioningQuery = "versioning"
|
||||
NotificationQuery = "notification"
|
||||
EventsQuery = "events"
|
||||
VersionsQuery = "versions"
|
||||
ListTypeQuery = "list-type"
|
||||
MetadataQuery = "metadata"
|
||||
DeleteQuery = "delete"
|
||||
UploadIDQuery = "uploadId"
|
||||
RetentionQuery = "retention"
|
||||
LegalQuery = "legal"
|
||||
AttributesQuery = "attributes"
|
||||
PartNumberQuery = "partNumber"
|
||||
LegalHoldQuery = "legal-hold"
|
||||
)
|
|
@ -17,10 +17,6 @@ import (
|
|||
)
|
||||
|
||||
type (
|
||||
UsersStat interface {
|
||||
Update(user, bucket, cnrID string, reqType int, in, out uint64)
|
||||
}
|
||||
|
||||
readCounter struct {
|
||||
io.ReadCloser
|
||||
countBytes uint64
|
||||
|
@ -39,6 +35,10 @@ type (
|
|||
startTime time.Time
|
||||
}
|
||||
|
||||
MetricsSettings interface {
|
||||
ResolveNamespaceAlias(namespace string) string
|
||||
}
|
||||
|
||||
// BucketResolveFunc is a func to resolve bucket info by name.
|
||||
BucketResolveFunc func(ctx context.Context, bucket string) (*data.BucketInfo, error)
|
||||
|
||||
|
@ -49,14 +49,14 @@ type (
|
|||
const systemPath = "/system"
|
||||
|
||||
// Metrics wraps an http handler for the API with basic statistics collection.
|
||||
func Metrics(log *zap.Logger, resolveBucket BucketResolveFunc, appMetrics *metrics.AppMetrics) Func {
|
||||
func Metrics(log *zap.Logger, resolveBucket BucketResolveFunc, appMetrics *metrics.AppMetrics, settings MetricsSettings) Func {
|
||||
return func(h http.Handler) http.Handler {
|
||||
return stats(h.ServeHTTP, resolveCID(log, resolveBucket), appMetrics)
|
||||
return stats(h.ServeHTTP, resolveCID(log, resolveBucket), appMetrics, settings)
|
||||
}
|
||||
}
|
||||
|
||||
// stats is a handler that updates metrics.
|
||||
func stats(f http.HandlerFunc, resolveCID cidResolveFunc, appMetrics *metrics.AppMetrics) http.HandlerFunc {
|
||||
func stats(f http.HandlerFunc, resolveCID cidResolveFunc, appMetrics *metrics.AppMetrics, settings MetricsSettings) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := GetReqInfo(r.Context())
|
||||
|
||||
|
@ -82,7 +82,8 @@ func stats(f http.HandlerFunc, resolveCID cidResolveFunc, appMetrics *metrics.Ap
|
|||
|
||||
user := resolveUser(r.Context())
|
||||
cnrID := resolveCID(r.Context(), reqInfo)
|
||||
appMetrics.Update(user, reqInfo.BucketName, cnrID, requestTypeFromAPI(reqInfo.API), in.countBytes, out.countBytes)
|
||||
appMetrics.Update(user, reqInfo.BucketName, cnrID, settings.ResolveNamespaceAlias(reqInfo.Namespace),
|
||||
requestTypeFromAPI(reqInfo.API), in.countBytes, out.countBytes)
|
||||
|
||||
code := statsWriter.statusCode
|
||||
// A successful request has a 2xx response code
|
||||
|
@ -94,10 +95,8 @@ func stats(f http.HandlerFunc, resolveCID cidResolveFunc, appMetrics *metrics.Ap
|
|||
}
|
||||
}
|
||||
|
||||
if r.Method == http.MethodGet {
|
||||
// Increment the prometheus http request response histogram with appropriate label
|
||||
appMetrics.Statistic().RequestDurationsUpdate(reqInfo.API, durationSecs)
|
||||
}
|
||||
|
||||
appMetrics.Statistic().TotalInputBytesAdd(in.countBytes)
|
||||
appMetrics.Statistic().TotalOutputBytesAdd(out.countBytes)
|
||||
|
@ -106,27 +105,27 @@ func stats(f http.HandlerFunc, resolveCID cidResolveFunc, appMetrics *metrics.Ap
|
|||
|
||||
func requestTypeFromAPI(api string) metrics.RequestType {
|
||||
switch api {
|
||||
case "Options", "HeadObject", "HeadBucket":
|
||||
case OptionsOperation, HeadObjectOperation, HeadBucketOperation:
|
||||
return metrics.HEADRequest
|
||||
case "CreateMultipartUpload", "UploadPartCopy", "UploadPart", "CompleteMultipartUpload",
|
||||
"PutObjectACL", "PutObjectTagging", "CopyObject", "PutObjectRetention", "PutObjectLegalHold",
|
||||
"PutObject", "PutBucketCors", "PutBucketACL", "PutBucketLifecycle", "PutBucketEncryption",
|
||||
"PutBucketPolicy", "PutBucketObjectLockConfig", "PutBucketTagging", "PutBucketVersioning",
|
||||
"PutBucketNotification", "CreateBucket", "PostObject":
|
||||
case CreateMultipartUploadOperation, UploadPartCopyOperation, UploadPartOperation, CompleteMultipartUploadOperation,
|
||||
PutObjectACLOperation, PutObjectTaggingOperation, CopyObjectOperation, PutObjectRetentionOperation, PutObjectLegalHoldOperation,
|
||||
PutObjectOperation, PutBucketCorsOperation, PutBucketACLOperation, PutBucketLifecycleOperation, PutBucketEncryptionOperation,
|
||||
PutBucketPolicyOperation, PutBucketObjectLockConfigOperation, PutBucketTaggingOperation, PutBucketVersioningOperation,
|
||||
PutBucketNotificationOperation, CreateBucketOperation, PostObjectOperation:
|
||||
return metrics.PUTRequest
|
||||
case "ListObjectParts", "ListMultipartUploads", "ListObjectsV2M", "ListObjectsV2", "ListBucketVersions",
|
||||
"ListObjectsV1", "ListBuckets":
|
||||
case ListPartsOperation, ListMultipartUploadsOperation, ListObjectsV2MOperation, ListObjectsV2Operation,
|
||||
ListObjectsV1Operation, ListBucketsOperation:
|
||||
return metrics.LISTRequest
|
||||
case "GetObjectACL", "GetObjectTagging", "SelectObjectContent", "GetObjectRetention", "getobjectlegalhold",
|
||||
"GetObjectAttributes", "GetObject", "GetBucketLocation", "GetBucketPolicy",
|
||||
"GetBucketLifecycle", "GetBucketEncryption", "GetBucketCors", "GetBucketACL",
|
||||
"GetBucketWebsite", "GetBucketAccelerate", "GetBucketRequestPayment", "GetBucketLogging",
|
||||
"GetBucketReplication", "GetBucketTagging", "GetBucketObjectLockConfig",
|
||||
"GetBucketVersioning", "GetBucketNotification", "ListenBucketNotification":
|
||||
case GetObjectACLOperation, GetObjectTaggingOperation, SelectObjectContentOperation, GetObjectRetentionOperation, GetObjectLegalHoldOperation,
|
||||
GetObjectAttributesOperation, GetObjectOperation, GetBucketLocationOperation, GetBucketPolicyOperation,
|
||||
GetBucketLifecycleOperation, GetBucketEncryptionOperation, GetBucketCorsOperation, GetBucketACLOperation,
|
||||
GetBucketWebsiteOperation, GetBucketAccelerateOperation, GetBucketRequestPaymentOperation, GetBucketLoggingOperation,
|
||||
GetBucketReplicationOperation, GetBucketTaggingOperation, GetBucketObjectLockConfigOperation,
|
||||
GetBucketVersioningOperation, GetBucketNotificationOperation, ListenBucketNotificationOperation:
|
||||
return metrics.GETRequest
|
||||
case "AbortMultipartUpload", "DeleteObjectTagging", "DeleteObject", "DeleteBucketCors",
|
||||
"DeleteBucketWebsite", "DeleteBucketTagging", "DeleteMultipleObjects", "DeleteBucketPolicy",
|
||||
"DeleteBucketLifecycle", "DeleteBucketEncryption", "DeleteBucket":
|
||||
case AbortMultipartUploadOperation, DeleteObjectTaggingOperation, DeleteObjectOperation, DeleteBucketCorsOperation,
|
||||
DeleteBucketWebsiteOperation, DeleteBucketTaggingOperation, DeleteMultipleObjectsOperation, DeleteBucketPolicyOperation,
|
||||
DeleteBucketLifecycleOperation, DeleteBucketEncryptionOperation, DeleteBucketOperation:
|
||||
return metrics.DELETERequest
|
||||
default:
|
||||
return metrics.UNKNOWNRequest
|
||||
|
@ -136,7 +135,7 @@ func requestTypeFromAPI(api string) metrics.RequestType {
|
|||
// resolveCID forms CIDResolveFunc using BucketResolveFunc.
|
||||
func resolveCID(log *zap.Logger, resolveBucket BucketResolveFunc) cidResolveFunc {
|
||||
return func(ctx context.Context, reqInfo *ReqInfo) (cnrID string) {
|
||||
if reqInfo.BucketName == "" || reqInfo.API == "CreateBucket" || reqInfo.API == "" {
|
||||
if reqInfo.BucketName == "" || reqInfo.API == CreateBucketOperation || reqInfo.API == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
|
|
335
api/middleware/policy.go
Normal file
|
@ -0,0 +1,335 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"crypto/elliptic"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
apiErr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource/testutil"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/schema/s3"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type PolicySettings interface {
|
||||
ResolveNamespaceAlias(ns string) string
|
||||
PolicyDenyByDefault() bool
|
||||
}
|
||||
|
||||
type FrostFSIDInformer interface {
|
||||
GetUserGroupIDs(userHash util.Uint160) ([]string, error)
|
||||
}
|
||||
|
||||
func PolicyCheck(storage engine.ChainRouter, frostfsid FrostFSIDInformer, settings PolicySettings, domains []string, log *zap.Logger) Func {
	return func(h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()

			st, err := policyCheck(storage, frostfsid, settings, domains, r)
			if err == nil {
				if st != chain.Allow && (st != chain.NoRuleFound || settings.PolicyDenyByDefault()) {
					err = apiErr.GetAPIErrorWithError(apiErr.ErrAccessDenied, fmt.Errorf("policy check: %s", st.String()))
				}
			}
			if err != nil {
				reqLogOrDefault(ctx, log).Error(logs.PolicyValidationFailed, zap.Error(err))
				WriteErrorResponse(w, GetReqInfo(ctx), err)
				return
			}

			h.ServeHTTP(w, r)
		})
	}
}
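A small stand-alone check of the allow/deny decision used above: a request passes when the chain status is Allow, or when no rule was found and the gateway is not configured to deny by default. The status values below are local stand-ins for the policy-engine chain statuses.

```go
package main

import "fmt"

type status int

// Local stand-ins for chain.Allow, chain.NoRuleFound and an explicit deny.
const (
	allow status = iota
	noRuleFound
	accessDenied
)

// allowed reproduces the condition used in the middleware above.
func allowed(st status, denyByDefault bool) bool {
	return st == allow || (st == noRuleFound && !denyByDefault)
}

func main() {
	fmt.Println(allowed(allow, true))         // true
	fmt.Println(allowed(noRuleFound, false))  // true
	fmt.Println(allowed(noRuleFound, true))   // false
	fmt.Println(allowed(accessDenied, false)) // false
}
```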
|
||||
|
||||
func policyCheck(storage engine.ChainRouter, frostfsid FrostFSIDInformer, settings PolicySettings, domains []string, r *http.Request) (chain.Status, error) {
|
||||
req, err := getPolicyRequest(r, frostfsid, domains)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
reqInfo := GetReqInfo(r.Context())
|
||||
target := engine.NewRequestTargetWithNamespace(settings.ResolveNamespaceAlias(reqInfo.Namespace))
|
||||
st, found, err := storage.IsAllowed(chain.S3, target, req)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if !found {
|
||||
st = chain.NoRuleFound
|
||||
}
|
||||
|
||||
return st, nil
|
||||
}
|
||||
|
||||
func getPolicyRequest(r *http.Request, frostfsid FrostFSIDInformer, domains []string) (*testutil.Request, error) {
|
||||
var (
|
||||
owner string
|
||||
groups []string
|
||||
)
|
||||
|
||||
ctx := r.Context()
|
||||
bd, err := GetBoxData(ctx)
|
||||
if err == nil && bd.Gate.BearerToken != nil {
|
||||
pk, err := keys.NewPublicKeyFromBytes(bd.Gate.BearerToken.SigningKeyBytes(), elliptic.P256())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse pubclic key from btoken: %w", err)
|
||||
}
|
||||
owner = pk.Address()
|
||||
|
||||
groups, err = frostfsid.GetUserGroupIDs(pk.GetScriptHash())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get group ids: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
op, res := determineOperationAndResource(r, domains)
|
||||
|
||||
return testutil.NewRequest(op, testutil.NewResource(res, nil),
|
||||
map[string]string{
|
||||
s3.PropertyKeyOwner: owner,
|
||||
common.PropertyKeyFrostFSIDGroupID: chain.FormCondSliceContainsValue(groups),
|
||||
},
|
||||
), nil
|
||||
}
|
||||
|
||||
type ReqType int

const (
	noneType ReqType = iota
	bucketType
	objectType
)

func determineOperationAndResource(r *http.Request, domains []string) (operation string, resource string) {
	var (
		reqType     ReqType
		matchDomain bool
	)

	for _, domain := range domains {
		ind := strings.Index(r.Host, "."+domain)
		if ind == -1 {
			continue
		}

		matchDomain = true
		reqType = bucketType
		bkt := r.Host[:ind]
		if obj := strings.TrimPrefix(r.URL.Path, "/"); obj != "" {
			reqType = objectType
			resource = fmt.Sprintf(s3.ResourceFormatS3BucketObject, bkt, obj)
		} else {
			resource = fmt.Sprintf(s3.ResourceFormatS3Bucket, bkt)
		}

		break
	}

	if !matchDomain {
		bktObj := strings.TrimPrefix(r.URL.Path, "/")
		if ind := strings.IndexByte(bktObj, '/'); ind == -1 {
			reqType = bucketType
			resource = fmt.Sprintf(s3.ResourceFormatS3Bucket, bktObj)
			if bktObj == "" {
				reqType = noneType
			}
		} else {
			reqType = objectType
			resource = fmt.Sprintf(s3.ResourceFormatS3BucketObject, bktObj[:ind], bktObj[ind+1:])
		}
	}

	switch reqType {
	case objectType:
		operation = determineObjectOperation(r)
	case bucketType:
		operation = determineBucketOperation(r)
	default:
		operation = determineGeneralOperation(r)
	}

	return "s3:" + operation, resource
}

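Note: a quick test-style sketch of how the two addressing styles map onto request type and resource (assumes a virtual-hosted domain of example.com; not part of the diff):

// Virtual-hosted style: the bucket comes from the Host header, the object from the path.
r1 := httptest.NewRequest(http.MethodGet, "https://data.example.com/photo.png", nil)
op1, res1 := determineOperationAndResource(r1, []string{"example.com"})
// op1 is "s3:" + GetObjectOperation; res1 is the bucket/object resource for "data"/"photo.png"

// Path-style: the first path segment is the bucket, the rest is the object key.
r2 := httptest.NewRequest(http.MethodGet, "https://gw.internal/data/photo.png", nil)
op2, res2 := determineOperationAndResource(r2, []string{"example.com"})
// same operation and resource as above, resolved from the path instead of the host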
func determineBucketOperation(r *http.Request) string {
	query := r.URL.Query()
	switch r.Method {
	case http.MethodOptions:
		return OptionsOperation
	case http.MethodHead:
		return HeadBucketOperation
	case http.MethodGet:
		switch {
		case query.Has(UploadsQuery):
			return ListMultipartUploadsOperation
		case query.Has(LocationQuery):
			return GetBucketLocationOperation
		case query.Has(PolicyQuery):
			return GetBucketPolicyOperation
		case query.Has(LifecycleQuery):
			return GetBucketLifecycleOperation
		case query.Has(EncryptionQuery):
			return GetBucketEncryptionOperation
		case query.Has(CorsQuery):
			return GetBucketCorsOperation
		case query.Has(ACLQuery):
			return GetBucketACLOperation
		case query.Has(WebsiteQuery):
			return GetBucketWebsiteOperation
		case query.Has(AccelerateQuery):
			return GetBucketAccelerateOperation
		case query.Has(RequestPaymentQuery):
			return GetBucketRequestPaymentOperation
		case query.Has(LoggingQuery):
			return GetBucketLoggingOperation
		case query.Has(ReplicationQuery):
			return GetBucketReplicationOperation
		case query.Has(TaggingQuery):
			return GetBucketTaggingOperation
		case query.Has(ObjectLockQuery):
			return GetBucketObjectLockConfigOperation
		case query.Has(VersioningQuery):
			return GetBucketVersioningOperation
		case query.Has(NotificationQuery):
			return GetBucketNotificationOperation
		case query.Has(EventsQuery):
			return ListenBucketNotificationOperation
		case query.Has(VersionsQuery):
			return ListBucketObjectVersionsOperation
		case query.Get(ListTypeQuery) == "2" && query.Get(MetadataQuery) == "true":
			return ListObjectsV2MOperation
		case query.Get(ListTypeQuery) == "2":
			return ListObjectsV2Operation
		default:
			return ListObjectsV1Operation
		}
	case http.MethodPut:
		switch {
		case query.Has(CorsQuery):
			return PutBucketCorsOperation
		case query.Has(ACLQuery):
			return PutBucketACLOperation
		case query.Has(LifecycleQuery):
			return PutBucketLifecycleOperation
		case query.Has(EncryptionQuery):
			return PutBucketEncryptionOperation
		case query.Has(PolicyQuery):
			return PutBucketPolicyOperation
		case query.Has(ObjectLockQuery):
			return PutBucketObjectLockConfigOperation
		case query.Has(TaggingQuery):
			return PutBucketTaggingOperation
		case query.Has(VersioningQuery):
			return PutBucketVersioningOperation
		case query.Has(NotificationQuery):
			return PutBucketNotificationOperation
		default:
			return CreateBucketOperation
		}
	case http.MethodPost:
		switch {
		case query.Has(DeleteQuery):
			return DeleteMultipleObjectsOperation
		default:
			return PostObjectOperation
		}
	case http.MethodDelete:
		switch {
		case query.Has(CorsQuery):
			return DeleteBucketCorsOperation
		case query.Has(WebsiteQuery):
			return DeleteBucketWebsiteOperation
		case query.Has(TaggingQuery):
			return DeleteBucketTaggingOperation
		case query.Has(PolicyQuery):
			return DeleteBucketPolicyOperation
		case query.Has(LifecycleQuery):
			return DeleteBucketLifecycleOperation
		case query.Has(EncryptionQuery):
			return DeleteBucketEncryptionOperation
		default:
			return DeleteBucketOperation
		}
	}

	return ""
}

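Note: the GET branch above distinguishes the three list flavours purely from query parameters (list-type and metadata, per the router constants used later in this diff); a short illustration of the mapping, not part of the diff:

// GET /bucket                            -> ListObjectsV1Operation
// GET /bucket?list-type=2                -> ListObjectsV2Operation
// GET /bucket?list-type=2&metadata=true  -> ListObjectsV2MOperation
// GET /bucket?versions                   -> ListBucketObjectVersionsOperation
r := httptest.NewRequest(http.MethodGet, "/bucket?list-type=2&metadata=true", nil)
_ = determineBucketOperation(r) // expected to be ListObjectsV2MOperation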
func determineObjectOperation(r *http.Request) string {
	query := r.URL.Query()
	switch r.Method {
	case http.MethodHead:
		return HeadObjectOperation
	case http.MethodGet:
		switch {
		case query.Has(UploadIDQuery):
			return ListPartsOperation
		case query.Has(ACLQuery):
			return GetObjectACLOperation
		case query.Has(TaggingQuery):
			return GetObjectTaggingOperation
		case query.Has(RetentionQuery):
			return GetObjectRetentionOperation
		case query.Has(LegalQuery):
			return GetObjectLegalHoldOperation
		case query.Has(AttributesQuery):
			return GetObjectAttributesOperation
		default:
			return GetObjectOperation
		}
	case http.MethodPut:
		switch {
		case query.Has(PartNumberQuery) && query.Has(UploadIDQuery) && r.Header.Get("X-Amz-Copy-Source") != "":
			return UploadPartCopyOperation
		case query.Has(PartNumberQuery) && query.Has(UploadIDQuery):
			return UploadPartOperation
		case query.Has(ACLQuery):
			return PutObjectACLOperation
		case query.Has(TaggingQuery):
			return PutObjectTaggingOperation
		case r.Header.Get("X-Amz-Copy-Source") != "":
			return CopyObjectOperation
		case query.Has(RetentionQuery):
			return PutObjectRetentionOperation
		case query.Has(LegalHoldQuery):
			return PutObjectLegalHoldOperation
		default:
			return PutObjectOperation
		}
	case http.MethodPost:
		switch {
		case query.Has(UploadIDQuery):
			return CompleteMultipartUploadOperation
		case query.Has(UploadsQuery):
			return CreateMultipartUploadOperation
		default:
			return SelectObjectContentOperation
		}
	case http.MethodDelete:
		switch {
		case query.Has(UploadIDQuery):
			return AbortMultipartUploadOperation
		case query.Has(TaggingQuery):
			return DeleteObjectTaggingOperation
		default:
			return DeleteObjectOperation
		}
	}

	return ""
}

func determineGeneralOperation(r *http.Request) string {
	if r.Method == http.MethodGet {
		return ListBucketsOperation
	}
	return ""
}
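Note: one subtlety worth a concrete case: for PUT requests the copy-source header is checked before the plain upload fallback, so the same ?partNumber&uploadId URL maps to two different operations (illustrative snippet, not part of the diff):

u := "/bucket/object?partNumber=1&uploadId=abc"

plain := httptest.NewRequest(http.MethodPut, u, nil)
// determineObjectOperation(plain) -> UploadPartOperation

withCopy := httptest.NewRequest(http.MethodPut, u, nil)
withCopy.Header.Set("X-Amz-Copy-Source", "/src-bucket/src-object")
// determineObjectOperation(withCopy) -> UploadPartCopyOperation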
@@ -9,7 +9,9 @@ import (
	"strings"
	"sync"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
	"github.com/go-chi/chi/v5"
	"github.com/google/uuid"
	"go.uber.org/zap"

@@ -36,6 +38,7 @@ type (
		ObjectName string   // Object name
		TraceID    string   // Trace ID
		URL        *url.URL // Request url
		Namespace  string
		tags       []KeyVal // Any additional info not accommodated by above fields
	}

@@ -185,7 +188,11 @@ func GetReqLog(ctx context.Context) *zap.Logger {
	return nil
}

func Request(log *zap.Logger) Func {
type RequestSettings interface {
	NamespaceHeader() string
}

func Request(log *zap.Logger, settings RequestSettings) Func {
	return func(h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// generate random UUIDv4

@@ -199,6 +206,7 @@ func Request(log *zap.Logger) Func {
			// set request info into context
			// bucket name and object will be set in reqInfo later (limitation of go-chi)
			reqInfo := NewReqInfo(w, r, ObjectRequest{})
			reqInfo.Namespace = r.Header.Get(settings.NamespaceHeader())
			r = r.WithContext(SetReqInfo(r.Context(), reqInfo))

			// set request id into gRPC meta header

@@ -206,6 +214,8 @@ func Request(log *zap.Logger) Func {
				r.Context(), HdrAmzRequestID, reqInfo.RequestID,
			))

			r = r.WithContext(treepool.SetRequestID(r.Context(), reqInfo.RequestID))

			reqLogger := log.With(zap.String("request_id", reqInfo.RequestID))
			r = r.WithContext(SetReqLogger(r.Context(), reqLogger))

@@ -293,7 +303,7 @@ func getSourceIP(r *http.Request) string {
		if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 {
			// IPv6 addresses in Forwarded headers are quoted-strings. We strip
			// these quotes.
			addr = strings.Trim(match[1], `"`)
			addr = data.UnQuote(match[1])
		}
	}
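Note: the Request middleware now needs something that knows which header carries the namespace. A minimal sketch of such a settings provider (type name and header value are illustrative; the gateway wires its own config here):

type appSettings struct {
	namespaceHeader string // e.g. "X-Frostfs-Namespace", taken from the gateway config
}

func (s *appSettings) NamespaceHeader() string { return s.namespaceHeader }

// wiring sketch:
// api.Use(s3middleware.Request(log, &appSettings{namespaceHeader: "X-Frostfs-Namespace"}))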
@@ -129,8 +129,6 @@ func WriteErrorResponse(w http.ResponseWriter, reqInfo *ReqInfo, err error) int
		// Set retry-after header to indicate user-agents to retry request after 120secs.
		// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
		w.Header().Set(hdrRetryAfter, "120")
	case "AccessDenied":
		// TODO process when the request is from browser and also if browser
	}
}
|
||||
|
||||
|
|
|
@@ -5,7 +5,6 @@ import (
	"fmt"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
)

@@ -37,8 +36,8 @@ func GetBoxData(ctx context.Context) (*accessbox.Box, error) {
}

// GetAuthHeaders extracts auth.AuthHeader from context.
func GetAuthHeaders(ctx context.Context) (*auth.AuthHeader, error) {
	authHeaders, ok := ctx.Value(authHeadersKey).(*auth.AuthHeader)
func GetAuthHeaders(ctx context.Context) (*AuthHeader, error) {
	authHeaders, ok := ctx.Value(authHeadersKey).(*AuthHeader)
	if !ok {
		return nil, fmt.Errorf("couldn't get auth headers from context")
	}

@@ -62,7 +61,7 @@ func SetBoxData(ctx context.Context, box *accessbox.Box) context.Context {
}

// SetAuthHeaders sets auth.AuthHeader in the context.
func SetAuthHeaders(ctx context.Context, header *auth.AuthHeader) context.Context {
func SetAuthHeaders(ctx context.Context, header *AuthHeader) context.Context {
	return context.WithValue(ctx, authHeadersKey, header)
}
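Note: with the accessors moved onto the middleware-local AuthHeader type, a caller's round-trip looks like this (sketch only; the header is normally populated by the auth center during Authenticate):

hdr := &AuthHeader{} // filled in by the auth center in real requests
ctx := SetAuthHeaders(context.Background(), hdr)

got, err := GetAuthHeaders(ctx)
if err != nil {
	// no auth headers were attached to this context
}
_ = got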
|
||||
|
||||
|
|
|
@@ -6,6 +6,7 @@ import (
	"fmt"
	"sync"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"

@@ -28,12 +29,21 @@ type FrostFS interface {
	SystemDNS(context.Context) (string, error)
}

type Settings interface {
	FormContainerZone(ns string) (zone string, isDefault bool)
}

type Config struct {
	FrostFS    FrostFS
	RPCAddress string
	Settings   Settings
}

type BucketResolver struct {
	rpcAddress string
	frostfs    FrostFS
	settings   Settings

	mu        sync.RWMutex
	resolvers []*Resolver
}

@@ -58,6 +68,8 @@ func NewBucketResolver(resolverNames []string, cfg *Config) (*BucketResolver, er
	}

	return &BucketResolver{
		rpcAddress: cfg.RPCAddress,
		frostfs:    cfg.FrostFS,
		resolvers:  resolvers,
	}, nil
}

@@ -100,7 +112,7 @@ func (r *BucketResolver) Resolve(ctx context.Context, bktName string) (cnrID cid
	return cnrID, ErrNoResolvers
}

func (r *BucketResolver) UpdateResolvers(resolverNames []string, cfg *Config) error {
func (r *BucketResolver) UpdateResolvers(resolverNames []string) error {
	r.mu.Lock()
	defer r.mu.Unlock()

@@ -108,6 +120,12 @@ func (r *BucketResolver) UpdateResolvers(resolverNames []string, cfg *Config) er
		return nil
	}

	cfg := &Config{
		FrostFS:    r.frostfs,
		RPCAddress: r.rpcAddress,
		Settings:   r.settings,
	}

	resolvers, err := createResolvers(resolverNames, cfg)
	if err != nil {
		return err

@@ -134,28 +152,37 @@ func (r *BucketResolver) equals(resolverNames []string) bool {
func newResolver(name string, cfg *Config) (*Resolver, error) {
	switch name {
	case DNSResolver:
		return NewDNSResolver(cfg.FrostFS)
		return NewDNSResolver(cfg.FrostFS, cfg.Settings)
	case NNSResolver:
		return NewNNSResolver(cfg.RPCAddress)
		return NewNNSResolver(cfg.RPCAddress, cfg.Settings)
	default:
		return nil, fmt.Errorf("unknown resolver: %s", name)
	}
}

func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
	if frostFS == nil {
		return nil, fmt.Errorf("pool must not be nil for DNS resolver")
	}
	if settings == nil {
		return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
	}

	var dns ns.DNS

	resolveFunc := func(ctx context.Context, name string) (cid.ID, error) {
		domain, err := frostFS.SystemDNS(ctx)
		var err error
		reqInfo := middleware.GetReqInfo(ctx)

		zone, isDefault := settings.FormContainerZone(reqInfo.Namespace)
		if isDefault {
			zone, err = frostFS.SystemDNS(ctx)
			if err != nil {
				return cid.ID{}, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
			}
		}

		domain = name + "." + domain
		domain := name + "." + zone
		cnrID, err := dns.ResolveContainerName(domain)
		if err != nil {
			return cid.ID{}, fmt.Errorf("couldn't resolve container '%s' as '%s': %w", name, domain, err)

@@ -169,10 +196,13 @@ func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
	}, nil
}

func NewNNSResolver(address string) (*Resolver, error) {
func NewNNSResolver(address string, settings Settings) (*Resolver, error) {
	if address == "" {
		return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
	}
	if settings == nil {
		return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
	}

	var nns ns.NNS

@@ -180,10 +210,14 @@ func NewNNSResolver(address string) (*Resolver, error) {
		return nil, fmt.Errorf("dial %s: %w", address, err)
	}

	resolveFunc := func(_ context.Context, name string) (cid.ID, error) {
	resolveFunc := func(ctx context.Context, name string) (cid.ID, error) {
		var d container.Domain
		d.SetName(name)

		reqInfo := middleware.GetReqInfo(ctx)
		zone, _ := settings.FormContainerZone(reqInfo.Namespace)
		d.SetZone(zone)

		cnrID, err := nns.ResolveContainerDomain(d)
		if err != nil {
			return cid.ID{}, fmt.Errorf("couldn't resolve container '%s': %w", name, err)
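Note: both resolvers only consume Settings.FormContainerZone. A hypothetical implementation, just to make the contract concrete (the zone naming scheme shown is an assumption, not taken from this diff):

type resolverSettings struct{}

// FormContainerZone returns the NNS/DNS zone for a namespace. The root
// namespace reports isDefault=true so the DNS resolver falls back to the
// SystemDNS value; any other namespace gets a zone of its own.
func (resolverSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
	if ns == "" {
		return "container", true
	}
	return ns + ".ns", false
}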
api/router.go

@@ -5,12 +5,12 @@ import (
	"fmt"
	"net/http"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	s3middleware "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
	"go.uber.org/zap"

@@ -90,29 +90,68 @@ type (
	}
)

func AttachChi(api *chi.Mux, domains []string, throttle middleware.ThrottleOpts, h Handler, center auth.Center, log *zap.Logger, appMetrics *metrics.AppMetrics) {
type Settings interface {
	s3middleware.RequestSettings
	s3middleware.PolicySettings
	s3middleware.MetricsSettings
}

type FrostFSID interface {
	s3middleware.FrostFSIDValidator
	s3middleware.FrostFSIDInformer
}

type Config struct {
	Throttle middleware.ThrottleOpts
	Handler  Handler
	Center   s3middleware.Center
	Log      *zap.Logger
	Metrics  *metrics.AppMetrics

	MiddlewareSettings Settings

	// Domains optional. If empty no virtual hosted domains will be attached.
	Domains []string

	FrostfsID FrostFSID

	FrostFSIDValidation bool

	PolicyChecker engine.ChainRouter
}

func NewRouter(cfg Config) *chi.Mux {
	api := chi.NewRouter()
	api.Use(
		s3middleware.Request(log),
		middleware.ThrottleWithOpts(throttle),
		s3middleware.Request(cfg.Log, cfg.MiddlewareSettings),
		middleware.ThrottleWithOpts(cfg.Throttle),
		middleware.Recoverer,
		s3middleware.Tracing(),
		s3middleware.Metrics(log, h.ResolveBucket, appMetrics),
		s3middleware.LogSuccessResponse(log),
		s3middleware.Auth(center, log),
		s3middleware.Metrics(cfg.Log, cfg.Handler.ResolveBucket, cfg.Metrics, cfg.MiddlewareSettings),
		s3middleware.LogSuccessResponse(cfg.Log),
		s3middleware.Auth(cfg.Center, cfg.Log),
	)

	if cfg.FrostFSIDValidation {
		api.Use(s3middleware.FrostfsIDValidation(cfg.FrostfsID, cfg.Log))
	}

	api.Use(s3middleware.PolicyCheck(cfg.PolicyChecker, cfg.FrostfsID, cfg.MiddlewareSettings, cfg.Domains, cfg.Log))

	defaultRouter := chi.NewRouter()
	defaultRouter.Mount(fmt.Sprintf("/{%s}", s3middleware.BucketURLPrm), bucketRouter(h, log))
	defaultRouter.Get("/", named("ListBuckets", h.ListBucketsHandler))
	defaultRouter.Mount(fmt.Sprintf("/{%s}", s3middleware.BucketURLPrm), bucketRouter(cfg.Handler, cfg.Log))
	defaultRouter.Get("/", named("ListBuckets", cfg.Handler.ListBucketsHandler))

	hr := NewHostBucketRouter("bucket")
	hr.Default(defaultRouter)
	for _, domain := range domains {
		hr.Map(domain, bucketRouter(h, log))
	for _, domain := range cfg.Domains {
		hr.Map(domain, bucketRouter(cfg.Handler, cfg.Log))
	}
	api.Mount("/", hr)

	attachErrorHandler(api)

	return api
}

func named(name string, handlerFunc http.HandlerFunc) http.HandlerFunc {
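Note: a sketch of how a caller assembles the new Config for NewRouter (field values are placeholders; in the gateway these come from application settings and the policy/frostfsid clients):

router := NewRouter(Config{
	Throttle:            middleware.ThrottleOpts{Limit: 10, BacklogTimeout: 30 * time.Second},
	Handler:             handler,         // implements api.Handler
	Center:              authCenter,      // implements s3middleware.Center
	Log:                 logger,
	Metrics:             appMetrics,
	MiddlewareSettings:  settings,        // implements Settings (namespace header, policy defaults, metrics)
	Domains:             []string{"s3.example.com"},
	FrostfsID:           frostfsIDClient, // group lookups and key validation
	FrostFSIDValidation: true,
	PolicyChecker:       policyEngine,    // any engine.ChainRouter, e.g. the in-memory engine used in tests
})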
@ -160,139 +199,139 @@ func bucketRouter(h Handler, log *zap.Logger) chi.Router {
|
|||
|
||||
bktRouter.Options("/", h.Preflight)
|
||||
|
||||
bktRouter.Head("/", named("HeadBucket", h.HeadBucketHandler))
|
||||
bktRouter.Head("/", named(s3middleware.HeadBucketOperation, h.HeadBucketHandler))
|
||||
|
||||
// GET method handlers
|
||||
bktRouter.Group(func(r chi.Router) {
|
||||
r.Method(http.MethodGet, "/", NewHandlerFilter().
|
||||
Add(NewFilter().
|
||||
Queries("uploads").
|
||||
Handler(named("ListMultipartUploads", h.ListMultipartUploadsHandler))).
|
||||
Queries(s3middleware.UploadsQuery).
|
||||
Handler(named(s3middleware.ListMultipartUploadsOperation, h.ListMultipartUploadsHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("location").
|
||||
Handler(named("GetBucketLocation", h.GetBucketLocationHandler))).
|
||||
Queries(s3middleware.LocationQuery).
|
||||
Handler(named(s3middleware.GetBucketLocationOperation, h.GetBucketLocationHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("policy").
|
||||
Handler(named("GetBucketPolicy", h.GetBucketPolicyHandler))).
|
||||
Queries(s3middleware.PolicyQuery).
|
||||
Handler(named(s3middleware.GetBucketPolicyOperation, h.GetBucketPolicyHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("lifecycle").
|
||||
Handler(named("GetBucketLifecycle", h.GetBucketLifecycleHandler))).
|
||||
Queries(s3middleware.LifecycleQuery).
|
||||
Handler(named(s3middleware.GetBucketLifecycleOperation, h.GetBucketLifecycleHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("encryption").
|
||||
Handler(named("GetBucketEncryption", h.GetBucketEncryptionHandler))).
|
||||
Queries(s3middleware.EncryptionQuery).
|
||||
Handler(named(s3middleware.GetBucketEncryptionOperation, h.GetBucketEncryptionHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("cors").
|
||||
Handler(named("GetBucketCors", h.GetBucketCorsHandler))).
|
||||
Queries(s3middleware.CorsQuery).
|
||||
Handler(named(s3middleware.GetBucketCorsOperation, h.GetBucketCorsHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("acl").
|
||||
Handler(named("GetBucketACL", h.GetBucketACLHandler))).
|
||||
Queries(s3middleware.ACLQuery).
|
||||
Handler(named(s3middleware.GetBucketACLOperation, h.GetBucketACLHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("website").
|
||||
Handler(named("GetBucketWebsite", h.GetBucketWebsiteHandler))).
|
||||
Queries(s3middleware.WebsiteQuery).
|
||||
Handler(named(s3middleware.GetBucketWebsiteOperation, h.GetBucketWebsiteHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("accelerate").
|
||||
Handler(named("GetBucketAccelerate", h.GetBucketAccelerateHandler))).
|
||||
Queries(s3middleware.AccelerateQuery).
|
||||
Handler(named(s3middleware.GetBucketAccelerateOperation, h.GetBucketAccelerateHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("requestPayment").
|
||||
Handler(named("GetBucketRequestPayment", h.GetBucketRequestPaymentHandler))).
|
||||
Queries(s3middleware.RequestPaymentQuery).
|
||||
Handler(named(s3middleware.GetBucketRequestPaymentOperation, h.GetBucketRequestPaymentHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("logging").
|
||||
Handler(named("GetBucketLogging", h.GetBucketLoggingHandler))).
|
||||
Queries(s3middleware.LoggingQuery).
|
||||
Handler(named(s3middleware.GetBucketLoggingOperation, h.GetBucketLoggingHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("replication").
|
||||
Handler(named("GetBucketReplication", h.GetBucketReplicationHandler))).
|
||||
Queries(s3middleware.ReplicationQuery).
|
||||
Handler(named(s3middleware.GetBucketReplicationOperation, h.GetBucketReplicationHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("tagging").
|
||||
Handler(named("GetBucketTagging", h.GetBucketTaggingHandler))).
|
||||
Queries(s3middleware.TaggingQuery).
|
||||
Handler(named(s3middleware.GetBucketTaggingOperation, h.GetBucketTaggingHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("object-lock").
|
||||
Handler(named("GetBucketObjectLockConfig", h.GetBucketObjectLockConfigHandler))).
|
||||
Queries(s3middleware.ObjectLockQuery).
|
||||
Handler(named(s3middleware.GetBucketObjectLockConfigOperation, h.GetBucketObjectLockConfigHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("versioning").
|
||||
Handler(named("GetBucketVersioning", h.GetBucketVersioningHandler))).
|
||||
Queries(s3middleware.VersioningQuery).
|
||||
Handler(named(s3middleware.GetBucketVersioningOperation, h.GetBucketVersioningHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("notification").
|
||||
Handler(named("GetBucketNotification", h.GetBucketNotificationHandler))).
|
||||
Queries(s3middleware.NotificationQuery).
|
||||
Handler(named(s3middleware.GetBucketNotificationOperation, h.GetBucketNotificationHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("events").
|
||||
Handler(named("ListenBucketNotification", h.ListenBucketNotificationHandler))).
|
||||
Queries(s3middleware.EventsQuery).
|
||||
Handler(named(s3middleware.ListenBucketNotificationOperation, h.ListenBucketNotificationHandler))).
|
||||
Add(NewFilter().
|
||||
QueriesMatch("list-type", "2", "metadata", "true").
|
||||
Handler(named("ListObjectsV2M", h.ListObjectsV2MHandler))).
|
||||
QueriesMatch(s3middleware.ListTypeQuery, "2", s3middleware.MetadataQuery, "true").
|
||||
Handler(named(s3middleware.ListObjectsV2MOperation, h.ListObjectsV2MHandler))).
|
||||
Add(NewFilter().
|
||||
QueriesMatch("list-type", "2").
|
||||
Handler(named("ListObjectsV2", h.ListObjectsV2Handler))).
|
||||
QueriesMatch(s3middleware.ListTypeQuery, "2").
|
||||
Handler(named(s3middleware.ListObjectsV2Operation, h.ListObjectsV2Handler))).
|
||||
Add(NewFilter().
|
||||
Queries("versions").
|
||||
Handler(named("ListBucketObjectVersions", h.ListBucketObjectVersionsHandler))).
|
||||
DefaultHandler(named("ListObjectsV1", h.ListObjectsV1Handler)))
|
||||
Queries(s3middleware.VersionsQuery).
|
||||
Handler(named(s3middleware.ListBucketObjectVersionsOperation, h.ListBucketObjectVersionsHandler))).
|
||||
DefaultHandler(named(s3middleware.ListObjectsV1Operation, h.ListObjectsV1Handler)))
|
||||
})
|
||||
|
||||
// PUT method handlers
|
||||
bktRouter.Group(func(r chi.Router) {
|
||||
r.Method(http.MethodPut, "/", NewHandlerFilter().
|
||||
Add(NewFilter().
|
||||
Queries("cors").
|
||||
Handler(named("PutBucketCors", h.PutBucketCorsHandler))).
|
||||
Queries(s3middleware.CorsQuery).
|
||||
Handler(named(s3middleware.PutBucketCorsOperation, h.PutBucketCorsHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("acl").
|
||||
Handler(named("PutBucketACL", h.PutBucketACLHandler))).
|
||||
Queries(s3middleware.ACLQuery).
|
||||
Handler(named(s3middleware.PutBucketACLOperation, h.PutBucketACLHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("lifecycle").
|
||||
Handler(named("PutBucketLifecycle", h.PutBucketLifecycleHandler))).
|
||||
Queries(s3middleware.LifecycleQuery).
|
||||
Handler(named(s3middleware.PutBucketLifecycleOperation, h.PutBucketLifecycleHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("encryption").
|
||||
Handler(named("PutBucketEncryption", h.PutBucketEncryptionHandler))).
|
||||
Queries(s3middleware.EncryptionQuery).
|
||||
Handler(named(s3middleware.PutBucketEncryptionOperation, h.PutBucketEncryptionHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("policy").
|
||||
Handler(named("PutBucketPolicy", h.PutBucketPolicyHandler))).
|
||||
Queries(s3middleware.PolicyQuery).
|
||||
Handler(named(s3middleware.PutBucketPolicyOperation, h.PutBucketPolicyHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("object-lock").
|
||||
Handler(named("PutBucketObjectLockConfig", h.PutBucketObjectLockConfigHandler))).
|
||||
Queries(s3middleware.ObjectLockQuery).
|
||||
Handler(named(s3middleware.PutBucketObjectLockConfigOperation, h.PutBucketObjectLockConfigHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("tagging").
|
||||
Handler(named("PutBucketTagging", h.PutBucketTaggingHandler))).
|
||||
Queries(s3middleware.TaggingQuery).
|
||||
Handler(named(s3middleware.PutBucketTaggingOperation, h.PutBucketTaggingHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("versioning").
|
||||
Handler(named("PutBucketVersioning", h.PutBucketVersioningHandler))).
|
||||
Queries(s3middleware.VersioningQuery).
|
||||
Handler(named(s3middleware.PutBucketVersioningOperation, h.PutBucketVersioningHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("notification").
|
||||
Handler(named("PutBucketNotification", h.PutBucketNotificationHandler))).
|
||||
DefaultHandler(named("CreateBucket", h.CreateBucketHandler)))
|
||||
Queries(s3middleware.NotificationQuery).
|
||||
Handler(named(s3middleware.PutBucketNotificationOperation, h.PutBucketNotificationHandler))).
|
||||
DefaultHandler(named(s3middleware.CreateBucketOperation, h.CreateBucketHandler)))
|
||||
})
|
||||
|
||||
// POST method handlers
|
||||
bktRouter.Group(func(r chi.Router) {
|
||||
r.Method(http.MethodPost, "/", NewHandlerFilter().
|
||||
Add(NewFilter().
|
||||
Queries("delete").
|
||||
Handler(named("DeleteMultipleObjects", h.DeleteMultipleObjectsHandler))).
|
||||
Queries(s3middleware.DeleteQuery).
|
||||
Handler(named(s3middleware.DeleteMultipleObjectsOperation, h.DeleteMultipleObjectsHandler))).
|
||||
// todo consider add filter to match header for defaultHandler: hdrContentType, "multipart/form-data*"
|
||||
DefaultHandler(named("PostObject", h.PostObject)))
|
||||
DefaultHandler(named(s3middleware.PostObjectOperation, h.PostObject)))
|
||||
})
|
||||
|
||||
// DELETE method handlers
|
||||
bktRouter.Group(func(r chi.Router) {
|
||||
r.Method(http.MethodDelete, "/", NewHandlerFilter().
|
||||
Add(NewFilter().
|
||||
Queries("cors").
|
||||
Handler(named("DeleteBucketCors", h.DeleteBucketCorsHandler))).
|
||||
Queries(s3middleware.CorsQuery).
|
||||
Handler(named(s3middleware.DeleteBucketCorsOperation, h.DeleteBucketCorsHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("website").
|
||||
Handler(named("DeleteBucketWebsite", h.DeleteBucketWebsiteHandler))).
|
||||
Queries(s3middleware.WebsiteQuery).
|
||||
Handler(named(s3middleware.DeleteBucketWebsiteOperation, h.DeleteBucketWebsiteHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("tagging").
|
||||
Handler(named("DeleteBucketTagging", h.DeleteBucketTaggingHandler))).
|
||||
Queries(s3middleware.TaggingQuery).
|
||||
Handler(named(s3middleware.DeleteBucketTaggingOperation, h.DeleteBucketTaggingHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("policy").
|
||||
Handler(named("PutBucketPolicy", h.PutBucketPolicyHandler))).
|
||||
Queries(s3middleware.PolicyQuery).
|
||||
Handler(named(s3middleware.DeleteBucketPolicyOperation, h.DeleteBucketPolicyHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("lifecycle").
|
||||
Handler(named("PutBucketLifecycle", h.PutBucketLifecycleHandler))).
|
||||
Queries(s3middleware.LifecycleQuery).
|
||||
Handler(named(s3middleware.PutBucketLifecycleOperation, h.PutBucketLifecycleHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("encryption").
|
||||
Handler(named("DeleteBucketEncryption", h.DeleteBucketEncryptionHandler))).
|
||||
DefaultHandler(named("DeleteBucket", h.DeleteBucketHandler)))
|
||||
Queries(s3middleware.EncryptionQuery).
|
||||
Handler(named(s3middleware.DeleteBucketEncryptionOperation, h.DeleteBucketEncryptionHandler))).
|
||||
DefaultHandler(named(s3middleware.DeleteBucketOperation, h.DeleteBucketHandler)))
|
||||
})
|
||||
|
||||
attachErrorHandler(bktRouter)
|
||||
|
@ -304,30 +343,30 @@ func objectRouter(h Handler, l *zap.Logger) chi.Router {
|
|||
objRouter := chi.NewRouter()
|
||||
objRouter.Use(s3middleware.AddObjectName(l))
|
||||
|
||||
objRouter.Head("/*", named("HeadObject", h.HeadObjectHandler))
|
||||
objRouter.Head("/*", named(s3middleware.HeadObjectOperation, h.HeadObjectHandler))
|
||||
|
||||
// GET method handlers
|
||||
objRouter.Group(func(r chi.Router) {
|
||||
r.Method(http.MethodGet, "/*", NewHandlerFilter().
|
||||
Add(NewFilter().
|
||||
Queries("uploadId").
|
||||
Handler(named("ListParts", h.ListPartsHandler))).
|
||||
Queries(s3middleware.UploadIDQuery).
|
||||
Handler(named(s3middleware.ListPartsOperation, h.ListPartsHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("acl").
|
||||
Handler(named("GetObjectACL", h.GetObjectACLHandler))).
|
||||
Queries(s3middleware.ACLQuery).
|
||||
Handler(named(s3middleware.GetObjectACLOperation, h.GetObjectACLHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("tagging").
|
||||
Handler(named("GetObjectTagging", h.GetObjectTaggingHandler))).
|
||||
Queries(s3middleware.TaggingQuery).
|
||||
Handler(named(s3middleware.GetObjectTaggingOperation, h.GetObjectTaggingHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("retention").
|
||||
Handler(named("GetObjectRetention", h.GetObjectRetentionHandler))).
|
||||
Queries(s3middleware.RetentionQuery).
|
||||
Handler(named(s3middleware.GetObjectRetentionOperation, h.GetObjectRetentionHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("legal-hold").
|
||||
Handler(named("GetObjectLegalHold", h.GetObjectLegalHoldHandler))).
|
||||
Queries(s3middleware.LegalHoldQuery).
|
||||
Handler(named(s3middleware.GetObjectLegalHoldOperation, h.GetObjectLegalHoldHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("attributes").
|
||||
Handler(named("GetObjectAttributes", h.GetObjectAttributesHandler))).
|
||||
DefaultHandler(named("GetObject", h.GetObjectHandler)))
|
||||
Queries(s3middleware.AttributesQuery).
|
||||
Handler(named(s3middleware.GetObjectAttributesOperation, h.GetObjectAttributesHandler))).
|
||||
DefaultHandler(named(s3middleware.GetObjectOperation, h.GetObjectHandler)))
|
||||
})
|
||||
|
||||
// PUT method handlers
|
||||
|
@ -335,51 +374,51 @@ func objectRouter(h Handler, l *zap.Logger) chi.Router {
|
|||
r.Method(http.MethodPut, "/*", NewHandlerFilter().
|
||||
Add(NewFilter().
|
||||
Headers(AmzCopySource).
|
||||
Queries("partNumber", "uploadId").
|
||||
Handler(named("UploadPartCopy", h.UploadPartCopy))).
|
||||
Queries(s3middleware.PartNumberQuery, s3middleware.UploadIDQuery).
|
||||
Handler(named(s3middleware.UploadPartCopyOperation, h.UploadPartCopy))).
|
||||
Add(NewFilter().
|
||||
Queries("partNumber", "uploadId").
|
||||
Handler(named("UploadPart", h.UploadPartHandler))).
|
||||
Queries(s3middleware.PartNumberQuery, s3middleware.UploadIDQuery).
|
||||
Handler(named(s3middleware.UploadPartOperation, h.UploadPartHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("acl").
|
||||
Handler(named("PutObjectACL", h.PutObjectACLHandler))).
|
||||
Queries(s3middleware.ACLQuery).
|
||||
Handler(named(s3middleware.PutObjectACLOperation, h.PutObjectACLHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("tagging").
|
||||
Handler(named("PutObjectTagging", h.PutObjectTaggingHandler))).
|
||||
Queries(s3middleware.TaggingQuery).
|
||||
Handler(named(s3middleware.PutObjectTaggingOperation, h.PutObjectTaggingHandler))).
|
||||
Add(NewFilter().
|
||||
Headers(AmzCopySource).
|
||||
Handler(named("CopyObject", h.CopyObjectHandler))).
|
||||
Handler(named(s3middleware.CopyObjectOperation, h.CopyObjectHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("retention").
|
||||
Handler(named("PutObjectRetention", h.PutObjectRetentionHandler))).
|
||||
Queries(s3middleware.RetentionQuery).
|
||||
Handler(named(s3middleware.PutObjectRetentionOperation, h.PutObjectRetentionHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("legal-hold").
|
||||
Handler(named("PutObjectLegalHold", h.PutObjectLegalHoldHandler))).
|
||||
DefaultHandler(named("PutObject", h.PutObjectHandler)))
|
||||
Queries(s3middleware.LegalHoldQuery).
|
||||
Handler(named(s3middleware.PutObjectLegalHoldOperation, h.PutObjectLegalHoldHandler))).
|
||||
DefaultHandler(named(s3middleware.PutObjectOperation, h.PutObjectHandler)))
|
||||
})
|
||||
|
||||
// POST method handlers
|
||||
objRouter.Group(func(r chi.Router) {
|
||||
r.Method(http.MethodPost, "/*", NewHandlerFilter().
|
||||
Add(NewFilter().
|
||||
Queries("uploadId").
|
||||
Handler(named("CompleteMultipartUpload", h.CompleteMultipartUploadHandler))).
|
||||
Queries(s3middleware.UploadIDQuery).
|
||||
Handler(named(s3middleware.CompleteMultipartUploadOperation, h.CompleteMultipartUploadHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("uploads").
|
||||
Handler(named("CreateMultipartUpload", h.CreateMultipartUploadHandler))).
|
||||
DefaultHandler(named("SelectObjectContent", h.SelectObjectContentHandler)))
|
||||
Queries(s3middleware.UploadsQuery).
|
||||
Handler(named(s3middleware.CreateMultipartUploadOperation, h.CreateMultipartUploadHandler))).
|
||||
DefaultHandler(named(s3middleware.SelectObjectContentOperation, h.SelectObjectContentHandler)))
|
||||
})
|
||||
|
||||
// DELETE method handlers
|
||||
objRouter.Group(func(r chi.Router) {
|
||||
r.Method(http.MethodDelete, "/*", NewHandlerFilter().
|
||||
Add(NewFilter().
|
||||
Queries("uploadId").
|
||||
Handler(named("AbortMultipartUpload", h.AbortMultipartUploadHandler))).
|
||||
Queries(s3middleware.UploadIDQuery).
|
||||
Handler(named(s3middleware.AbortMultipartUploadOperation, h.AbortMultipartUploadHandler))).
|
||||
Add(NewFilter().
|
||||
Queries("tagging").
|
||||
Handler(named("DeleteObjectTagging", h.DeleteObjectTaggingHandler))).
|
||||
DefaultHandler(named("DeleteObject", h.DeleteObjectHandler)))
|
||||
Queries(s3middleware.TaggingQuery).
|
||||
Handler(named(s3middleware.DeleteObjectTaggingOperation, h.DeleteObjectTaggingHandler))).
|
||||
DefaultHandler(named(s3middleware.DeleteObjectOperation, h.DeleteObjectHandler)))
|
||||
})
|
||||
|
||||
attachErrorHandler(objRouter)
|
||||
|
|
|
@@ -6,17 +6,34 @@ import (
	"net/http"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"github.com/stretchr/testify/require"
)

const FrostfsNamespaceHeader = "X-Frostfs-Namespace"

type centerMock struct {
}

func (c *centerMock) Authenticate(*http.Request) (*auth.Box, error) {
	return &auth.Box{}, nil
func (c *centerMock) Authenticate(*http.Request) (*middleware.Box, error) {
	return &middleware.Box{}, nil
}

type middlewareSettingsMock struct {
	denyByDefault bool
}

func (r *middlewareSettingsMock) NamespaceHeader() string {
	return FrostfsNamespaceHeader
}

func (r *middlewareSettingsMock) ResolveNamespaceAlias(ns string) string {
	return ns
}

func (r *middlewareSettingsMock) PolicyDenyByDefault() bool {
	return r.denyByDefault
}

type handlerMock struct {
|
||||
|
@@ -100,7 +117,7 @@ func (h *handlerMock) PutObjectLegalHoldHandler(http.ResponseWriter, *http.Reque

func (h *handlerMock) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
	res := &handlerResult{
		Method:  "PutObject",
		Method:  middleware.PutObjectOperation,
		ReqInfo: middleware.GetReqInfo(r.Context()),
	}

@@ -285,9 +302,13 @@ func (h *handlerMock) CreateBucketHandler(http.ResponseWriter, *http.Request) {
	panic("implement me")
}

func (h *handlerMock) HeadBucketHandler(http.ResponseWriter, *http.Request) {
	//TODO implement me
	panic("implement me")
func (h *handlerMock) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
	res := &handlerResult{
		Method:  middleware.HeadBucketOperation,
		ReqInfo: middleware.GetReqInfo(r.Context()),
	}

	h.writeResponse(w, res)
}

func (h *handlerMock) PostObject(http.ResponseWriter, *http.Request) {

@@ -320,9 +341,13 @@ func (h *handlerMock) DeleteBucketHandler(http.ResponseWriter, *http.Request) {
	panic("implement me")
}

func (h *handlerMock) ListBucketsHandler(http.ResponseWriter, *http.Request) {
	//TODO implement me
	panic("implement me")
func (h *handlerMock) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
	res := &handlerResult{
		Method:  middleware.ListBucketsOperation,
		ReqInfo: middleware.GetReqInfo(r.Context()),
	}

	h.writeResponse(w, res)
}

func (h *handlerMock) Preflight(http.ResponseWriter, *http.Request) {
|
|
|
@@ -2,6 +2,7 @@ package api

import (
	"encoding/json"
	"encoding/xml"
	"fmt"
	"io"
	"net/http"

@@ -10,13 +11,56 @@ import (
	"testing"
	"time"

	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	s3middleware "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
	engineiam "git.frostfs.info/TrueCloudLab/policy-engine/iam"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
	"git.frostfs.info/TrueCloudLab/policy-engine/schema/s3"
	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"
)

type routerMock struct {
	router             *chi.Mux
	cfg                Config
	middlewareSettings *middlewareSettingsMock
	policyChecker      engine.LocalOverrideEngine
}

func (m *routerMock) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	m.router.ServeHTTP(w, r)
}

func prepareRouter(t *testing.T) *routerMock {
	middlewareSettings := &middlewareSettingsMock{}
	policyChecker := inmemory.NewInMemoryLocalOverrides()

	cfg := Config{
		Throttle: middleware.ThrottleOpts{
			Limit:          10,
			BacklogTimeout: 30 * time.Second,
		},
		Handler:            &handlerMock{t: t},
		Center:             &centerMock{},
		Log:                zaptest.NewLogger(t),
		Metrics:            &metrics.AppMetrics{},
		MiddlewareSettings: middlewareSettings,
		PolicyChecker:      policyChecker,
		Domains:            []string{"domain1", "domain2"},
	}
	return &routerMock{
		router:             NewRouter(cfg),
		cfg:                cfg,
		middlewareSettings: middlewareSettings,
		policyChecker:      policyChecker,
	}
}

func TestRouterUploadPart(t *testing.T) {
	chiRouter := prepareRouter(t)
|
||||
|
@@ -111,20 +155,102 @@ func TestRouterObjectEscaping(t *testing.T) {
	}
}

func prepareRouter(t *testing.T) *chi.Mux {
	throttleOps := middleware.ThrottleOpts{
		Limit:          10,
		BacklogTimeout: 30 * time.Second,
func TestPolicyChecker(t *testing.T) {
	chiRouter := prepareRouter(t)
	namespace := "custom-ns"
	bktName, objName := "bucket", "object"
	target := fmt.Sprintf("/%s/%s", bktName, objName)

	ruleChain := &chain.Chain{
		ID: chain.ID("id"),
		Rules: []chain.Rule{{
			Status:    chain.AccessDenied,
			Actions:   chain.Actions{Names: []string{"*"}},
			Resources: chain.Resources{Names: []string{fmt.Sprintf(s3.ResourceFormatS3BucketObjects, bktName)}},
		}},
	}

	handleMock := &handlerMock{t: t}
	cntrMock := &centerMock{}
	log := zaptest.NewLogger(t)
	metric := &metrics.AppMetrics{}
	_, _, err := chiRouter.policyChecker.MorphRuleChainStorage().AddMorphRuleChain(chain.S3, engine.NamespaceTarget(namespace), ruleChain)
	require.NoError(t, err)

	chiRouter := chi.NewRouter()
	AttachChi(chiRouter, nil, throttleOps, handleMock, cntrMock, log, metric)
	return chiRouter
	// check we can access 'bucket' in default namespace
	w, r := httptest.NewRecorder(), httptest.NewRequest(http.MethodPut, target, nil)
	chiRouter.ServeHTTP(w, r)
	resp := readResponse(t, w)
	require.Equal(t, s3middleware.PutObjectOperation, resp.Method)

	// check we can access 'other-bucket' in custom namespace
	w, r = httptest.NewRecorder(), httptest.NewRequest(http.MethodPut, "/other-bucket/object", nil)
	r.Header.Set(FrostfsNamespaceHeader, namespace)
	chiRouter.ServeHTTP(w, r)
	resp = readResponse(t, w)
	require.Equal(t, s3middleware.PutObjectOperation, resp.Method)

	// check we cannot access 'bucket' in custom namespace
	w, r = httptest.NewRecorder(), httptest.NewRequest(http.MethodPut, target, nil)
	r.Header.Set(FrostfsNamespaceHeader, namespace)
	chiRouter.ServeHTTP(w, r)
	assertAPIError(t, w, apiErrors.ErrAccessDenied)
}

func TestPolicyCheckerReqTypeDetermination(t *testing.T) {
	chiRouter := prepareRouter(t)
	bktName, objName := "bucket", "object"

	policy := engineiam.Policy{
		Statement: []engineiam.Statement{{
			Principal: map[engineiam.PrincipalType][]string{engineiam.Wildcard: {}},
			Effect:    engineiam.AllowEffect,
			Action:    engineiam.Action{"s3:*"},
			Resource:  engineiam.Resource{fmt.Sprintf(s3.ResourceFormatS3All)},
		}},
	}

	ruleChain, err := engineiam.ConvertToS3Chain(policy, nil)
	require.NoError(t, err)

	_, _, err = chiRouter.policyChecker.MorphRuleChainStorage().AddMorphRuleChain(chain.S3, engine.NamespaceTarget(""), ruleChain)
	require.NoError(t, err)

	chiRouter.middlewareSettings.denyByDefault = true
	t.Run("can list buckets", func(t *testing.T) {
		w, r := httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "/", nil)
		chiRouter.ServeHTTP(w, r)
		resp := readResponse(t, w)
		require.Equal(t, s3middleware.ListBucketsOperation, resp.Method)
	})

	t.Run("can head 'bucket'", func(t *testing.T) {
		w, r := httptest.NewRecorder(), httptest.NewRequest(http.MethodHead, "/"+bktName, nil)
		chiRouter.ServeHTTP(w, r)
		resp := readResponse(t, w)
		require.Equal(t, s3middleware.HeadBucketOperation, resp.Method)
	})

	t.Run("can put object into 'bucket'", func(t *testing.T) {
		w, r := httptest.NewRecorder(), httptest.NewRequest(http.MethodPut, fmt.Sprintf("/%s/%s", bktName, objName), nil)
		chiRouter.ServeHTTP(w, r)
		resp := readResponse(t, w)
		require.Equal(t, s3middleware.PutObjectOperation, resp.Method)
	})
}

func TestDefaultBehaviorPolicyChecker(t *testing.T) {
	chiRouter := prepareRouter(t)
	bktName, objName := "bucket", "object"
	target := fmt.Sprintf("/%s/%s", bktName, objName)

	// check we can access bucket if rules not found
	w, r := httptest.NewRecorder(), httptest.NewRequest(http.MethodPut, target, nil)
	chiRouter.ServeHTTP(w, r)
	resp := readResponse(t, w)
	require.Equal(t, s3middleware.PutObjectOperation, resp.Method)

	// check we cannot access if rules not found when settings is enabled
	chiRouter.middlewareSettings.denyByDefault = true
	w, r = httptest.NewRecorder(), httptest.NewRequest(http.MethodPut, target, nil)
	chiRouter.ServeHTTP(w, r)
	assertAPIError(t, w, apiErrors.ErrAccessDenied)
}
|
||||
|
||||
func readResponse(t *testing.T, w *httptest.ResponseRecorder) handlerResult {
|
||||
|
@ -137,3 +263,18 @@ func readResponse(t *testing.T, w *httptest.ResponseRecorder) handlerResult {
|
|||
require.NoErrorf(t, err, "actual body: '%s'", string(resData))
|
||||
return res
|
||||
}
|
||||
|
||||
func assertAPIError(t *testing.T, w *httptest.ResponseRecorder, expectedErrorCode apiErrors.ErrorCode) {
|
||||
actualErrorResponse := &s3middleware.ErrorResponse{}
|
||||
err := xml.NewDecoder(w.Result().Body).Decode(actualErrorResponse)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedError := apiErrors.GetAPIError(expectedErrorCode)
|
||||
|
||||
require.Equal(t, expectedError.HTTPStatusCode, w.Code)
|
||||
require.Equal(t, expectedError.Code, actualErrorResponse.Code)
|
||||
|
||||
if expectedError.ErrCode != apiErrors.ErrInternalError {
|
||||
require.Contains(t, actualErrorResponse.Message, expectedError.Description)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
|
@ -81,6 +82,11 @@ type FrostFS interface {
|
|||
TimeToEpoch(context.Context, time.Time) (uint64, uint64, error)
|
||||
}
|
||||
|
||||
// FrostFSID represents interface to interact with frostfsid contract.
|
||||
type FrostFSID interface {
|
||||
RegisterPublicKey(ns string, key *keys.PublicKey) error
|
||||
}
|
||||
|
||||
// Agent contains client communicating with FrostFS and logger.
|
||||
type Agent struct {
|
||||
frostFS FrostFS
|
||||
|
@ -108,6 +114,7 @@ type (
|
|||
Lifetime time.Duration
|
||||
AwsCliCredentialsFile string
|
||||
ContainerPolicies ContainerPolicies
|
||||
CustomAttributes []object.Attribute
|
||||
}
|
||||
|
||||
// UpdateSecretOptions contains options for passing to Agent.UpdateSecret method.
|
||||
|
@ -116,6 +123,7 @@ type (
|
|||
GatesPublicKeys []*keys.PublicKey
|
||||
Address oid.Address
|
||||
GatePrivateKey *keys.PrivateKey
|
||||
CustomAttributes []object.Attribute
|
||||
}
|
||||
|
||||
tokenUpdateOptions struct {
|
||||
|
@ -271,9 +279,23 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
|
|||
a.log.Info(logs.StoreBearerTokenIntoFrostFS,
|
||||
zap.Stringer("owner_tkn", idOwner))
|
||||
|
||||
creds := tokens.New(a.frostFS, secrets.EphemeralKey, cache.DefaultAccessBoxConfig(a.log))
|
||||
cfg := tokens.Config{
|
||||
FrostFS: a.frostFS,
|
||||
Key: secrets.EphemeralKey,
|
||||
CacheConfig: cache.DefaultAccessBoxConfig(a.log),
|
||||
}
|
||||
|
||||
addr, err := creds.Put(ctx, id, idOwner, box, lifetime.Exp, options.GatesPublicKeys...)
|
||||
creds := tokens.New(cfg)
|
||||
|
||||
prm := tokens.CredentialsParam{
|
||||
OwnerID: idOwner,
|
||||
AccessBox: box,
|
||||
Expiration: lifetime.Exp,
|
||||
Keys: options.GatesPublicKeys,
|
||||
CustomAttributes: options.CustomAttributes,
|
||||
}
|
||||
|
||||
addr, err := creds.Put(ctx, id, prm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to put creds: %w", err)
|
||||
}
|
||||
|
@ -314,7 +336,13 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
|
|||
|
||||
// UpdateSecret updates an auth token (change list of gates that can use credential), puts new cred version to the FrostFS network and writes to io.Writer a result.
|
||||
func (a *Agent) UpdateSecret(ctx context.Context, w io.Writer, options *UpdateSecretOptions) error {
|
||||
creds := tokens.New(a.frostFS, options.GatePrivateKey, cache.DefaultAccessBoxConfig(a.log))
|
||||
cfg := tokens.Config{
|
||||
FrostFS: a.frostFS,
|
||||
Key: options.GatePrivateKey,
|
||||
CacheConfig: cache.DefaultAccessBoxConfig(a.log),
|
||||
}
|
||||
|
||||
creds := tokens.New(cfg)
|
||||
|
||||
box, err := creds.GetBox(ctx, options.Address)
|
||||
if err != nil {
|
||||
|
@ -349,8 +377,16 @@ func (a *Agent) UpdateSecret(ctx context.Context, w io.Writer, options *UpdateSe
|
|||
a.log.Info(logs.UpdateAccessCredObjectIntoFrostFS,
|
||||
zap.Stringer("owner_tkn", idOwner))
|
||||
|
||||
prm := tokens.CredentialsParam{
|
||||
OwnerID: idOwner,
|
||||
AccessBox: updatedBox,
|
||||
Expiration: lifetime.Exp,
|
||||
Keys: options.GatesPublicKeys,
|
||||
CustomAttributes: options.CustomAttributes,
|
||||
}
|
||||
|
||||
oldAddr := options.Address
|
||||
addr, err := creds.Update(ctx, oldAddr, idOwner, updatedBox, lifetime.Exp, options.GatesPublicKeys...)
|
||||
addr, err := creds.Update(ctx, oldAddr, prm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update creds: %w", err)
|
||||
}
|
||||
|
@ -382,7 +418,13 @@ func getLifetimeFromGateData(gateData *accessbox.GateData) lifetimeOptions {
|
|||
// ObtainSecret receives an existing secret access key from FrostFS and
|
||||
// writes to io.Writer the secret access key.
|
||||
func (a *Agent) ObtainSecret(ctx context.Context, w io.Writer, options *ObtainSecretOptions) error {
|
||||
bearerCreds := tokens.New(a.frostFS, options.GatePrivateKey, cache.DefaultAccessBoxConfig(a.log))
|
||||
cfg := tokens.Config{
|
||||
FrostFS: a.frostFS,
|
||||
Key: options.GatePrivateKey,
|
||||
CacheConfig: cache.DefaultAccessBoxConfig(a.log),
|
||||
}
|
||||
|
||||
bearerCreds := tokens.New(cfg)
|
||||
|
||||
var addr oid.Address
|
||||
if err := addr.DecodeString(options.SecretAddress); err != nil {
|
||||
|
|
|
@ -12,6 +12,10 @@ type (
|
|||
businessLogicError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
frostFSIDInitError struct {
|
||||
err error
|
||||
}
|
||||
)
|
||||
|
||||
func wrapPreparationError(e error) error {
|
||||
|
@ -38,6 +42,14 @@ func (e businessLogicError) Error() string {
|
|||
return e.err.Error()
|
||||
}
|
||||
|
||||
func wrapFrostFSIDInitError(e error) error {
|
||||
return frostFSIDInitError{e}
|
||||
}
|
||||
|
||||
func (e frostFSIDInitError) Error() string {
|
||||
return e.err.Error()
|
||||
}
|
||||
|
||||
// ExitCode picks corresponding error code depending on the type of error provided.
|
||||
// Returns 1 if error type is unknown.
|
||||
func ExitCode(e error) int {
|
||||
|
@ -48,6 +60,8 @@ func ExitCode(e error) int {
|
|||
return 3
|
||||
case businessLogicError:
|
||||
return 4
|
||||
case frostFSIDInitError:
|
||||
return 4
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/frostfsid"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
|
@ -19,7 +20,8 @@ var issueSecretCmd = &cobra.Command{
|
|||
Use: "issue-secret",
|
||||
Short: "Issue a secret in FrostFS network",
|
||||
Long: "Creates new s3 credentials to use with frostfs-s3-gw",
|
||||
Example: `frostfs-s3-authmate issue-secret --wallet wallet.json --peer s01.frostfs.devenv:8080 --gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a`,
|
||||
Example: `frostfs-s3-authmate issue-secret --wallet wallet.json --peer s01.frostfs.devenv:8080 --gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a
|
||||
frostfs-s3-authmate issue-secret --wallet wallet.json --peer s01.frostfs.devenv:8080 --gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a --attributes LOGIN=NUUb82KR2JrVByHs2YSKgtK29gKnF5q6Vt`,
|
||||
RunE: runIssueSecretCmd,
|
||||
}
|
||||
|
||||
|
@ -37,6 +39,11 @@ const (
|
|||
lifetimeFlag = "lifetime"
|
||||
containerPolicyFlag = "container-policy"
|
||||
awsCLICredentialFlag = "aws-cli-credentials"
|
||||
frostfsIDFlag = "frostfsid"
|
||||
frostfsIDProxyFlag = "frostfsid-proxy"
|
||||
frostfsIDNamespaceFlag = "frostfsid-namespace"
|
||||
rpcEndpointFlag = "rpc-endpoint"
|
||||
attributesFlag = "attributes"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -77,6 +84,11 @@ func initIssueSecretCmd() {
|
|||
issueSecretCmd.Flags().Duration(poolHealthcheckTimeoutFlag, defaultPoolHealthcheckTimeout, "Timeout for request to node to decide if it is alive")
|
||||
issueSecretCmd.Flags().Duration(poolRebalanceIntervalFlag, defaultPoolRebalanceInterval, "Interval for updating nodes health status")
|
||||
issueSecretCmd.Flags().Duration(poolStreamTimeoutFlag, defaultPoolStreamTimeout, "Timeout for individual operation in streaming RPC")
|
||||
issueSecretCmd.Flags().String(frostfsIDFlag, "", "FrostfsID contract hash (LE) or name in NNS to register public key in contract (rpc-endpoint flag also must be provided)")
|
||||
issueSecretCmd.Flags().String(frostfsIDProxyFlag, "", "Proxy contract hash (LE) or name in NNS to use when interact with frostfsid contract")
|
||||
issueSecretCmd.Flags().String(frostfsIDNamespaceFlag, "", "Namespace to register public key in frostfsid contract")
|
||||
issueSecretCmd.Flags().String(rpcEndpointFlag, "", "NEO node RPC address")
|
||||
issueSecretCmd.Flags().String(attributesFlag, "", "User attributes in form of Key1=Value1,Key2=Value2 (note: you cannot override system attributes)")
|
||||
|
||||
_ = issueSecretCmd.MarkFlagRequired(walletFlag)
|
||||
_ = issueSecretCmd.MarkFlagRequired(peerFlag)
|
||||
|
@ -152,6 +164,34 @@ func runIssueSecretCmd(cmd *cobra.Command, _ []string) error {
|
|||
return wrapFrostFSInitError(fmt.Errorf("failed to create FrostFS component: %s", err))
|
||||
}
|
||||
|
||||
frostFSID := viper.GetString(frostfsIDFlag)
|
||||
if frostFSID != "" {
|
||||
rpcAddress := viper.GetString(rpcEndpointFlag)
|
||||
if rpcAddress == "" {
|
||||
return wrapPreparationError(fmt.Errorf("you can use '%s' flag only along with '%s'", frostfsIDFlag, rpcEndpointFlag))
|
||||
}
|
||||
cfg := frostfsid.Config{
|
||||
RPCAddress: rpcAddress,
|
||||
Contract: frostFSID,
|
||||
ProxyContract: viper.GetString(frostfsIDProxyFlag),
|
||||
Key: key,
|
||||
}
|
||||
|
||||
frostfsIDClient, err := createFrostFSID(ctx, log, cfg)
|
||||
if err != nil {
|
||||
return wrapFrostFSIDInitError(err)
|
||||
}
|
||||
|
||||
if err = frostfsIDClient.RegisterPublicKey(viper.GetString(frostfsIDNamespaceFlag), key.PublicKey()); err != nil {
|
||||
return wrapBusinessLogicError(fmt.Errorf("failed to register key in frostfsid: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
customAttrs, err := parseObjectAttrs(viper.GetString(attributesFlag))
|
||||
if err != nil {
|
||||
return wrapPreparationError(fmt.Errorf("failed to parse attributes: %s", err))
|
||||
}
|
||||
|
||||
issueSecretOptions := &authmate.IssueSecretOptions{
|
||||
Container: authmate.ContainerOptions{
|
||||
ID: cnrID,
|
||||
|
@ -167,6 +207,7 @@ func runIssueSecretCmd(cmd *cobra.Command, _ []string) error {
|
|||
ContainerPolicies: policies,
|
||||
Lifetime: lifetime,
|
||||
AwsCliCredentialsFile: viper.GetString(awsCLICredentialFlag),
|
||||
CustomAttributes: customAttrs,
|
||||
}
|
||||
|
||||
if err = authmate.New(log, frostFS).IssueSecret(ctx, os.Stdout, issueSecretOptions); err != nil {
|
||||
@ -7,6 +7,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/frostfsid"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
|
@ -39,6 +40,11 @@ func initUpdateSecretCmd() {
|
|||
updateSecretCmd.Flags().Duration(poolHealthcheckTimeoutFlag, defaultPoolHealthcheckTimeout, "Timeout for request to node to decide if it is alive")
|
||||
updateSecretCmd.Flags().Duration(poolRebalanceIntervalFlag, defaultPoolRebalanceInterval, "Interval for updating nodes health status")
|
||||
updateSecretCmd.Flags().Duration(poolStreamTimeoutFlag, defaultPoolStreamTimeout, "Timeout for individual operation in streaming RPC")
|
||||
updateSecretCmd.Flags().String(frostfsIDFlag, "", "FrostfsID contract hash (LE) or name in NNS to register public key in contract (rpc-endpoint flag also must be provided)")
|
||||
updateSecretCmd.Flags().String(frostfsIDProxyFlag, "", "Proxy contract hash (LE) or name in NNS to use when interacting with the frostfsid contract")
|
||||
updateSecretCmd.Flags().String(frostfsIDNamespaceFlag, "", "Namespace to register public key in frostfsid contract")
|
||||
updateSecretCmd.Flags().String(rpcEndpointFlag, "", "NEO node RPC address")
|
||||
updateSecretCmd.Flags().String(attributesFlag, "", "User attributes in the form Key1=Value1,Key2=Value2 (note: you cannot override system attributes)")
|
||||
|
||||
_ = updateSecretCmd.MarkFlagRequired(walletFlag)
|
||||
_ = updateSecretCmd.MarkFlagRequired(peerFlag)
|
||||
|
@ -94,11 +100,40 @@ func runUpdateSecretCmd(cmd *cobra.Command, _ []string) error {
|
|||
return wrapFrostFSInitError(fmt.Errorf("failed to create FrostFS component: %s", err))
|
||||
}
|
||||
|
||||
frostFSID := viper.GetString(frostfsIDFlag)
|
||||
if frostFSID != "" {
|
||||
rpcAddress := viper.GetString(rpcEndpointFlag)
|
||||
if rpcAddress == "" {
|
||||
return wrapPreparationError(fmt.Errorf("you can use '%s' flag only along with '%s'", frostfsIDFlag, rpcEndpointFlag))
|
||||
}
|
||||
cfg := frostfsid.Config{
|
||||
RPCAddress: rpcAddress,
|
||||
Contract: frostFSID,
|
||||
ProxyContract: viper.GetString(frostfsIDProxyFlag),
|
||||
Key: key,
|
||||
}
|
||||
|
||||
frostfsIDClient, err := createFrostFSID(ctx, log, cfg)
|
||||
if err != nil {
|
||||
return wrapFrostFSIDInitError(err)
|
||||
}
|
||||
|
||||
if err = frostfsIDClient.RegisterPublicKey(viper.GetString(frostfsIDNamespaceFlag), key.PublicKey()); err != nil {
|
||||
return wrapBusinessLogicError(fmt.Errorf("failed to register key in frostfsid: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
customAttrs, err := parseObjectAttrs(viper.GetString(attributesFlag))
|
||||
if err != nil {
|
||||
return wrapPreparationError(fmt.Errorf("failed to parse attributes: %s", err))
|
||||
}
|
||||
|
||||
updateSecretOptions := &authmate.UpdateSecretOptions{
|
||||
Address: accessBoxAddress,
|
||||
FrostFSKey: key,
|
||||
GatesPublicKeys: gatesPublicKeys,
|
||||
GatePrivateKey: gateKey,
|
||||
CustomAttributes: customAttrs,
|
||||
}
|
||||
|
||||
if err = authmate.New(log, frostFS).UpdateSecret(ctx, os.Stdout, updateSecretOptions); err != nil {
|
||||
@ -5,12 +5,15 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/frostfsid"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/spf13/viper"
|
||||
|
@ -141,3 +144,34 @@ func getLogger() *zap.Logger {

	return log
}

func createFrostFSID(ctx context.Context, log *zap.Logger, cfg frostfsid.Config) (authmate.FrostFSID, error) {
	log.Debug(logs.PrepareFrostfsIDClient)

	cli, err := frostfsid.New(ctx, cfg)
	if err != nil {
		return nil, fmt.Errorf("create frostfsid client: %w", err)
	}

	return cli, nil
}

func parseObjectAttrs(attributes string) ([]object.Attribute, error) {
	if len(attributes) == 0 {
		return nil, nil
	}

	rawAttrs := strings.Split(attributes, ",")

	attrs := make([]object.Attribute, len(rawAttrs))
	for i := range rawAttrs {
		k, v, found := strings.Cut(rawAttrs[i], "=")
		if !found {
			return nil, fmt.Errorf("invalid attribute format: %s", rawAttrs[i])
		}
		attrs[i].SetKey(k)
		attrs[i].SetValue(v)
	}

	return attrs, nil
}
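The `--attributes` flag parsed by `parseObjectAttrs` above accepts a comma-separated list of `Key=Value` pairs. A minimal test-style sketch of the accepted format; the package name and the testify dependency are assumptions about the surrounding repo, not taken from this diff:

package modules

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Sketch only: exercises parseObjectAttrs from the hunk above.
func TestParseObjectAttrsSketch(t *testing.T) {
	attrs, err := parseObjectAttrs("LOGIN=NUUb82KR2JrVByHs2YSKgtK29gKnF5q6Vt,Color=Red")
	require.NoError(t, err)
	require.Len(t, attrs, 2)
	require.Equal(t, "LOGIN", attrs[0].Key())
	require.Equal(t, "Color", attrs[1].Key())

	// A pair without '=' is rejected.
	_, err = parseObjectAttrs("LOGIN")
	require.Error(t, err)

	// An empty flag value yields no attributes and no error.
	attrs, err = parseObjectAttrs("")
	require.NoError(t, err)
	require.Nil(t, attrs)
}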
cmd/s3-gw/app.go (316 changed lines)
@ -4,8 +4,10 @@ import (
|
|||
"context"
|
||||
"encoding/hex"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
|
@ -14,6 +16,7 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
||||
grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
|
@ -21,24 +24,31 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/handler"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
s3middleware "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/notifications"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/frostfsid"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/policy"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/policy/contract"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/services"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/control"
|
||||
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/control/server"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/exp/slices"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
|
@ -47,7 +57,7 @@ const awsDefaultNamespace = "http://s3.amazonaws.com/doc/2006-03-01/"
|
|||
type (
|
||||
// App is the main application structure.
|
||||
App struct {
|
||||
ctr auth.Center
|
||||
ctr s3middleware.Center
|
||||
log *zap.Logger
|
||||
cfg *viper.Viper
|
||||
pool *pool.Pool
|
||||
|
@ -57,8 +67,14 @@ type (
|
|||
obj layer.Client
|
||||
api api.Handler
|
||||
|
||||
frostfsid *frostfsid.FrostFSID
|
||||
|
||||
policyStorage *policy.Storage
|
||||
|
||||
servers []Server
|
||||
|
||||
controlAPI *grpc.Server
|
||||
|
||||
metrics *metrics.AppMetrics
|
||||
bucketResolver *resolver.BucketResolver
|
||||
services []*Service
|
||||
|
@ -75,17 +91,19 @@ type (
|
|||
notificatorEnabled bool
|
||||
resolveZoneList []string
|
||||
isResolveListAllow bool // True if ResolveZoneList contains allowed zones
|
||||
frostfsidValidation bool
|
||||
|
||||
mu sync.RWMutex
|
||||
defaultPolicy netmap.PlacementPolicy
|
||||
regionMap map[string]netmap.PlacementPolicy
|
||||
copiesNumbers map[string][]uint32
|
||||
defaultCopiesNumbers []uint32
|
||||
namespaces Namespaces
|
||||
defaultXMLNS bool
|
||||
bypassContentEncodingInChunks bool
|
||||
clientCut bool
|
||||
maxBufferSizeForPut uint64
|
||||
md5Enabled bool
|
||||
namespaceHeader string
|
||||
defaultNamespaces []string
|
||||
authorizedControlAPIKeys [][]byte
|
||||
policyDenyByDefault bool
|
||||
}
|
||||
|
||||
maxClientsConfig struct {
|
||||
|
@ -102,8 +120,15 @@ type (
|
|||
func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
|
||||
objPool, treePool, key := getPools(ctx, log.logger, v)
|
||||
|
||||
cfg := tokens.Config{
|
||||
FrostFS: frostfs.NewAuthmateFrostFS(objPool, key),
|
||||
Key: key,
|
||||
CacheConfig: getAccessBoxCacheConfig(v, log.logger),
|
||||
RemovingCheckAfterDurations: fetchRemovingCheckInterval(v, log.logger),
|
||||
}
|
||||
|
||||
// prepare auth center
|
||||
ctr := auth.New(frostfs.NewAuthmateFrostFS(objPool, key), key, v.GetStringSlice(cfgAllowedAccessKeyIDPrefixes), getAccessBoxCacheConfig(v, log.logger))
|
||||
ctr := auth.New(tokens.New(cfg), v.GetStringSlice(cfgAllowedAccessKeyIDPrefixes))
|
||||
|
||||
app := &App{
|
||||
ctr: ctr,
|
||||
|
@ -116,7 +141,7 @@ func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
|
|||
webDone: make(chan struct{}, 1),
|
||||
wrkDone: make(chan struct{}, 1),
|
||||
|
||||
settings: newAppSettings(log, v),
|
||||
settings: newAppSettings(log, v, key),
|
||||
}
|
||||
|
||||
app.init(ctx)
|
||||
|
@ -126,8 +151,11 @@ func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
|
|||
|
||||
func (a *App) init(ctx context.Context) {
|
||||
a.setRuntimeParameters()
|
||||
a.initFrostfsID(ctx)
|
||||
a.initPolicyStorage(ctx)
|
||||
a.initAPI(ctx)
|
||||
a.initMetrics()
|
||||
a.initControlAPI()
|
||||
a.initServers(ctx)
|
||||
a.initTracing(ctx)
|
||||
}
|
||||
|
@ -145,7 +173,7 @@ func (a *App) initLayer(ctx context.Context) {
|
|||
user.IDFromKey(&gateOwner, a.key.PrivateKey.PublicKey)
|
||||
|
||||
layerCfg := &layer.Config{
|
||||
Caches: getCacheOptions(a.cfg, a.log),
|
||||
Cache: layer.NewCache(getCacheOptions(a.cfg, a.log)),
|
||||
AnonKey: layer.AnonymousKey{
|
||||
Key: randomKey,
|
||||
},
|
||||
|
@ -171,13 +199,13 @@ func (a *App) initLayer(ctx context.Context) {
|
|||
}
|
||||
}
|
||||
|
||||
func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
|
||||
func newAppSettings(log *Logger, v *viper.Viper, key *keys.PrivateKey) *appSettings {
|
||||
settings := &appSettings{
|
||||
logLevel: log.lvl,
|
||||
maxClient: newMaxClients(v),
|
||||
defaultXMLNS: v.GetBool(cfgKludgeUseDefaultXMLNS),
|
||||
defaultMaxAge: fetchDefaultMaxAge(v, log.logger),
|
||||
notificatorEnabled: v.GetBool(cfgEnableNATS),
|
||||
frostfsidValidation: v.GetBool(cfgFrostfsIDValidationEnabled),
|
||||
}
|
||||
|
||||
settings.resolveZoneList = v.GetStringSlice(cfgResolveBucketAllow)
|
||||
|
@ -186,15 +214,24 @@ func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
|
|||
settings.resolveZoneList = v.GetStringSlice(cfgResolveBucketDeny)
|
||||
}
|
||||
|
||||
settings.setBypassContentEncodingInChunks(v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
|
||||
settings.setClientCut(v.GetBool(cfgClientCut))
|
||||
settings.initPlacementPolicy(log.logger, v)
|
||||
settings.setBufferMaxSizeForPut(v.GetUint64(cfgBufferMaxSizeForPut))
|
||||
settings.setMD5Enabled(v.GetBool(cfgMD5Enabled))
|
||||
settings.update(v, log.logger, key)
|
||||
|
||||
return settings
|
||||
}
|
||||
|
||||
func (s *appSettings) update(v *viper.Viper, log *zap.Logger, key *keys.PrivateKey) {
|
||||
s.setNamespaceHeader(v.GetString(cfgResolveNamespaceHeader)) // should be updated before placement policies
|
||||
s.initPlacementPolicy(log, v)
|
||||
s.useDefaultXMLNamespace(v.GetBool(cfgKludgeUseDefaultXMLNS))
|
||||
s.setBypassContentEncodingInChunks(v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
|
||||
s.setClientCut(v.GetBool(cfgClientCut))
|
||||
s.setBufferMaxSizeForPut(v.GetUint64(cfgBufferMaxSizeForPut))
|
||||
s.setMD5Enabled(v.GetBool(cfgMD5Enabled))
|
||||
s.setDefaultNamespaces(fetchDefaultNamespaces(log, v))
|
||||
s.setAuthorizedControlAPIKeys(append(fetchAuthorizedKeys(log, v), key.PublicKey()))
|
||||
s.setPolicyDenyByDefault(v.GetBool(cfgPolicyDenyByDefault))
|
||||
}
|
||||
|
||||
func (s *appSettings) BypassContentEncodingInChunks() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
@ -232,46 +269,40 @@ func (s *appSettings) setBufferMaxSizeForPut(size uint64) {
|
|||
}
|
||||
|
||||
func (s *appSettings) initPlacementPolicy(l *zap.Logger, v *viper.Viper) {
|
||||
defaultPolicy := fetchDefaultPolicy(l, v)
|
||||
regionMap := fetchRegionMappingPolicies(l, v)
|
||||
defaultCopies := fetchDefaultCopiesNumbers(l, v)
|
||||
copiesNumbers := fetchCopiesNumbers(l, v)
|
||||
nsConfig := fetchNamespacesConfig(l, v)
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.defaultPolicy = defaultPolicy
|
||||
s.regionMap = regionMap
|
||||
s.defaultCopiesNumbers = defaultCopies
|
||||
s.copiesNumbers = copiesNumbers
|
||||
s.namespaces = nsConfig.Namespaces
|
||||
}
|
||||
|
||||
func (s *appSettings) DefaultPlacementPolicy() netmap.PlacementPolicy {
|
||||
func (s *appSettings) DefaultPlacementPolicy(namespace string) netmap.PlacementPolicy {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.defaultPolicy
|
||||
return s.namespaces[namespace].LocationConstraints[defaultConstraintName]
|
||||
}
|
||||
|
||||
func (s *appSettings) PlacementPolicy(name string) (netmap.PlacementPolicy, bool) {
|
||||
func (s *appSettings) PlacementPolicy(namespace, constraint string) (netmap.PlacementPolicy, bool) {
|
||||
s.mu.RLock()
|
||||
policy, ok := s.regionMap[name]
|
||||
placementPolicy, ok := s.namespaces[namespace].LocationConstraints[constraint]
|
||||
s.mu.RUnlock()
|
||||
|
||||
return policy, ok
|
||||
return placementPolicy, ok
|
||||
}
|
||||
|
||||
func (s *appSettings) CopiesNumbers(locationConstraint string) ([]uint32, bool) {
|
||||
func (s *appSettings) CopiesNumbers(namespace, constraint string) ([]uint32, bool) {
|
||||
s.mu.RLock()
|
||||
copiesNumbers, ok := s.copiesNumbers[locationConstraint]
|
||||
copiesNumbers, ok := s.namespaces[namespace].CopiesNumbers[constraint]
|
||||
s.mu.RUnlock()
|
||||
|
||||
return copiesNumbers, ok
|
||||
}
|
||||
|
||||
func (s *appSettings) DefaultCopiesNumbers() []uint32 {
|
||||
func (s *appSettings) DefaultCopiesNumbers(namespace string) []uint32 {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.defaultCopiesNumbers
|
||||
return s.namespaces[namespace].CopiesNumbers[defaultConstraintName]
|
||||
}
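The placement and copies-number lookups above are now namespace-scoped. A minimal sketch, in the same package, of how a caller could fall back from a named location constraint to the namespace default; the fallback order is illustrative and not asserted to be what the handler/layer actually does:

// resolvePlacement is a hypothetical helper showing the new namespace-aware
// signatures; it is not part of the diff.
func resolvePlacement(s *appSettings, namespace, constraint string) (netmap.PlacementPolicy, []uint32) {
	pp, ok := s.PlacementPolicy(namespace, constraint)
	if !ok {
		pp = s.DefaultPlacementPolicy(namespace)
	}

	cn, ok := s.CopiesNumbers(namespace, constraint)
	if !ok {
		cn = s.DefaultCopiesNumbers(namespace)
	}

	return pp, cn
}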
|
||||
|
||||
func (s *appSettings) NewXMLDecoder(r io.Reader) *xml.Decoder {
|
||||
|
@ -320,32 +351,157 @@ func (s *appSettings) setMD5Enabled(md5Enabled bool) {
|
|||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) NamespaceHeader() string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.namespaceHeader
|
||||
}
|
||||
|
||||
func (s *appSettings) setNamespaceHeader(nsHeader string) {
|
||||
s.mu.Lock()
|
||||
s.namespaceHeader = nsHeader
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
|
||||
if s.IsDefaultNamespace(ns) {
|
||||
return v2container.SysAttributeZoneDefault, true
|
||||
}
|
||||
|
||||
return ns + ".ns", false
|
||||
}
|
||||
|
||||
func (s *appSettings) IsDefaultNamespace(ns string) bool {
|
||||
s.mu.RLock()
|
||||
namespaces := s.defaultNamespaces
|
||||
s.mu.RUnlock()
|
||||
return slices.Contains(namespaces, ns)
|
||||
}
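A rough sketch of how the namespace helpers behave, assuming the default `kludge.default_namespaces` value of `["", "root"]`; the zone returned for default namespaces is the SDK constant `v2container.SysAttributeZoneDefault`, whose literal value is not asserted here:

// exampleNamespaceMapping is illustrative only and not part of the diff.
func exampleNamespaceMapping(s *appSettings) {
	zone, isDefault := s.FormContainerZone("root")
	fmt.Println(zone, isDefault) // SDK default zone, true

	zone, isDefault = s.FormContainerZone("dev")
	fmt.Println(zone, isDefault) // "dev.ns", false

	fmt.Println(s.ResolveNamespaceAlias("root") == defaultNamespace) // true
}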
|
||||
|
||||
func (s *appSettings) setDefaultNamespaces(namespaces []string) {
|
||||
s.mu.Lock()
|
||||
s.defaultNamespaces = namespaces
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) FetchRawKeys() [][]byte {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.authorizedControlAPIKeys
|
||||
}
|
||||
|
||||
func (s *appSettings) setAuthorizedControlAPIKeys(keys keys.PublicKeys) {
|
||||
rawPubs := make([][]byte, len(keys))
|
||||
for i := range keys {
|
||||
rawPubs[i] = keys[i].Bytes()
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.authorizedControlAPIKeys = rawPubs
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) ResolveNamespaceAlias(namespace string) string {
|
||||
if s.IsDefaultNamespace(namespace) {
|
||||
return defaultNamespace
|
||||
}
|
||||
|
||||
return namespace
|
||||
}
|
||||
|
||||
func (s *appSettings) PolicyDenyByDefault() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.policyDenyByDefault
|
||||
}
|
||||
|
||||
func (s *appSettings) setPolicyDenyByDefault(policyDenyByDefault bool) {
|
||||
s.mu.Lock()
|
||||
s.policyDenyByDefault = policyDenyByDefault
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (a *App) initAPI(ctx context.Context) {
|
||||
a.initLayer(ctx)
|
||||
a.initHandler()
|
||||
}
|
||||
|
||||
func (a *App) initControlAPI() {
|
||||
svc := controlSvc.New(
|
||||
controlSvc.WithSettings(a.settings),
|
||||
controlSvc.WithLogger(a.log),
|
||||
controlSvc.WithChainStorage(a.policyStorage.LocalStorage()),
|
||||
)
|
||||
|
||||
a.controlAPI = grpc.NewServer()
|
||||
|
||||
control.RegisterControlServiceServer(a.controlAPI, svc)
|
||||
}
|
||||
|
||||
func (a *App) initMetrics() {
|
||||
a.metrics = metrics.NewAppMetrics(a.log, frostfs.NewPoolStatistic(a.pool), a.cfg.GetBool(cfgPrometheusEnabled))
|
||||
a.metrics.State().SetHealth(metrics.HealthStatusStarting)
|
||||
}
|
||||
|
||||
func (a *App) initFrostfsID(ctx context.Context) {
|
||||
var err error
|
||||
a.frostfsid, err = frostfsid.New(ctx, frostfsid.Config{
|
||||
RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
|
||||
Contract: a.cfg.GetString(cfgFrostfsIDContract),
|
||||
ProxyContract: a.cfg.GetString(cfgProxyContract),
|
||||
Key: a.key,
|
||||
})
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.InitFrostfsIDContractFailed, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *App) initPolicyStorage(ctx context.Context) {
|
||||
var (
|
||||
err error
|
||||
policyContract policy.Contract
|
||||
)
|
||||
|
||||
if a.cfg.GetBool(cfgPolicyEnabled) {
|
||||
policyContract, err = contract.New(ctx, contract.Config{
|
||||
RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
|
||||
Contract: a.cfg.GetString(cfgPolicyContract),
|
||||
ProxyContract: a.cfg.GetString(cfgProxyContract),
|
||||
Key: a.key,
|
||||
})
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.InitPolicyContractFailed, zap.Error(err))
|
||||
}
|
||||
} else {
|
||||
policyContract = contract.NewInMemoryContract()
|
||||
}
|
||||
|
||||
a.policyStorage = policy.NewStorage(policy.StorageConfig{
|
||||
Contract: policyContract,
|
||||
Cache: cache.NewMorphPolicyCache(getMorphPolicyCacheConfig(a.cfg, a.log)),
|
||||
Log: a.log,
|
||||
})
|
||||
}
|
||||
|
||||
func (a *App) initResolver() {
|
||||
var err error
|
||||
a.bucketResolver, err = resolver.NewBucketResolver(a.getResolverConfig())
|
||||
a.bucketResolver, err = resolver.NewBucketResolver(a.getResolverOrder(), a.getResolverConfig())
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *App) getResolverConfig() ([]string, *resolver.Config) {
|
||||
resolveCfg := &resolver.Config{
|
||||
func (a *App) getResolverConfig() *resolver.Config {
|
||||
return &resolver.Config{
|
||||
FrostFS: frostfs.NewResolverFrostFS(a.pool),
|
||||
RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
|
||||
Settings: a.settings,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *App) getResolverOrder() []string {
|
||||
order := a.cfg.GetStringSlice(cfgResolveOrder)
|
||||
if resolveCfg.RPCAddress == "" {
|
||||
if a.cfg.GetString(cfgRPCEndpoint) == "" {
|
||||
order = remove(order, resolver.NNSResolver)
|
||||
a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided)
|
||||
}
|
||||
|
@ -354,7 +510,7 @@ func (a *App) getResolverConfig() ([]string, *resolver.Config) {
|
|||
a.log.Info(logs.ContainerResolverWillBeDisabled)
|
||||
}
|
||||
|
||||
return order, resolveCfg
|
||||
return order
|
||||
}
|
||||
|
||||
func (a *App) initTracing(ctx context.Context) {
|
||||
|
@ -439,6 +595,8 @@ func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.
|
|||
prm.SetLogger(logger)
|
||||
prmTree.SetLogger(logger)
|
||||
|
||||
prmTree.SetMaxRequestAttempts(cfg.GetInt(cfgTreePoolMaxAttempts))
|
||||
|
||||
var apiGRPCDialOpts []grpc.DialOption
|
||||
var treeGRPCDialOpts []grpc.DialOption
|
||||
if cfg.GetBool(cfgTracingEnabled) {
|
||||
|
@ -510,13 +668,25 @@ func (a *App) Serve(ctx context.Context) {
|
|||
domains := a.cfg.GetStringSlice(cfgListenDomains)
|
||||
a.log.Info(logs.FetchDomainsPrepareToUseAPI, zap.Strings("domains", domains))
|
||||
|
||||
throttleOps := middleware.ThrottleOpts{
|
||||
cfg := api.Config{
|
||||
Throttle: middleware.ThrottleOpts{
|
||||
Limit: a.settings.maxClient.count,
|
||||
BacklogTimeout: a.settings.maxClient.deadline,
|
||||
},
|
||||
Handler: a.api,
|
||||
Center: a.ctr,
|
||||
Log: a.log,
|
||||
Metrics: a.metrics,
|
||||
Domains: domains,
|
||||
|
||||
MiddlewareSettings: a.settings,
|
||||
PolicyChecker: a.policyStorage,
|
||||
|
||||
FrostfsID: a.frostfsid,
|
||||
FrostFSIDValidation: a.settings.frostfsidValidation,
|
||||
}
|
||||
|
||||
chiRouter := chi.NewRouter()
|
||||
api.AttachChi(chiRouter, domains, throttleOps, a.api, a.ctr, a.log, a.metrics)
|
||||
chiRouter := api.NewRouter(cfg)
|
||||
|
||||
// Use chi.Router as http.Handler
|
||||
srv := new(http.Server)
|
||||
|
@ -540,6 +710,16 @@ func (a *App) Serve(ctx context.Context) {
|
|||
}(i)
|
||||
}
|
||||
|
||||
go func() {
|
||||
address := a.cfg.GetString(cfgControlGRPCEndpoint)
|
||||
a.log.Info(logs.StartingControlAPI, zap.String("address", address))
|
||||
if listener, err := net.Listen("tcp", address); err != nil {
|
||||
a.log.Fatal(logs.ListenAndServe, zap.Error(err))
|
||||
} else if err = a.controlAPI.Serve(listener); err != nil {
|
||||
a.log.Fatal(logs.ListenAndServe, zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, syscall.SIGHUP)
|
||||
|
||||
|
@ -558,6 +738,7 @@ LOOP:
|
|||
|
||||
a.log.Info(logs.StoppingServer, zap.Error(srv.Shutdown(ctx)))
|
||||
|
||||
a.stopControlAPI()
|
||||
a.metrics.Shutdown()
|
||||
a.stopServices()
|
||||
a.shutdownTracing()
|
||||
|
@ -569,6 +750,25 @@ func shutdownContext() (context.Context, context.CancelFunc) {
|
|||
return context.WithTimeout(context.Background(), defaultShutdownTimeout)
|
||||
}
|
||||
|
||||
func (a *App) stopControlAPI() {
|
||||
ctx, cancel := shutdownContext()
|
||||
defer cancel()
|
||||
|
||||
go func() {
|
||||
a.controlAPI.GracefulStop()
|
||||
cancel()
|
||||
}()
|
||||
|
||||
<-ctx.Done()
|
||||
|
||||
if errors.Is(ctx.Err(), context.DeadlineExceeded) {
|
||||
a.log.Info(logs.ControlAPICannotShutdownGracefully)
|
||||
a.controlAPI.Stop()
|
||||
}
|
||||
|
||||
a.log.Info(logs.ControlAPIServiceStopped)
|
||||
}
|
||||
|
||||
func (a *App) configReload(ctx context.Context) {
|
||||
a.log.Info(logs.SIGHUPConfigReloadStarted)
|
||||
|
||||
|
@ -581,7 +781,7 @@ func (a *App) configReload(ctx context.Context) {
|
|||
return
|
||||
}
|
||||
|
||||
if err := a.bucketResolver.UpdateResolvers(a.getResolverConfig()); err != nil {
|
||||
if err := a.bucketResolver.UpdateResolvers(a.getResolverOrder()); err != nil {
|
||||
a.log.Warn(logs.FailedToReloadResolvers, zap.Error(err))
|
||||
}
|
||||
|
||||
|
@ -610,13 +810,7 @@ func (a *App) updateSettings() {
|
|||
a.settings.logLevel.SetLevel(lvl)
|
||||
}
|
||||
|
||||
a.settings.initPlacementPolicy(a.log, a.cfg)
|
||||
|
||||
a.settings.useDefaultXMLNamespace(a.cfg.GetBool(cfgKludgeUseDefaultXMLNS))
|
||||
a.settings.setBypassContentEncodingInChunks(a.cfg.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
|
||||
a.settings.setClientCut(a.cfg.GetBool(cfgClientCut))
|
||||
a.settings.setBufferMaxSizeForPut(a.cfg.GetUint64(cfgBufferMaxSizeForPut))
|
||||
a.settings.setMD5Enabled(a.cfg.GetBool(cfgMD5Enabled))
|
||||
a.settings.update(a.cfg, a.log, a.key)
|
||||
}
|
||||
|
||||
func (a *App) startServices() {
|
||||
|
@ -720,6 +914,9 @@ func getCacheOptions(v *viper.Viper, l *zap.Logger) *layer.CachesConfig {
|
|||
cacheCfg.ObjectsList.Lifetime = fetchCacheLifetime(v, l, cfgListObjectsCacheLifetime, cacheCfg.ObjectsList.Lifetime)
|
||||
cacheCfg.ObjectsList.Size = fetchCacheSize(v, l, cfgListObjectsCacheSize, cacheCfg.ObjectsList.Size)
|
||||
|
||||
cacheCfg.SessionList.Lifetime = fetchCacheLifetime(v, l, cfgSessionListCacheLifetime, cacheCfg.SessionList.Lifetime)
|
||||
cacheCfg.SessionList.Size = fetchCacheSize(v, l, cfgSessionListCacheSize, cacheCfg.SessionList.Size)
|
||||
|
||||
cacheCfg.Buckets.Lifetime = fetchCacheLifetime(v, l, cfgBucketsCacheLifetime, cacheCfg.Buckets.Lifetime)
|
||||
cacheCfg.Buckets.Size = fetchCacheSize(v, l, cfgBucketsCacheSize, cacheCfg.Buckets.Size)
|
||||
|
||||
|
@ -744,9 +941,26 @@ func getAccessBoxCacheConfig(v *viper.Viper, l *zap.Logger) *cache.Config {
|
|||
return cacheCfg
|
||||
}
|
||||
|
||||
func getMorphPolicyCacheConfig(v *viper.Viper, l *zap.Logger) *cache.Config {
|
||||
cacheCfg := cache.DefaultMorphPolicyConfig(l)
|
||||
|
||||
cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgMorphPolicyCacheLifetime, cacheCfg.Lifetime)
|
||||
cacheCfg.Size = fetchCacheSize(v, l, cfgMorphPolicyCacheSize, cacheCfg.Size)
|
||||
|
||||
return cacheCfg
|
||||
}
|
||||
|
||||
func (a *App) initHandler() {
|
||||
var err error
|
||||
a.api, err = handler.New(a.log, a.obj, a.nc, a.settings)
|
||||
var (
|
||||
err error
|
||||
ffsid handler.FrostFSID
|
||||
)
|
||||
|
||||
if a.frostfsid != nil {
|
||||
ffsid = a.frostfsid
|
||||
}
|
||||
|
||||
a.api, err = handler.New(a.log, a.obj, a.nc, a.settings, a.policyStorage, ffsid)
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.CouldNotInitializeAPIHandler, zap.Error(err))
|
||||
}
|
||||
@ -21,6 +21,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
"git.frostfs.info/TrueCloudLab/zapjournald"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/ssgreg/journald"
|
||||
|
@ -50,9 +51,20 @@ const (
|
|||
|
||||
defaultReadHeaderTimeout = 30 * time.Second
|
||||
defaultIdleTimeout = 30 * time.Second
|
||||
|
||||
defaultAccessBoxCacheRemovingCheckInterval = 5 * time.Minute
|
||||
|
||||
defaultNamespaceHeader = "X-Frostfs-Namespace"
|
||||
|
||||
defaultConstraintName = "default"
|
||||
|
||||
defaultNamespace = ""
|
||||
)
|
||||
|
||||
var defaultCopiesNumbers = []uint32{0}
|
||||
var (
|
||||
defaultCopiesNumbers = []uint32{0}
|
||||
defaultDefaultNamespaces = []string{"", "root"}
|
||||
)
|
||||
|
||||
const ( // Settings.
|
||||
// Logger.
|
||||
|
@ -72,6 +84,10 @@ const ( // Settings.
|
|||
cfgTLSKeyFile = "tls.key_file"
|
||||
cfgTLSCertFile = "tls.cert_file"
|
||||
|
||||
// Control API.
|
||||
cfgControlAuthorizedKeys = "control.authorized_keys"
|
||||
cfgControlGRPCEndpoint = "control.grpc.endpoint"
|
||||
|
||||
// Pool config.
|
||||
cfgConnectTimeout = "connect_timeout"
|
||||
cfgStreamTimeout = "stream_timeout"
|
||||
|
@ -84,6 +100,8 @@ const ( // Settings.
|
|||
cfgObjectsCacheSize = "cache.objects.size"
|
||||
cfgListObjectsCacheLifetime = "cache.list.lifetime"
|
||||
cfgListObjectsCacheSize = "cache.list.size"
|
||||
cfgSessionListCacheLifetime = "cache.list_session.lifetime"
|
||||
cfgSessionListCacheSize = "cache.list_session.size"
|
||||
cfgBucketsCacheLifetime = "cache.buckets.lifetime"
|
||||
cfgBucketsCacheSize = "cache.buckets.size"
|
||||
cfgNamesCacheLifetime = "cache.names.lifetime"
|
||||
|
@ -94,6 +112,10 @@ const ( // Settings.
|
|||
cfgAccessBoxCacheSize = "cache.accessbox.size"
|
||||
cfgAccessControlCacheLifetime = "cache.accesscontrol.lifetime"
|
||||
cfgAccessControlCacheSize = "cache.accesscontrol.size"
|
||||
cfgMorphPolicyCacheLifetime = "cache.morph_policy.lifetime"
|
||||
cfgMorphPolicyCacheSize = "cache.morph_policy.size"
|
||||
|
||||
cfgAccessBoxCacheRemovingCheckInterval = "cache.accessbox.removing_check_interval"
|
||||
|
||||
// NATS.
|
||||
cfgEnableNATS = "nats.enabled"
|
||||
|
@ -143,6 +165,7 @@ const ( // Settings.
|
|||
// Kludge.
|
||||
cfgKludgeUseDefaultXMLNS = "kludge.use_default_xmlns"
|
||||
cfgKludgeBypassContentEncodingCheckInChunks = "kludge.bypass_content_encoding_check_in_chunks"
|
||||
cfgKludgeDefaultNamespaces = "kludge.default_namespaces"
|
||||
|
||||
// Web.
|
||||
cfgWebReadTimeout = "web.read_timeout"
|
||||
|
@ -150,6 +173,9 @@ const ( // Settings.
|
|||
cfgWebWriteTimeout = "web.write_timeout"
|
||||
cfgWebIdleTimeout = "web.idle_timeout"
|
||||
|
||||
// Namespaces.
|
||||
cfgNamespacesConfig = "namespaces.config"
|
||||
|
||||
// Command line args.
|
||||
cmdHelp = "help"
|
||||
cmdVersion = "version"
|
||||
|
@ -167,11 +193,14 @@ const ( // Settings.
|
|||
cfgClientCut = "frostfs.client_cut"
|
||||
// Sets max buffer size for read payload in put operations.
|
||||
cfgBufferMaxSizeForPut = "frostfs.buffer_max_size_for_put"
|
||||
// Sets the maximum number of attempts to make a successful tree request.
|
||||
cfgTreePoolMaxAttempts = "frostfs.tree_pool_max_attempts"
|
||||
|
||||
// List of allowed AccessKeyID prefixes.
|
||||
cfgAllowedAccessKeyIDPrefixes = "allowed_access_key_id_prefixes"
|
||||
|
||||
// Bucket resolving options.
|
||||
cfgResolveNamespaceHeader = "resolve_bucket.namespace_header"
|
||||
cfgResolveBucketAllow = "resolve_bucket.allow"
|
||||
cfgResolveBucketDeny = "resolve_bucket.deny"
|
||||
|
||||
|
@ -180,6 +209,18 @@ const ( // Settings.
|
|||
|
||||
// Enable return MD5 checksum in ETag.
|
||||
cfgMD5Enabled = "features.md5.enabled"
|
||||
cfgPolicyDenyByDefault = "features.policy.deny_by_default"
|
||||
|
||||
// FrostfsID.
|
||||
cfgFrostfsIDContract = "frostfsid.contract"
|
||||
cfgFrostfsIDValidationEnabled = "frostfsid.validation.enabled"
|
||||
|
||||
// Policy.
|
||||
cfgPolicyEnabled = "policy.enabled"
|
||||
cfgPolicyContract = "policy.contract"
|
||||
|
||||
// Proxy.
|
||||
cfgProxyContract = "proxy.contract"
|
||||
|
||||
// envPrefix is an environment variables prefix used for configuration.
|
||||
envPrefix = "S3_GW"
|
||||
|
@ -331,6 +372,24 @@ func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue
|
|||
return defaultValue
|
||||
}
|
||||
|
||||
func fetchRemovingCheckInterval(v *viper.Viper, l *zap.Logger) time.Duration {
|
||||
if !v.IsSet(cfgAccessBoxCacheRemovingCheckInterval) {
|
||||
return defaultAccessBoxCacheRemovingCheckInterval
|
||||
}
|
||||
|
||||
duration := v.GetDuration(cfgAccessBoxCacheRemovingCheckInterval)
|
||||
if duration >= 0 {
|
||||
return duration
|
||||
}
|
||||
|
||||
l.Error(logs.InvalidAccessBoxCacheRemovingCheckInterval,
|
||||
zap.String("parameter", cfgAccessBoxCacheRemovingCheckInterval),
|
||||
zap.Duration("value in config", duration),
|
||||
zap.Duration("default", defaultAccessBoxCacheRemovingCheckInterval))
|
||||
|
||||
return defaultAccessBoxCacheRemovingCheckInterval
|
||||
}
|
||||
|
||||
func fetchDefaultMaxAge(cfg *viper.Viper, l *zap.Logger) int {
|
||||
defaultMaxAge := handler.DefaultMaxAge
|
||||
|
||||
|
@ -442,6 +501,88 @@ func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper) map[string][]uint32 {
|
|||
return copiesNums
|
||||
}
|
||||
|
||||
func fetchDefaultNamespaces(l *zap.Logger, v *viper.Viper) []string {
|
||||
defaultNamespaces := v.GetStringSlice(cfgKludgeDefaultNamespaces)
|
||||
if len(defaultNamespaces) == 0 {
|
||||
defaultNamespaces = defaultDefaultNamespaces
|
||||
l.Warn(logs.DefaultNamespacesCannotBeEmpty, zap.Strings("namespaces", defaultNamespaces))
|
||||
}
|
||||
|
||||
for i := range defaultNamespaces { // allows setting namespaces in an env variable as `S3_GW_KLUDGE_DEFAULT_NAMESPACES="" 'root'`
|
||||
defaultNamespaces[i] = strings.Trim(defaultNamespaces[i], "\"'")
|
||||
}
|
||||
|
||||
return defaultNamespaces
|
||||
}
|
||||
|
||||
func fetchNamespacesConfig(l *zap.Logger, v *viper.Viper) NamespacesConfig {
|
||||
defaultNSRegionMap := fetchRegionMappingPolicies(l, v)
|
||||
defaultNSRegionMap[defaultConstraintName] = fetchDefaultPolicy(l, v)
|
||||
|
||||
defaultNSCopiesNumbers := fetchCopiesNumbers(l, v)
|
||||
defaultNSCopiesNumbers[defaultConstraintName] = fetchDefaultCopiesNumbers(l, v)
|
||||
|
||||
defaultNSValue := Namespace{
|
||||
LocationConstraints: defaultNSRegionMap,
|
||||
CopiesNumbers: defaultNSCopiesNumbers,
|
||||
}
|
||||
|
||||
nsConfig, err := readNamespacesConfig(v.GetString(cfgNamespacesConfig))
|
||||
if err != nil {
|
||||
l.Warn(logs.FailedToParseNamespacesConfig, zap.Error(err))
|
||||
}
|
||||
|
||||
defaultNamespacesNames := fetchDefaultNamespaces(l, v)
|
||||
|
||||
var overrideDefaults []Namespace
|
||||
for _, name := range defaultNamespacesNames {
|
||||
if ns, ok := nsConfig.Namespaces[name]; ok {
|
||||
overrideDefaults = append(overrideDefaults, ns)
|
||||
delete(nsConfig.Namespaces, name)
|
||||
}
|
||||
}
|
||||
|
||||
if len(overrideDefaults) > 0 {
|
||||
l.Warn(logs.DefaultNamespaceConfigValuesBeOverwritten)
|
||||
defaultNSValue.LocationConstraints = overrideDefaults[0].LocationConstraints
|
||||
defaultNSValue.CopiesNumbers = overrideDefaults[0].CopiesNumbers
|
||||
if len(overrideDefaults) > 1 {
|
||||
l.Warn(logs.MultipleDefaultOverridesFound, zap.String("name", overrideDefaults[0].Name))
|
||||
}
|
||||
}
|
||||
|
||||
for _, name := range defaultNamespacesNames {
|
||||
nsConfig.Namespaces[name] = Namespace{
|
||||
Name: name,
|
||||
LocationConstraints: defaultNSValue.LocationConstraints,
|
||||
CopiesNumbers: defaultNSValue.CopiesNumbers,
|
||||
}
|
||||
}
|
||||
|
||||
return nsConfig
|
||||
}
|
||||
|
||||
func readNamespacesConfig(filepath string) (NamespacesConfig, error) {
|
||||
nsConfig := NamespacesConfig{
|
||||
Namespaces: make(Namespaces),
|
||||
}
|
||||
|
||||
if filepath == "" {
|
||||
return nsConfig, nil
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(filepath)
|
||||
if err != nil {
|
||||
return nsConfig, fmt.Errorf("failed to read namespace config '%s': %w", filepath, err)
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(data, &nsConfig); err != nil {
|
||||
return nsConfig, fmt.Errorf("failed to parse namespace config: %w", err)
|
||||
}
|
||||
|
||||
return nsConfig, nil
|
||||
}
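For reference, a minimal example of the file `readNamespacesConfig` expects (pointed to by the `namespaces.config` param); the namespace name and policies are illustrative. It is written as a Go string literal so it can be dropped into a test next to `TestNamespacesMarshaling`:

// exampleNamespacesConfigJSON is an illustrative namespaces.json payload.
// Each location constraint may be either a human-readable policy string
// (parsed with DecodeString) or a JSON-encoded netmap.PlacementPolicy.
const exampleNamespacesConfigJSON = `{
  "namespaces": {
    "dev": {
      "location_constraints": {
        "default": "REP 3",
        "fast": "REP 1 CBF 1 SELECT 1 FROM *"
      },
      "copies_numbers": {
        "default": [0],
        "fast": [1]
      }
    }
  }
}`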
|
||||
|
||||
func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
|
||||
var nodes []pool.NodeParam
|
||||
for i := 0; ; i++ {
|
||||
|
@ -494,6 +635,23 @@ func fetchServers(v *viper.Viper) []ServerInfo {
|
|||
return servers
|
||||
}
|
||||
|
||||
func fetchAuthorizedKeys(l *zap.Logger, v *viper.Viper) keys.PublicKeys {
|
||||
strKeys := v.GetStringSlice(cfgControlAuthorizedKeys)
|
||||
pubs := make(keys.PublicKeys, 0, len(strKeys))
|
||||
|
||||
for i := range strKeys {
|
||||
pub, err := keys.NewPublicKeyFromString(strKeys[i])
|
||||
if err != nil {
|
||||
l.Warn(logs.FailedToParsePublicKey, zap.String("key", strKeys[i]))
|
||||
continue
|
||||
}
|
||||
|
||||
pubs = append(pubs, pub)
|
||||
}
|
||||
|
||||
return pubs
|
||||
}
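Keys listed under `control.authorized_keys` are hex-encoded compressed public keys. A standalone sketch for producing one, written as a separate throwaway program (not part of this file) and assuming only the neo-go `keys` package already used above:

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

// Prints a hex-encoded public key in the form accepted by fetchAuthorizedKeys.
func main() {
	priv, err := keys.NewPrivateKey()
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(priv.PublicKey().Bytes()))
}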
|
||||
|
||||
func newSettings() *viper.Viper {
|
||||
v := viper.New()
|
||||
|
||||
|
@ -539,6 +697,8 @@ func newSettings() *viper.Viper {
|
|||
|
||||
// set defaults:
|
||||
|
||||
v.SetDefault(cfgAccessBoxCacheRemovingCheckInterval, defaultAccessBoxCacheRemovingCheckInterval)
|
||||
|
||||
// logger:
|
||||
v.SetDefault(cfgLoggerLevel, "debug")
|
||||
v.SetDefault(cfgLoggerDestination, "stdout")
|
||||
|
@ -550,17 +710,33 @@ func newSettings() *viper.Viper {
|
|||
v.SetDefault(cfgPProfAddress, "localhost:8085")
|
||||
v.SetDefault(cfgPrometheusAddress, "localhost:8086")
|
||||
|
||||
v.SetDefault(cfgControlGRPCEndpoint, "localhost:8083")
|
||||
|
||||
// frostfs
|
||||
v.SetDefault(cfgBufferMaxSizeForPut, 1024*1024) // 1mb
|
||||
|
||||
// kludge
|
||||
v.SetDefault(cfgKludgeUseDefaultXMLNS, false)
|
||||
v.SetDefault(cfgKludgeBypassContentEncodingCheckInChunks, false)
|
||||
v.SetDefault(cfgKludgeDefaultNamespaces, defaultDefaultNamespaces)
|
||||
|
||||
// web
|
||||
v.SetDefault(cfgWebReadHeaderTimeout, defaultReadHeaderTimeout)
|
||||
v.SetDefault(cfgWebIdleTimeout, defaultIdleTimeout)
|
||||
|
||||
// frostfsid
|
||||
v.SetDefault(cfgFrostfsIDContract, "frostfsid.frostfs")
|
||||
|
||||
// policy
|
||||
v.SetDefault(cfgPolicyContract, "policy.frostfs")
|
||||
v.SetDefault(cfgPolicyEnabled, true)
|
||||
|
||||
// proxy
|
||||
v.SetDefault(cfgProxyContract, "proxy.frostfs")
|
||||
|
||||
// resolve
|
||||
v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader)
|
||||
|
||||
// Bind flags
|
||||
if err := bindFlags(v, flags); err != nil {
|
||||
panic(fmt.Errorf("bind flags: %w", err))
|
||||
|
@ -801,7 +977,7 @@ func newJournaldLogger(lvl zapcore.Level) *Logger {
|
|||
c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||
c.Level = zap.NewAtomicLevelAt(lvl)
|
||||
|
||||
encoder := zapcore.NewConsoleEncoder(c.EncoderConfig)
|
||||
encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
|
||||
|
||||
core := zapjournald.NewCore(zap.NewAtomicLevelAt(lvl), encoder, &journald.Journal{}, zapjournald.SyslogFields)
|
||||
coreWithContext := core.With([]zapcore.Field{
|
||||
|
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/handler"
|
||||
|
@ -93,3 +94,53 @@ func TestDefaultNamespace(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNamespacesMarshaling(t *testing.T) {
|
||||
dataJSON := `
|
||||
{
|
||||
"namespaces": {
|
||||
"kapusta": {
|
||||
"location_constraints": {
|
||||
"default": "REP 3",
|
||||
"load-1-1": "REP 1 CBF 1 SELECT 1 FROM *"
|
||||
},
|
||||
"copies_numbers": {
|
||||
"default": [
|
||||
0
|
||||
],
|
||||
"load-1-1": [
|
||||
1
|
||||
]
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"location_constraints": {
|
||||
"default": "REP 3",
|
||||
"test": "{\"replicas\":[{\"count\":1,\"selector\":\"\"}],\"containerBackupFactor\":1,\"selectors\":[{\"name\":\"\",\"count\":1,\"clause\":\"CLAUSE_UNSPECIFIED\",\"attribute\":\"\",\"filter\":\"Color\"}],\"filters\":[{\"name\":\"Color\",\"key\":\"Color\",\"op\":\"EQ\",\"value\":\"Red\",\"filters\":[]}],\"unique\":false}"
|
||||
},
|
||||
"copies_numbers": {
|
||||
"default": [
|
||||
0
|
||||
],
|
||||
"load-1-1": [
|
||||
1
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
var nsConfig NamespacesConfig
|
||||
err := json.Unmarshal([]byte(dataJSON), &nsConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := json.Marshal(nsConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
var nsConfig2 NamespacesConfig
|
||||
err = json.Unmarshal(data, &nsConfig2)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, nsConfig, nsConfig2)
|
||||
}
cmd/s3-gw/namespaces.go (new file, 79 lines)
@ -0,0 +1,79 @@
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

type NamespacesConfig struct {
	Namespaces Namespaces `json:"namespaces"`
}

type Namespaces map[string]Namespace

type Namespace struct {
	Name                string              `json:"-"`
	LocationConstraints LocationConstraints `json:"location_constraints"`
	CopiesNumbers       map[string][]uint32 `json:"copies_numbers"`
}

type LocationConstraints map[string]netmap.PlacementPolicy

func (c *Namespaces) UnmarshalJSON(data []byte) error {
	namespaces := make(map[string]Namespace)
	if err := json.Unmarshal(data, &namespaces); err != nil {
		return err
	}

	for name, namespace := range namespaces {
		namespace.Name = name
		namespaces[name] = namespace
	}

	*c = namespaces

	return nil
}

func (c *LocationConstraints) UnmarshalJSON(data []byte) error {
	m := make(map[string]string)
	if err := json.Unmarshal(data, &m); err != nil {
		return err
	}

	*c = make(LocationConstraints, len(m))
	for region, policy := range m {
		var pp netmap.PlacementPolicy
		if err := pp.DecodeString(policy); err == nil {
			(*c)[region] = pp
			continue
		}

		if err := pp.UnmarshalJSON([]byte(policy)); err == nil {
			(*c)[region] = pp
			continue
		}

		return fmt.Errorf("failed to parse location constraint '%s': '%s'", region, policy)
	}

	return nil
}

func (c LocationConstraints) MarshalJSON() ([]byte, error) {
	m := make(map[string]string, len(c))

	for region, policy := range c {
		var sb strings.Builder
		if err := policy.WriteStringTo(&sb); err != nil {
			return nil, err
		}

		m[region] = sb.String()
	}

	return json.Marshal(m)
}
@ -34,6 +34,9 @@ func (ms *Service) ShutDown(ctx context.Context) {
|
|||
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr))
|
||||
err := ms.Shutdown(ctx)
|
||||
if err != nil {
|
||||
ms.log.Panic(logs.CantShutDownService)
|
||||
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err))
|
||||
if err = ms.Close(); err != nil {
|
||||
ms.log.Panic(logs.CantShutDownService, zap.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -33,6 +33,12 @@ S3_GW_SERVER_1_TLS_ENABLED=true
|
|||
S3_GW_SERVER_1_TLS_CERT_FILE=/path/to/tls/cert
|
||||
S3_GW_SERVER_1_TLS_KEY_FILE=/path/to/tls/key
|
||||
|
||||
# Control API
|
||||
# List of hex-encoded public keys that are allowed to use the Control Service
|
||||
S3_GW_CONTROL_AUTHORIZED_KEYS=035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
|
||||
# Endpoint the Control Service listens on
|
||||
S3_GW_CONTROL_GRPC_ENDPOINT=localhost:8083
|
||||
|
||||
# Domains that can be used for virtual-hosted-style access to buckets.
|
||||
S3_GW_LISTEN_DOMAINS=s3dev.frostfs.devenv
|
||||
|
||||
|
@ -76,6 +82,9 @@ S3_GW_CACHE_OBJECTS_SIZE=1000000
|
|||
# Cache which keeps lists of objects in buckets
|
||||
S3_GW_CACHE_LIST_LIFETIME=1m
|
||||
S3_GW_CACHE_LIST_SIZE=100000
|
||||
# Cache which keeps listing session
|
||||
S3_GW_CACHE_LIST_SESSION_LIFETIME=1m
|
||||
S3_GW_CACHE_LIST_SESSION_SIZE=100
|
||||
# Cache which contains mapping of bucket name to bucket info
|
||||
S3_GW_CACHE_BUCKETS_LIFETIME=1m
|
||||
S3_GW_CACHE_BUCKETS_SIZE=1000
|
||||
|
@ -86,11 +95,15 @@ S3_GW_CACHE_NAMES_SIZE=10000
|
|||
S3_GW_CACHE_SYSTEM_LIFETIME=5m
|
||||
S3_GW_CACHE_SYSTEM_SIZE=100000
|
||||
# Cache which stores access box with tokens by its address
|
||||
S3_GW_CACHE_ACCESSBOX_REMOVING_CHECK_INTERVAL=5m
|
||||
S3_GW_CACHE_ACCESSBOX_LIFETIME=10m
|
||||
S3_GW_CACHE_ACCESSBOX_SIZE=100
|
||||
# Cache which stores owner to cache operation mapping
|
||||
S3_GW_CACHE_ACCESSCONTROL_LIFETIME=1m
|
||||
S3_GW_CACHE_ACCESSCONTROL_SIZE=100000
|
||||
# Cache which stores list of policy chains
|
||||
S3_GW_CACHE_MORPH_POLICY_LIFETIME=1m
|
||||
S3_GW_CACHE_MORPH_POLICY_SIZE=10000
|
||||
|
||||
# NATS
|
||||
S3_GW_NATS_ENABLED=true
|
||||
|
@ -129,11 +142,16 @@ S3_GW_FROSTFS_SET_COPIES_NUMBER=0
|
|||
S3_GW_FROSTFS_CLIENT_CUT=false
|
||||
# Sets max buffer size for read payload in put operations.
|
||||
S3_GW_FROSTFS_BUFFER_MAX_SIZE_FOR_PUT=1048576
|
||||
# Maximum number of attempts to make a successful tree request.
# The default value 0 means the number of attempts equals the number of nodes in the pool.
|
||||
S3_GW_FROSTFS_TREE_POOL_MAX_ATTEMPTS=0
|
||||
|
||||
# List of allowed AccessKeyID prefixes
|
||||
# If not set, S3 GW will accept all AccessKeyIDs
|
||||
S3_GW_ALLOWED_ACCESS_KEY_ID_PREFIXES=Ck9BHsgKcnwfCTUSFm6pxhoNS4cBqgN2NQ8zVgPjqZDX 3stjWenX15YwYzczMr88gy3CQr4NYFBQ8P7keGzH5QFn
|
||||
|
||||
# Header to determine zone to resolve bucket name
|
||||
S3_GW_RESOLVE_NAMESPACE_HEADER=X-Frostfs-Namespace
|
||||
# List of container NNS zones which are allowed or restricted to resolve with HEAD request
|
||||
S3_GW_RESOLVE_BUCKET_ALLOW=container
|
||||
# S3_GW_RESOLVE_BUCKET_DENY=
|
||||
|
@ -141,7 +159,9 @@ S3_GW_RESOLVE_BUCKET_ALLOW=container
|
|||
# Enable using the default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parsing xml bodies.
|
||||
S3_GW_KLUDGE_USE_DEFAULT_XMLNS=false
|
||||
# Use this flag to allow the chunked upload approach without the `aws-chunked` value in the `Content-Encoding` header.
|
||||
S3_GW_BYPASS_CONTENT_ENCODING_CHECK_IN_CHUNKS=false
|
||||
S3_GW_KLUDGE_BYPASS_CONTENT_ENCODING_CHECK_IN_CHUNKS=false
|
||||
# Namespaces that should be handled as default
|
||||
S3_GW_KLUDGE_DEFAULT_NAMESPACES="" "root"
|
||||
|
||||
S3_GW_TRACING_ENABLED=false
|
||||
S3_GW_TRACING_ENDPOINT="localhost:4318"
|
||||
|
@ -150,6 +170,8 @@ S3_GW_TRACING_EXPORTER="otlp_grpc"
|
|||
S3_GW_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
|
||||
|
||||
S3_GW_FEATURES_MD5_ENABLED=false
|
||||
# Enable denying access for requests that don't match any policy chain rule.
|
||||
S3_GW_FEATURES_POLICY_DENY_BY_DEFAULT=false
|
||||
|
||||
# ReadTimeout is the maximum duration for reading the entire
|
||||
# request, including the body. A zero or negative value means
|
||||
|
@ -173,3 +195,22 @@ S3_GW_WEB_WRITE_TIMEOUT=0
|
|||
# is zero, the value of ReadTimeout is used. If both are
|
||||
# zero, there is no timeout.
|
||||
S3_GW_WEB_IDLE_TIMEOUT=30s
|
||||
|
||||
# FrostfsID contract configuration. To enable this functionality the `rpc_endpoint` param must also be set.
|
||||
# FrostfsID contract hash (LE) or name in NNS.
|
||||
S3_GW_FROSTFSID_CONTRACT=frostfsid.frostfs
|
||||
# Enables a check that only allows requests from users registered in the FrostfsID contract.
|
||||
S3_GW_FROSTFSID_VALIDATION_ENABLED=true
|
||||
|
||||
# Policy contract configuration. To enable this functionality the `rpc_endpoint` param must also be set.
|
||||
# Enables using policies from Policy contract.
|
||||
S3_GW_POLICY_ENABLED=true
|
||||
# Policy contract hash (LE) or name in NNS.
|
||||
S3_GW_POLICY_CONTRACT=policy.frostfs
|
||||
|
||||
# Proxy contract configuration. To enable this functionality the `rpc_endpoint` param must also be set.
|
||||
# Proxy contract hash (LE) or name in NNS.
|
||||
S3_GW_PROXY_CONTRACT=proxy.frostfs
|
||||
|
||||
# Namespaces configuration
|
||||
S3_GW_NAMESPACES_CONFIG=namespaces.json
|
||||
@ -37,6 +37,15 @@ server:
|
|||
cert_file: /path/to/cert
|
||||
key_file: /path/to/key
|
||||
|
||||
control:
|
||||
# List of hex-encoded public keys that are allowed to use the Control Service
|
||||
authorized_keys:
|
||||
- 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
|
||||
- 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
|
||||
grpc:
|
||||
# Endpoint the Control Service listens on
|
||||
endpoint: localhost:8083
|
||||
|
||||
# Domains that can be used for virtual-hosted-style access to buckets.
|
||||
listen_domains:
|
||||
- s3dev.frostfs.devenv
|
||||
|
@ -91,6 +100,10 @@ cache:
|
|||
list:
|
||||
lifetime: 1m
|
||||
size: 100
|
||||
# Cache which keeps listing sessions
|
||||
list_session:
|
||||
lifetime: 1m
|
||||
size: 100
|
||||
# Cache which contains mapping of nice name to object addresses
|
||||
names:
|
||||
lifetime: 1m
|
||||
|
@ -105,12 +118,17 @@ cache:
|
|||
size: 1000
|
||||
# Cache which stores access box with tokens by its address
|
||||
accessbox:
|
||||
lifetime: 5m
|
||||
size: 10
|
||||
removing_check_interval: 5m
|
||||
lifetime: 10m
|
||||
size: 100
|
||||
# Cache which stores owner to cache operation mapping
|
||||
accesscontrol:
|
||||
lifetime: 1m
|
||||
size: 100000
|
||||
# Cache which stores list of policy chains
|
||||
morph_policy:
|
||||
lifetime: 1m
|
||||
size: 10000
|
||||
|
||||
nats:
|
||||
enabled: true
|
||||
|
@ -163,6 +181,7 @@ allowed_access_key_id_prefixes:
|
|||
- 3stjWenX15YwYzczMr88gy3CQr4NYFBQ8P7keGzH5QFn
|
||||
|
||||
resolve_bucket:
|
||||
namespace_header: X-Frostfs-Namespace
|
||||
allow:
|
||||
- container
|
||||
deny:
|
||||
|
@ -172,11 +191,16 @@ kludge:
|
|||
use_default_xmlns: false
|
||||
# Use this flag to be able to use chunked upload approach without having `aws-chunked` value in `Content-Encoding` header.
|
||||
bypass_content_encoding_check_in_chunks: false
|
||||
# Namespaces that should be handled as default
|
||||
default_namespaces: [ "", "root" ]
|
||||
|
||||
runtime:
|
||||
soft_memory_limit: 1gb
|
||||
|
||||
features:
|
||||
policy:
|
||||
# Enable denying access for requests that don't match any policy chain rule.
|
||||
deny_by_default: false
|
||||
md5:
|
||||
enabled: false
|
||||
|
||||
|
@ -206,3 +230,26 @@ web:
|
|||
# is zero, the value of ReadTimeout is used. If both are
|
||||
# zero, there is no timeout.
|
||||
idle_timeout: 30s
|
||||
|
||||
# FrostfsID contract configuration. To enable this functionality the `rpc_endpoint` param must also be set.
|
||||
frostfsid:
|
||||
# FrostfsID contract hash (LE) or name in NNS.
|
||||
contract: frostfsid.frostfs
|
||||
validation:
|
||||
# Enables a check that only allows requests from users registered in the FrostfsID contract.
|
||||
enabled: true
|
||||
|
||||
# Policy contract configuration. To enable this functionality the `rpc_endpoint` param must also be set.
|
||||
policy:
|
||||
# Enables using policies from Policy contract.
|
||||
enabled: true
|
||||
# Policy contract hash (LE) or name in NNS.
|
||||
contract: policy.frostfs
|
||||
|
||||
# Proxy contract configuration. To enable this functionality the `rpc_endpoint` param must also be set.
|
||||
proxy:
|
||||
# Proxy contract hash (LE) or name in NNS.
|
||||
contract: proxy.frostfs
|
||||
|
||||
namespaces:
|
||||
config: namespaces.json
|
||||
@ -9,24 +9,45 @@ import (
|
|||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
|
||||
// Credentials is a bearer token get/put interface.
|
||||
Credentials interface {
|
||||
GetBox(context.Context, oid.Address) (*accessbox.Box, error)
|
||||
Put(context.Context, cid.ID, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error)
|
||||
Update(context.Context, oid.Address, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error)
|
||||
Put(context.Context, cid.ID, CredentialsParam) (oid.Address, error)
|
||||
Update(context.Context, oid.Address, CredentialsParam) (oid.Address, error)
|
||||
}
|
||||
|
||||
CredentialsParam struct {
|
||||
OwnerID user.ID
|
||||
AccessBox *accessbox.AccessBox
|
||||
Expiration uint64
|
||||
Keys keys.PublicKeys
|
||||
CustomAttributes []object.Attribute
|
||||
}
|
||||
|
||||
cred struct {
|
||||
key *keys.PrivateKey
|
||||
frostFS FrostFS
|
||||
cache *cache.AccessBoxCache
|
||||
removingCheckDuration time.Duration
|
||||
log *zap.Logger
|
||||
}
|
||||
|
||||
Config struct {
|
||||
FrostFS FrostFS
|
||||
Key *keys.PrivateKey
|
||||
CacheConfig *cache.Config
|
||||
RemovingCheckAfterDurations time.Duration
|
||||
}
|
||||
)
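A minimal sketch of calling the reshaped `Credentials` interface with `CredentialsParam`; the expiration epoch and other values are placeholders, not taken from the diff:

// putExample shows the new Put signature; illustrative only, same package.
func putExample(ctx context.Context, c Credentials, cnr cid.ID, owner user.ID,
	box *accessbox.AccessBox, gateKeys keys.PublicKeys) (oid.Address, error) {
	return c.Put(ctx, cnr, CredentialsParam{
		OwnerID:    owner,
		AccessBox:  box,
		Expiration: 100, // placeholder epoch
		Keys:       gateKeys,
	})
}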
|
||||
|
||||
|
@ -50,6 +71,9 @@ type PrmObjectCreate struct {
|
|||
|
||||
// Object payload.
|
||||
Payload []byte
|
||||
|
||||
// CustomAttributes are additional user provided attributes for box object.
|
||||
CustomAttributes []object.Attribute
|
||||
}
|
||||
|
||||
// FrostFS represents virtual connection to FrostFS network.
|
||||
|
@ -78,17 +102,23 @@ var (
|
|||
ErrEmptyBearerToken = errors.New("Bearer token could not be empty")
|
||||
)
|
||||
|
||||
var _ = New
|
||||
var _ Credentials = (*cred)(nil)
|
||||
|
||||
// New creates a new Credentials instance using the given cli and key.
|
||||
func New(frostFS FrostFS, key *keys.PrivateKey, config *cache.Config) Credentials {
|
||||
return &cred{frostFS: frostFS, key: key, cache: cache.NewAccessBoxCache(config)}
|
||||
func New(cfg Config) Credentials {
|
||||
return &cred{
|
||||
frostFS: cfg.FrostFS,
|
||||
key: cfg.Key,
|
||||
cache: cache.NewAccessBoxCache(cfg.CacheConfig),
|
||||
removingCheckDuration: cfg.RemovingCheckAfterDurations,
|
||||
log: cfg.CacheConfig.Logger,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cred) GetBox(ctx context.Context, addr oid.Address) (*accessbox.Box, error) {
|
||||
cachedBox := c.cache.Get(addr)
|
||||
if cachedBox != nil {
|
||||
return cachedBox, nil
|
||||
cachedBoxValue := c.cache.Get(addr)
|
||||
if cachedBoxValue != nil {
|
||||
return c.checkIfCredentialsAreRemoved(ctx, addr, cachedBoxValue)
|
||||
}
|
||||
|
||||
box, err := c.getAccessBox(ctx, addr)
|
||||
|
@ -96,18 +126,48 @@ func (c *cred) GetBox(ctx context.Context, addr oid.Address) (*accessbox.Box, er
|
|||
return nil, fmt.Errorf("get access box: %w", err)
|
||||
}
|
||||
|
||||
cachedBox, err = box.GetBox(c.key)
|
||||
cachedBox, err := box.GetBox(c.key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get box: %w", err)
|
||||
return nil, fmt.Errorf("get gate box: %w", err)
|
||||
}
|
||||
|
||||
if err = c.cache.Put(addr, cachedBox); err != nil {
|
||||
return nil, fmt.Errorf("put box into cache: %w", err)
|
||||
}
|
||||
c.putBoxToCache(addr, cachedBox)
|
||||
|
||||
return cachedBox, nil
|
||||
}
|
||||
|
||||
func (c *cred) checkIfCredentialsAreRemoved(ctx context.Context, addr oid.Address, cachedBoxValue *cache.AccessBoxCacheValue) (*accessbox.Box, error) {
|
||||
if time.Since(cachedBoxValue.PutTime) < c.removingCheckDuration {
|
||||
return cachedBoxValue.Box, nil
|
||||
}
|
||||
|
||||
box, err := c.getAccessBox(ctx, addr)
|
||||
if err != nil {
|
||||
if client.IsErrObjectAlreadyRemoved(err) {
|
||||
c.cache.Delete(addr)
|
||||
return nil, fmt.Errorf("get access box: %w", err)
|
||||
}
|
||||
return cachedBoxValue.Box, nil
|
||||
}
|
||||
|
||||
cachedBox, err := box.GetBox(c.key)
|
||||
if err != nil {
|
||||
c.cache.Delete(addr)
|
||||
return nil, fmt.Errorf("get gate box: %w", err)
|
||||
}
|
||||
// we need this to reset PutTime
// so that we don't re-check for removal every time once removingCheckDuration has elapsed
|
||||
c.putBoxToCache(addr, cachedBox)
|
||||
|
||||
return cachedBoxValue.Box, nil
|
||||
}
|
||||
|
||||
func (c *cred) putBoxToCache(addr oid.Address, box *accessbox.Box) {
|
||||
if err := c.cache.Put(addr, box); err != nil {
|
||||
c.log.Warn(logs.CouldntPutAccessBoxIntoCache, zap.String("address", addr.EncodeToString()))
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cred) getAccessBox(ctx context.Context, addr oid.Address) (*accessbox.AccessBox, error) {
|
||||
data, err := c.frostFS.GetCredsPayload(ctx, addr)
|
||||
if err != nil {
|
||||
|
@ -123,33 +183,34 @@ func (c *cred) getAccessBox(ctx context.Context, addr oid.Address) (*accessbox.A
|
|||
return &box, nil
|
||||
}
|
||||
|
||||
func (c *cred) Put(ctx context.Context, idCnr cid.ID, issuer user.ID, box *accessbox.AccessBox, expiration uint64, keys ...*keys.PublicKey) (oid.Address, error) {
|
||||
return c.createObject(ctx, idCnr, nil, issuer, box, expiration, keys...)
|
||||
func (c *cred) Put(ctx context.Context, idCnr cid.ID, prm CredentialsParam) (oid.Address, error) {
|
||||
return c.createObject(ctx, idCnr, nil, prm)
|
||||
}
|
||||
|
||||
func (c *cred) Update(ctx context.Context, addr oid.Address, issuer user.ID, box *accessbox.AccessBox, expiration uint64, keys ...*keys.PublicKey) (oid.Address, error) {
|
||||
func (c *cred) Update(ctx context.Context, addr oid.Address, prm CredentialsParam) (oid.Address, error) {
|
||||
objID := addr.Object()
|
||||
return c.createObject(ctx, addr.Container(), &objID, issuer, box, expiration, keys...)
|
||||
return c.createObject(ctx, addr.Container(), &objID, prm)
|
||||
}
|
||||
|
||||
func (c *cred) createObject(ctx context.Context, cnrID cid.ID, newVersionFor *oid.ID, issuer user.ID, box *accessbox.AccessBox, expiration uint64, keys ...*keys.PublicKey) (oid.Address, error) {
|
||||
if len(keys) == 0 {
|
||||
func (c *cred) createObject(ctx context.Context, cnrID cid.ID, newVersionFor *oid.ID, prm CredentialsParam) (oid.Address, error) {
|
||||
if len(prm.Keys) == 0 {
|
||||
return oid.Address{}, ErrEmptyPublicKeys
|
||||
} else if box == nil {
|
||||
} else if prm.AccessBox == nil {
|
||||
return oid.Address{}, ErrEmptyBearerToken
|
||||
}
|
||||
data, err := box.Marshal()
|
||||
data, err := prm.AccessBox.Marshal()
|
||||
if err != nil {
|
||||
return oid.Address{}, fmt.Errorf("marshall box: %w", err)
|
||||
}
|
||||
|
||||
idObj, err := c.frostFS.CreateObject(ctx, PrmObjectCreate{
|
||||
Creator: issuer,
|
||||
Creator: prm.OwnerID,
|
||||
Container: cnrID,
|
||||
Filepath: strconv.FormatInt(time.Now().Unix(), 10) + "_access.box",
|
||||
ExpirationEpoch: expiration,
|
||||
ExpirationEpoch: prm.Expiration,
|
||||
NewVersionFor: newVersionFor,
|
||||
Payload: data,
|
||||
CustomAttributes: prm.CustomAttributes,
|
||||
})
|
||||
if err != nil {
|
||||
return oid.Address{}, fmt.Errorf("create object: %w", err)
|
||||
|
|
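For orientation, here is a minimal, hypothetical sketch (not part of this change) of how a caller might drive the reworked API: constructing `Config`, packing an access box, and issuing credentials through the new `CredentialsParam`-based `Put`. The `FrostFS` implementation, container ID, owner, attribute values, and the exact slice type of the `Keys` field are assumptions for illustration only.

```go
package main

import (
	"context"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"go.uber.org/zap"
)

// issueCreds is a sketch: the caller supplies a FrostFS implementation,
// a container ID and an owner; all concrete values below are placeholders.
func issueCreds(ctx context.Context, frostFS tokens.FrostFS, cnrID cid.ID, owner user.ID) (oid.Address, error) {
	key, err := keys.NewPrivateKey()
	if err != nil {
		return oid.Address{}, err
	}

	// The constructor now takes a single Config value instead of positional arguments.
	creds := tokens.New(tokens.Config{
		FrostFS: frostFS,
		Key:     key,
		CacheConfig: &cache.Config{
			Size:     100,
			Lifetime: 10 * time.Minute,
			Logger:   zap.NewNop(),
		},
		RemovingCheckAfterDurations: 5 * time.Minute,
	})

	// Pack a gate token into an access box, as the accompanying test does.
	gates := []*accessbox.GateData{{BearerToken: &bearer.Token{}, GateKey: key.PublicKey()}}
	secret := make([]byte, 32) // placeholder secret access key bytes
	box, _, err := accessbox.PackTokens(gates, secret)
	if err != nil {
		return oid.Address{}, err
	}

	// Put/Update now accept CredentialsParam; CustomAttributes lets the caller
	// attach extra attributes to the stored access box object.
	var attr object.Attribute
	attr.SetKey("custom-key") // hypothetical attribute name/value
	attr.SetValue("custom-value")

	return creds.Put(ctx, cnrID, tokens.CredentialsParam{
		OwnerID:          owner,
		AccessBox:        box,
		Expiration:       1000, // expiration epoch, placeholder
		Keys:             []*keys.PublicKey{key.PublicKey()}, // slice type assumed
		CustomAttributes: []object.Attribute{attr},
	})
}
```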
91
creds/tokens/credentials_test.go
Normal file
|
@ -0,0 +1,91 @@
|
|||
package tokens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap/zaptest"
|
||||
)
|
||||
|
||||
type frostfsMock struct {
|
||||
objects map[oid.Address][]byte
|
||||
errors map[oid.Address]error
|
||||
}
|
||||
|
||||
func (f *frostfsMock) CreateObject(context.Context, PrmObjectCreate) (oid.ID, error) {
|
||||
panic("implement me for test")
|
||||
}
|
||||
|
||||
func (f *frostfsMock) GetCredsPayload(_ context.Context, address oid.Address) ([]byte, error) {
|
||||
if err := f.errors[address]; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, ok := f.objects[address]
|
||||
if !ok {
|
||||
return nil, errors.New("not found")
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func TestRemovingAccessBox(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
key, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
gateData := []*accessbox.GateData{{
|
||||
BearerToken: &bearer.Token{},
|
||||
GateKey: key.PublicKey(),
|
||||
}}
|
||||
|
||||
secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
|
||||
sk, err := hex.DecodeString(secretKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
accessBox, _, err := accessbox.PackTokens(gateData, sk)
|
||||
require.NoError(t, err)
|
||||
data, err := accessBox.Marshal()
|
||||
require.NoError(t, err)
|
||||
|
||||
addr := oidtest.Address()
|
||||
frostfs := &frostfsMock{
|
||||
objects: map[oid.Address][]byte{addr: data},
|
||||
errors: map[oid.Address]error{},
|
||||
}
|
||||
|
||||
cfg := Config{
|
||||
FrostFS: frostfs,
|
||||
Key: key,
|
||||
CacheConfig: &cache.Config{
|
||||
Size: 10,
|
||||
Lifetime: 24 * time.Hour,
|
||||
Logger: zaptest.NewLogger(t),
|
||||
},
|
||||
RemovingCheckAfterDurations: 0, // means check always
|
||||
}
|
||||
|
||||
creds := New(cfg)
|
||||
|
||||
_, err = creds.GetBox(ctx, addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
frostfs.errors[addr] = errors.New("network error")
|
||||
_, err = creds.GetBox(ctx, addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
frostfs.errors[addr] = &apistatus.ObjectAlreadyRemoved{}
|
||||
_, err = creds.GetBox(ctx, addr)
|
||||
require.Error(t, err)
|
||||
}
|
|
@ -143,6 +143,9 @@ the secret. Format of `access_key_id`: `%cid0%oid`, where 0(zero) is a delimiter
|
|||
* `--access-key-id` -- credentials that you want to update (e.g. to add more gates that can use your creds)
|
||||
without changing the values of `aws_access_key_id` and `aws_secret_access_key`. If you want to update credentials, you MUST
|
||||
also provide the secret key using the `AUTHMATE_SECRET_ACCESS_KEY` env variable.
|
||||
* `--frostfsid` -- FrostfsID contract hash (LE) or name in NNS used to register the public key in the contract
|
||||
(the `--rpc-endpoint` flag must also be provided).
|
||||
* `--rpc-endpoint` -- NEO node RPC address.
|
||||
|
||||
### Bearer tokens
|
||||
|
||||
|
|
|
@ -175,6 +175,7 @@ There are some custom types used for brevity:
|
|||
| `peers` | [Nodes configuration](#peers-section) |
|
||||
| `placement_policy` | [Placement policy configuration](#placement_policy-section) |
|
||||
| `server` | [Server configuration](#server-section) |
|
||||
| `control` | [Control API configuration](#control-section) |
|
||||
| `logger` | [Logger configuration](#logger-section) |
|
||||
| `cache` | [Cache configuration](#cache-section) |
|
||||
| `nats` | [NATS configuration](#nats-section) |
|
||||
|
@ -188,6 +189,10 @@ There are some custom types used for brevity:
|
|||
| `runtime` | [Runtime configuration](#runtime-section) |
|
||||
| `features` | [Features configuration](#features-section) |
|
||||
| `web` | [Web server configuration](#web-section) |
|
||||
| `frostfsid` | [FrostfsID configuration](#frostfsid-section) |
|
||||
| `policy` | [Policy contract configuration](#policy-section) |
|
||||
| `proxy` | [Proxy contract configuration](#proxy-section) |
|
||||
| `namespaces` | [Namespaces configuration](#namespaces-section) |
|
||||
|
||||
### General section
|
||||
|
||||
|
@ -216,18 +221,18 @@ allowed_access_key_id_prefixes:
|
|||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|----------------------------------|------------|---------------|----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `listen_domains` | `[]string` | | | Domains to be able to use virtual-hosted-style access to bucket. |
|
||||
| `rpc_endpoint` | `string` | yes | | The address of the RPC host to which the gateway connects to resolve bucket names (required to use the `nns` resolver). |
|
||||
| `resolve_order` | `[]string` | yes | `[dns]` | Order of bucket name resolvers to use. Available resolvers: `dns`, `nns`. | |
|
||||
| `connect_timeout` | `duration` | | `10s` | Timeout to connect to a node. |
|
||||
| `stream_timeout` | `duration` | | `10s` | Timeout for individual operations in streaming RPC. |
|
||||
| `healthcheck_timeout` | `duration` | | `15s` | Timeout to check node health during rebalance. |
|
||||
| `rebalance_interval` | `duration` | | `60s` | Interval to check node health. |
|
||||
| `pool_error_threshold` | `uint32` | | `100` | The number of errors on connection after which node is considered as unhealthy. |
|
||||
| `max_clients_count` | `int` | | `100` | Limits for processing of clients' requests. |
|
||||
| `max_clients_deadline` | `duration` | | `30s` | Deadline after which the gate sends error `RequestTimeout` to a client. |
|
||||
| `allowed_access_key_id_prefixes` | `[]string` | | | List of allowed `AccessKeyID` prefixes which S3 GW serve. If the parameter is omitted, all `AccessKeyID` will be accepted. |
|
||||
|----------------------------------|------------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `listen_domains` | `[]string` | no | | Domains to be able to use virtual-hosted-style access to bucket. |
|
||||
| `rpc_endpoint` | `string` | no | | The address of the RPC host to which the gateway connects to resolve bucket names and interact with frostfs contracts (required to use the `nns` resolver and `frostfsid` contract). |
|
||||
| `resolve_order` | `[]string` | yes | `[dns]` | Order of bucket name resolvers to use. Available resolvers: `dns`, `nns`. |
|
||||
| `connect_timeout` | `duration` | no | `10s` | Timeout to connect to a node. |
|
||||
| `stream_timeout` | `duration` | no | `10s` | Timeout for individual operations in streaming RPC. |
|
||||
| `healthcheck_timeout` | `duration` | no | `15s` | Timeout to check node health during rebalance. |
|
||||
| `rebalance_interval` | `duration` | no | `60s` | Interval to check node health. |
|
||||
| `pool_error_threshold` | `uint32` | no | `100` | The number of errors on connection after which node is considered as unhealthy. |
|
||||
| `max_clients_count` | `int` | no | `100` | Limits for processing of clients' requests. |
|
||||
| `max_clients_deadline` | `duration` | no | `30s` | Deadline after which the gate sends error `RequestTimeout` to a client. |
|
||||
| `allowed_access_key_id_prefixes` | `[]string` | no | | List of allowed `AccessKeyID` prefixes which S3 GW serve. If the parameter is omitted, all `AccessKeyID` will be accepted. |
|
||||
|
||||
### `wallet` section
|
||||
|
||||
|
@ -350,6 +355,24 @@ server:
|
|||
| `tls.cert_file` | `string` | yes | | Path to the TLS certificate. |
|
||||
| `tls.key_file` | `string` | yes | | Path to the key. |
|
||||
|
||||
### `control` section
|
||||
|
||||
Control API parameters.
|
||||
|
||||
```yaml
|
||||
control:
|
||||
authorized_keys:
|
||||
- 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
|
||||
- 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
|
||||
grpc:
|
||||
endpoint: localhost:8083
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|-------------------|------------|---------------|------------------|------------------------------------------------------------------------------|
|
||||
| `authorized_keys` | `[]string` | yes | | List of hex-encoded public keys that have rights to use the Control Service. |
|
||||
| `grpc.endpoint` | `string` | | `localhost:8083` | Endpoint that is listened by the Control Service. |
|
||||
|
||||
### `logger` section
|
||||
|
||||
```yaml
|
||||
|
@ -373,6 +396,9 @@ cache:
|
|||
list:
|
||||
lifetime: 1m
|
||||
size: 100
|
||||
list_session:
|
||||
lifetime: 1m
|
||||
size: 100
|
||||
names:
|
||||
lifetime: 1m
|
||||
size: 1000
|
||||
|
@ -383,22 +409,28 @@ cache:
|
|||
lifetime: 2m
|
||||
size: 1000
|
||||
accessbox:
|
||||
lifetime: 5m
|
||||
size: 10
|
||||
removing_check_interval: 5m
|
||||
lifetime: 10m
|
||||
size: 100
|
||||
accesscontrol:
|
||||
lifetime: 1m
|
||||
size: 100000
|
||||
morph_policy:
|
||||
lifetime: 30s
|
||||
size: 10000
|
||||
```
|
||||
|
||||
| Parameter | Type | Default value | Description |
|
||||
|-----------------|-----------------------------------|-----------------------------------|----------------------------------------------------------------------------------------|
|
||||
|-----------------|-------------------------------------------------|-----------------------------------|----------------------------------------------------------------------------------------|
|
||||
| `objects` | [Cache config](#cache-subsection) | `lifetime: 5m`<br>`size: 1000000` | Cache for objects (FrostFS headers). |
|
||||
| `list` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 100000` | Cache which keeps lists of objects in buckets. |
|
||||
| `list_session` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 100` | Cache which keeps listing session. |
|
||||
| `names` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 10000` | Cache which contains mapping of nice name to object addresses. |
|
||||
| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info. |
|
||||
| `system` | [Cache config](#cache-subsection) | `lifetime: 5m`<br>`size: 10000` | Cache for system objects in a bucket: bucket settings, notification configuration etc. |
|
||||
| `accessbox` | [Cache config](#cache-subsection) | `lifetime: 10m`<br>`size: 100` | Cache which stores access box with tokens by its address. |
|
||||
| `accessbox` | [Accessbox cache config](#accessbox-subsection) | `lifetime: 10m`<br>`size: 100` | Cache which stores access box with tokens by its address. |
|
||||
| `accesscontrol` | [Cache config](#cache-subsection) | `lifetime: 1m`<br>`size: 100000` | Cache which stores owner to cache operation mapping. |
|
||||
| `morph_policy` | [Cache config](#cache-subsection) | `lifetime: 1m`<br>`size: 10000` | Cache which stores list of policy chains. |
|
||||
|
||||
#### `cache` subsection
|
||||
|
||||
|
@ -412,6 +444,20 @@ size: 1000
|
|||
| `lifetime` | `duration` | depends on cache | Lifetime of entries in cache. |
|
||||
| `size` | `int` | depends on cache | LRU cache size. |
|
||||
|
||||
#### `accessbox` subsection
|
||||
|
||||
```yaml
|
||||
|
||||
removing_check_interval: 5m
lifetime: 10m
|
||||
size: 100
|
||||
```
|
||||
|
||||
| Parameter | Type | Default value | Description |
|
||||
|---------------------------|------------|---------------|-------------------------------------------------------|
|
||||
| `removing_check_interval` | `duration` | `5m`          | Time after which creds should be checked for removal. |
|
||||
| `lifetime`                | `duration` | `10m`         | Lifetime of entries in cache.                          |
|
||||
| `size`                    | `int`      | `100`         | LRU cache size.                                        |
|
||||
|
||||
### `nats` section
|
||||
|
||||
This is an advanced section, use with caution.
|
||||
|
@ -514,6 +560,7 @@ frostfs:
|
|||
set_copies_number: [0]
|
||||
client_cut: false
|
||||
buffer_max_size_for_put: 1048576 # 1mb
|
||||
tree_pool_max_attempts: 0
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|
@ -521,22 +568,25 @@ frostfs:
|
|||
| `set_copies_number` | `[]uint32` | yes | `[0]` | Numbers of the object copies (for each replica) to consider PUT to FrostFS successful. <br/>Default value `[0]` or empty list means that object will be processed according to the container's placement policy |
|
||||
| `client_cut` | `bool` | yes | `false` | This flag enables client side object preparing. |
|
||||
| `buffer_max_size_for_put` | `uint64` | yes | `1048576` | Sets max buffer size for read payload in put operations. |
|
||||
| `tree_pool_max_attempts` | `uint32` | no | `0` | Sets the max number of attempts to make a successful tree request. Value 0 means the number of attempts equals the number of nodes in the pool. |
|
||||
|
||||
# `resolve_bucket` section
|
||||
|
||||
Bucket name resolving parameters from and to container ID with `HEAD` request.
|
||||
Parameters for resolving bucket names to and from container IDs.
|
||||
|
||||
```yaml
|
||||
resolve_bucket:
|
||||
namespace_header: X-Frostfs-Namespace
|
||||
allow:
|
||||
- container
|
||||
deny:
|
||||
```
|
||||
|
||||
| Parameter | Type | Default value | Description |
|
||||
|-----------|------------|---------------|--------------------------------------------------------------------------------------------------------------------------|
|
||||
| `allow` | `[]string` | | List of container zones which are available to resolve. Mutual exclusive with `deny` list. Prioritized over `deny` list. |
|
||||
| `deny` | `[]string` | | List of container zones which are restricted to resolve. Mutual exclusive with `allow` list. |
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|--------------------|------------|---------------|-----------------------|--------------------------------------------------------------------------------------------------------------------------|
|
||||
| `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. |
|
||||
| `allow`            | `[]string` | no | | List of container zones which are available to resolve. Mutually exclusive with the `deny` list and prioritized over it. |
|
||||
| `deny`             | `[]string` | no | | List of container zones which are restricted to resolve. Mutually exclusive with the `allow` list. |
|
||||
|
||||
# `kludge` section
|
||||
|
||||
|
@ -546,12 +596,14 @@ Workarounds for non-standard use cases.
|
|||
kludge:
|
||||
use_default_xmlns: false
|
||||
bypass_content_encoding_check_in_chunks: false
|
||||
default_namespaces: [ "", "root" ]
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|-------------------------------------------|------------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------------|
|
||||
|-------------------------------------------|------------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `use_default_xmlns`                       | `bool`     | yes           | false         | Enable using the default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parsing xml bodies. |
|
||||
| `bypass_content_encoding_check_in_chunks` | `bool` | yes | false | Use this flag to be able to use [chunked upload approach](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html) without having `aws-chunked` value in `Content-Encoding` header. |
|
||||
| `default_namespaces` | `[]string` | n/d | ["","root"] | Namespaces that should be handled as default. |
|
||||
|
||||
# `runtime` section
|
||||
Contains runtime parameters.
|
||||
|
@ -570,13 +622,16 @@ Contains parameters for enabling features.
|
|||
|
||||
```yaml
|
||||
features:
|
||||
policy:
|
||||
deny_by_default: false
|
||||
md5:
|
||||
enabled: false
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|---------------|--------|---------------|---------------|----------------------------------------------------------------|
|
||||
|--------------------------|--------|---------------|---------------|------------------------------------------------------------------------------|
|
||||
| `md5.enabled` | `bool` | yes | false | Flag to enable return MD5 checksum in ETag headers and fields. |
|
||||
| `policy.deny_by_default` | `bool` | yes | false | Enable denying access for requests that don't match any policy chain rules. |
|
||||
|
||||
# `web` section
|
||||
Contains web server configuration parameters.
|
||||
|
@ -595,3 +650,84 @@ web:
|
|||
| `read_header_timeout` | `duration` | no | `30s` | The amount of time allowed to read request headers. If `read_header_timeout` is zero, the value of `read_timeout` is used. If both are zero, there is no timeout. |
|
||||
| `write_timeout` | `duration` | no | `0` | The maximum duration before timing out writes of the response. A zero or negative value means there will be no timeout. |
|
||||
| `idle_timeout` | `duration` | no | `30s` | The maximum amount of time to wait for the next request when keep-alives are enabled. If `idle_timeout` is zero, the value of `read_timeout` is used. If both are zero, there is no timeout. |
|
||||
|
||||
# `frostfsid` section
|
||||
|
||||
FrostfsID contract configuration. To enable this functionality, the `rpc_endpoint` param must also be set.
|
||||
|
||||
```yaml
|
||||
frostfsid:
|
||||
contract: frostfsid.frostfs
|
||||
validation:
|
||||
enabled: false
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|----------------------|----------|---------------|---------------------|---------------------------------------------------------------------------------------|
|
||||
| `contract` | `string` | no | `frostfsid.frostfs` | FrostfsID contract hash (LE) or name in NNS. |
|
||||
| `validation.enabled` | `bool`   | no            | `false`             | Enables a check that allows requests only from users registered in the FrostfsID contract. |
|
||||
|
||||
# `policy` section
|
||||
|
||||
Policy contract configuration. To enable this functionality, the `rpc_endpoint` param must also be set.
|
||||
|
||||
```yaml
|
||||
policy:
|
||||
enabled: false
|
||||
contract: policy.frostfs
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|------------|----------|---------------|----------------|-------------------------------------------------------------------|
|
||||
| `enabled`  | `bool`   | no | `true`           | Enables using policies from the Policy contract to check permissions. |
|
||||
| `contract` | `string` | no | `policy.frostfs` | Policy contract hash (LE) or name in NNS. |
|
||||
|
||||
# `proxy` section
|
||||
|
||||
Proxy contract configuration. To enable this functionality, the `rpc_endpoint` param must also be set.
|
||||
|
||||
```yaml
|
||||
proxy:
|
||||
contract: proxy.frostfs
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|------------|----------|---------------|-----------------|------------------------------------------|
|
||||
| `contract` | `string` | no | `proxy.frostfs` | Proxy contract hash (LE) or name in NNS. |
|
||||
|
||||
# `namespaces` section
|
||||
|
||||
Namespaces configuration.
|
||||
|
||||
```yaml
|
||||
namespaces:
|
||||
config: namespace.json
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|-----------|----------|---------------|---------------|-----------------------------------------------------|
|
||||
| `config` | `string` | yes | | Path to json file with config value for namespaces. |
|
||||
|
||||
## `namespaces.config` subsection
|
||||
|
||||
Example of `namespaces.json`.
|
||||
Note that config values from `namespaces.json` can override config values for default namespaces
|
||||
(whose values are otherwise taken from the regular config, e.g. [placement-policy](#placement_policy-section)).
|
||||
To override config values for default namespaces, use the namespace names provided in `kludge.default_namespaces`.
|
||||
|
||||
```json
|
||||
{
|
||||
"namespaces": {
|
||||
"namespace1": {
|
||||
"location_constraints": {
|
||||
"default": "REP 3",
|
||||
"test": "{\"replicas\":[{\"count\":1,\"selector\":\"\"}],\"containerBackupFactor\":0,\"selectors\":[],\"filters\":[],\"unique\":false}"
|
||||
},
|
||||
"copies_numbers": {
|
||||
"default": [ 0 ],
|
||||
"test": [ 1 ]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
|
50
go.mod
|
@ -3,17 +3,19 @@ module git.frostfs.info/TrueCloudLab/frostfs-s3-gw
|
|||
go 1.20
|
||||
|
||||
require (
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230802075510-964c3edb3f44
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20231121085847-241a9f1ad0a4
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.18.1-0.20231218084346-bce7ef18c83b
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20231003164722-60463871dbc2
|
||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20231018083019-2b6d84de9a3d
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20231107114540-ab75edd70939
|
||||
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240206111236-8354a074c4df
|
||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
|
||||
github.com/aws/aws-sdk-go v1.44.6
|
||||
github.com/bluele/gcache v0.0.2
|
||||
github.com/go-chi/chi/v5 v5.0.8
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/google/uuid v1.3.1
|
||||
github.com/minio/sio v0.3.0
|
||||
github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d
|
||||
github.com/nspcc-dev/neo-go v0.101.2-0.20230601131642-a0117042e8fc
|
||||
github.com/nspcc-dev/neo-go v0.104.1-0.20231206061802-441eb8aa86be
|
||||
github.com/panjf2000/ants/v2 v2.5.0
|
||||
github.com/prometheus/client_golang v1.15.1
|
||||
github.com/prometheus/client_model v0.3.0
|
||||
|
@ -21,24 +23,23 @@ require (
|
|||
github.com/spf13/pflag v1.0.5
|
||||
github.com/spf13/viper v1.15.0
|
||||
github.com/ssgreg/journald v1.0.0
|
||||
github.com/stretchr/testify v1.8.3
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/urfave/cli/v2 v2.3.0
|
||||
go.opentelemetry.io/otel v1.16.0
|
||||
go.opentelemetry.io/otel/trace v1.16.0
|
||||
go.uber.org/zap v1.24.0
|
||||
golang.org/x/crypto v0.9.0
|
||||
google.golang.org/grpc v1.55.0
|
||||
google.golang.org/protobuf v1.30.0
|
||||
go.uber.org/zap v1.26.0
|
||||
golang.org/x/crypto v0.14.0
|
||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
|
||||
google.golang.org/grpc v1.59.0
|
||||
google.golang.org/protobuf v1.31.0
|
||||
)
|
||||
|
||||
require (
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb // indirect
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
|
||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/benbjohnson/clock v1.1.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
|
@ -49,23 +50,24 @@ require (
|
|||
github.com/go-logr/logr v1.2.4 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/snappy v0.0.3 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
|
||||
github.com/hashicorp/golang-lru v0.6.0 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||
github.com/nats-io/nats-server/v2 v2.7.1 // indirect
|
||||
github.com/nats-io/nkeys v0.3.0 // indirect
|
||||
github.com/nats-io/nuid v1.0.1 // indirect
|
||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 // indirect
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230615193820-9185820289ce // indirect
|
||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20231123160306-3374ff1e7a3c // indirect
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20231127165613-b35f351f0ba0 // indirect
|
||||
github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
|
@ -85,15 +87,15 @@ require (
|
|||
go.opentelemetry.io/otel/metric v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.16.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/net v0.10.0 // indirect
|
||||
golang.org/x/sync v0.2.0 // indirect
|
||||
golang.org/x/sys v0.8.0 // indirect
|
||||
golang.org/x/term v0.8.0 // indirect
|
||||
golang.org/x/text v0.9.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
|
||||
golang.org/x/net v0.17.0 // indirect
|
||||
golang.org/x/sync v0.3.0 // indirect
|
||||
golang.org/x/sys v0.13.0 // indirect
|
||||
golang.org/x/term v0.13.0 // indirect
|
||||
golang.org/x/text v0.13.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
|
351
go.sum
|
@ -36,76 +36,43 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
|
|||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230802075510-964c3edb3f44 h1:v6JqBD/VzZx3QSxbaXnUwnnJ1KEYheU4LzLGr3IhsAE=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230802075510-964c3edb3f44/go.mod h1:pKJJRLOChW4zDQsAt1e8k/snWKljJtpkiPfxV53ngjI=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb h1:S/TrbOOu9qEXZRZ9/Ddw7crnxbBUQLo68PSzQWYrc9M=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb/go.mod h1:nkR5gaGeez3Zv2SE7aceP0YwxG2FzIB5cGKpQO2vV2o=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20231121085847-241a9f1ad0a4 h1:wjLfZ3WCt7qNGsQv+Jl0TXnmtg0uVk/jToKPFTBc/jo=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20231121085847-241a9f1ad0a4/go.mod h1:uY0AYmCznjZdghDnAk7THFIe1Vlg531IxUcus7ZfUJI=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.18.1-0.20231218084346-bce7ef18c83b h1:zdbOxyqkxRyOLc7/2oNFu5tBwwg0Q6+0tJM3RkAxHlE=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.18.1-0.20231218084346-bce7ef18c83b/go.mod h1:YMFtNZy2MgeiSwt0t8lqk8dYBGzlbhmV1cbbstJJ6oY=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6 h1:aGQ6QaAnTerQ5Dq5b2/f9DUQtSqPkZZ/bkMx/HKuLCo=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6/go.mod h1:W8Nn08/l6aQ7UlIbpF7FsQou7TVpcRD1ZT1KG4TrFhE=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20231003164722-60463871dbc2 h1:PHZX/Gh59ZPNG10JtTjBkmKbhKNq84CKu+dJpbzPVOc=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20231003164722-60463871dbc2/go.mod h1:t1akKcUH7iBrFHX8rSXScYMP17k2kYQXMbZooiL5Juw=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20231107114540-ab75edd70939 h1:jZEepi9yWmqrWgLRQcHQu4YPJaudmd7d2AEhpmM3m4U=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20231107114540-ab75edd70939/go.mod h1:t1akKcUH7iBrFHX8rSXScYMP17k2kYQXMbZooiL5Juw=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
|
||||
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240206111236-8354a074c4df h1:FLk850Ti+aj9vdJTUPvtS4KDIpISze9vTNKV15WIbME=
|
||||
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240206111236-8354a074c4df/go.mod h1:YVL7yFaT0QNSpA0z+RHudLvrLwT+lsFYGyBSVc1ustI=
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
|
||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
|
||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0/go.mod h1:dhY+oy274hV8wGvGL4MwwMpdL3GYvaX1a8GQZQHvlF8=
|
||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20231018083019-2b6d84de9a3d h1:Z9UuI+jxzPtwQZUMmATdTuA8/8l2jzBY1rVh/gwBDsw=
|
||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20231018083019-2b6d84de9a3d/go.mod h1:rQFJJdEOV7KbbMtQYR2lNfiZk+ONRDJSbMCTWxKt8Fw=
|
||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 h1:HeY8n27VyPRQe49l/fzyVMkWEB2fsLJYKp64pwA7tz4=
|
||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.mod h1:rQFJJdEOV7KbbMtQYR2lNfiZk+ONRDJSbMCTWxKt8Fw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/CityOfZion/neo-go v0.62.1-pre.0.20191114145240-e740fbe708f8/go.mod h1:MJCkWUBhi9pn/CrYO1Q3P687y2KeahrOPS9BD9LDGb0=
|
||||
github.com/CityOfZion/neo-go v0.70.1-pre.0.20191209120015-fccb0085941e/go.mod h1:0enZl0az8xA6PVkwzEOwPWVJGqlt/GO4hA4kmQ5Xzig=
|
||||
github.com/CityOfZion/neo-go v0.70.1-pre.0.20191212173117-32ac01130d4c/go.mod h1:JtlHfeqLywZLswKIKFnAp+yzezY4Dji9qlfQKB2OD/I=
|
||||
github.com/CityOfZion/neo-go v0.71.1-pre.0.20200129171427-f773ec69fb84/go.mod h1:FLI526IrRWHmcsO+mHsCbj64pJZhwQFTLJZu+A4PGOA=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/Workiva/go-datastructures v1.0.50/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA=
|
||||
github.com/abiosoft/ishell v2.0.0+incompatible/go.mod h1:HQR9AqF2R3P4XXpMpI0NAzgHf/aS6+zVXRj14cVk9qg=
|
||||
github.com/abiosoft/ishell/v2 v2.0.2/go.mod h1:E4oTCXfo6QjoCart0QYa5m9w4S+deXs/P/9jA77A9Bs=
|
||||
github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db/go.mod h1:rB3B4rKii8V21ydCbIzH5hZiCQE7f5E9SzUb/ZZx530=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
|
||||
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210521073959-f0d4d129b7f1/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/aws/aws-sdk-go v1.44.6 h1:Y+uHxmZfhRTLX2X3khkdxCoTZAyGEX21aOUHe1U6geg=
|
||||
github.com/aws/aws-sdk-go v1.44.6/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
|
||||
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
|
@ -120,18 +87,14 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH
|
|||
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
|
@ -140,14 +103,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
|
|||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
|
||||
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:rZfgFAXFS/z/lEd6LJmf9HVZ1LkgYiHx5pHhV5DR16M=
|
||||
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
|
||||
github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
|
@ -156,26 +112,14 @@ github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITL
|
|||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
|
||||
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-redis/redis v6.10.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
|
||||
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
|
@ -186,7 +130,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
|
|||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
|
@ -205,11 +148,7 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
|
|||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
|
@ -223,9 +162,7 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
|
@ -241,75 +178,49 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
|
|||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
|
||||
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
|
||||
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.13.4 h1:0zhec2I8zGnjWcKyLl6i3gPqKANCCn5e9xmviEEeX6s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0=
|
||||
|
@@ -317,16 +228,8 @@ github.com/minio/sio v0.3.0 h1:syEFBewzOMOYVzSTFpp1MqpSZk8rUNbz8VIIc+PNzus=
|
|||
github.com/minio/sio v0.3.0/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296 h1:vU9tpM3apjYlLLeY23zRWJ9Zktr5jp+mloR942LEOpY=
|
||||
github.com/nats-io/nats-server/v2 v2.7.1 h1:SDj8R0PJPVekw3EgHxGtTfJUuMbsuaul1nwWFI3xTyk=
|
||||
github.com/nats-io/nats-server/v2 v2.7.1/go.mod h1:tckmrt0M6bVaDT3kmh9UrIq/CBOBBse+TpXQi5ldaa8=
|
||||
|
@@ -336,103 +239,39 @@ github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
|
|||
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
|
||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20191205084618-dacb1a30c254/go.mod h1:w1Ln2aT+dBlPhLnuZhBV+DfPEdS2CHWWLp5JTScY3bw=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20191209120240-0d6b7568d9ae/go.mod h1:3FjXOoHmA51EGfb5GS/HOv7VdmngNRTssSeQ729dvGY=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20200117124306-478e5cfbf03a/go.mod h1:/YFK+XOxxg0Bfm6P92lY5eDSLYfp06XOdL8KAVgXjVk=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20200219114139-199d286ed6c1/go.mod h1:O0qtn62prQSqizzoagHmuuKoz8QMkU3SzBoKdEvm3aQ=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20210721160347-1b03241391ac/go.mod h1:U8MSnEShH+o5hexfWJdze6uMFJteP0ko7J2frO7Yu1Y=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20220902113116-58a5e763e647/go.mod h1:g9xisXmX9NP9MjioaTe862n9SlZTrP+6PVUWLBYOr98=
|
||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20210915112629-e1b6cce73d02/go.mod h1:79bEUDEviBHJMFV6Iq6in57FEOCMcRhfQnfaf0ETA5U=
|
||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 h1:n4ZaFCKt1pQJd7PXoMJabZWK9ejjbLOVrkl/lOUmshg=
|
||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22/go.mod h1:79bEUDEviBHJMFV6Iq6in57FEOCMcRhfQnfaf0ETA5U=
|
||||
github.com/nspcc-dev/hrw v1.0.9/go.mod h1:l/W2vx83vMQo6aStyx2AuZrJ+07lGv2JQGlVkPG06MU=
|
||||
github.com/nspcc-dev/neo-go v0.73.1-pre.0.20200303142215-f5a1b928ce09/go.mod h1:pPYwPZ2ks+uMnlRLUyXOpLieaDQSEaf4NM3zHVbRjmg=
|
||||
github.com/nspcc-dev/neo-go v0.98.0/go.mod h1:E3cc1x6RXSXrJb2nDWXTXjnXk3rIqVN8YdFyWv+FrqM=
|
||||
github.com/nspcc-dev/neo-go v0.99.4/go.mod h1:mKTolfRUfKjFso5HPvGSQtUZc70n0VKBMs16eGuC5gA=
|
||||
github.com/nspcc-dev/neo-go v0.101.2-0.20230601131642-a0117042e8fc h1:fySIWvUQsitK5e5qYIHnTDCXuPpwzz89SEUEIyY11sg=
|
||||
github.com/nspcc-dev/neo-go v0.101.2-0.20230601131642-a0117042e8fc/go.mod h1:s9QhjMC784MWqTURovMbyYduIJc86mnCruxcMiAebpc=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20220927123257-24c107e3a262/go.mod h1:23bBw0v6pBYcrWs8CBEEDIEDJNbcFoIh8pGGcf2Vv8s=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230615193820-9185820289ce h1:vLGuUNDkmQrWMa4rr4vTd1u8ULqejWxVmNz1L7ocTEI=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230615193820-9185820289ce/go.mod h1:ZUuXOkdtHZgaC13za/zMgXfQFncZ0jLzfQTe+OsDOtg=
|
||||
github.com/nspcc-dev/neofs-api-go/v2 v2.11.0-pre.0.20211201134523-3604d96f3fe1/go.mod h1:oS8dycEh8PPf2Jjp6+8dlwWyEv2Dy77h/XhhcdxYEFs=
|
||||
github.com/nspcc-dev/neofs-api-go/v2 v2.11.1/go.mod h1:oS8dycEh8PPf2Jjp6+8dlwWyEv2Dy77h/XhhcdxYEFs=
|
||||
github.com/nspcc-dev/neofs-crypto v0.2.0/go.mod h1:F/96fUzPM3wR+UGsPi3faVNmFlA9KAEAUQR7dMxZmNA=
|
||||
github.com/nspcc-dev/neofs-crypto v0.2.3/go.mod h1:8w16GEJbH6791ktVqHN9YRNH3s9BEEKYxGhlFnp0cDw=
|
||||
github.com/nspcc-dev/neofs-crypto v0.3.0/go.mod h1:8w16GEJbH6791ktVqHN9YRNH3s9BEEKYxGhlFnp0cDw=
|
||||
github.com/nspcc-dev/neofs-crypto v0.4.0/go.mod h1:6XJ8kbXgOfevbI2WMruOtI+qUJXNwSGM/E9eClXxPHs=
|
||||
github.com/nspcc-dev/neofs-sdk-go v0.0.0-20211201182451-a5b61c4f6477/go.mod h1:dfMtQWmBHYpl9Dez23TGtIUKiFvCIxUZq/CkSIhEpz4=
|
||||
github.com/nspcc-dev/neofs-sdk-go v0.0.0-20220113123743-7f3162110659/go.mod h1:/jay1lr3w7NQd/VDBkEhkJmDmyPNsu4W+QV2obsUV40=
|
||||
github.com/nspcc-dev/rfc6979 v0.1.0/go.mod h1:exhIh1PdpDC5vQmyEsGvc4YDM/lyQp/452QxGq/UEso=
|
||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20231123160306-3374ff1e7a3c h1:OOQeE613BH93ICPq3eke5N78gWNeMjcBWkmD2NKyXVg=
|
||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20231123160306-3374ff1e7a3c/go.mod h1:79bEUDEviBHJMFV6Iq6in57FEOCMcRhfQnfaf0ETA5U=
|
||||
github.com/nspcc-dev/neo-go v0.104.1-0.20231206061802-441eb8aa86be h1:nZ2Hi5JSXdq3JXDi/8lms1UXQDAA5LVGpOpcrf2bRVA=
|
||||
github.com/nspcc-dev/neo-go v0.104.1-0.20231206061802-441eb8aa86be/go.mod h1:dsu8+VDMgGF7QNtPFBU4seE3pxSq8fYCuk3A6he4+ZQ=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20231127165613-b35f351f0ba0 h1:N+dMIBmteXjJpkH6UZ7HmNftuFxkqszfGLbhsEctnv0=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20231127165613-b35f351f0ba0/go.mod h1:J/Mk6+nKeKSW4wygkZQFLQ6SkLOSGX5Ga0RuuuktEag=
|
||||
github.com/nspcc-dev/rfc6979 v0.2.0 h1:3e1WNxrN60/6N0DW7+UYisLeZJyfqZTNOjeV/toYvOE=
|
||||
github.com/nspcc-dev/rfc6979 v0.2.0/go.mod h1:exhIh1PdpDC5vQmyEsGvc4YDM/lyQp/452QxGq/UEso=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/panjf2000/ants/v2 v2.5.0 h1:1rWGWSnxCsQBga+nQbA4/iY6VMeNoOIAM0ZWh9u3q2Q=
|
||||
github.com/panjf2000/ants/v2 v2.5.0/go.mod h1:cU93usDlihJZ5CfRGNDYsiBYvoilLvBF5Qp/BT2GNRE=
|
||||
github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=
|
||||
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
|
||||
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
|
||||
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
|
||||
github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
|
||||
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
|
||||
|
@@ -448,28 +287,22 @@ github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jH
|
|||
github.com/ssgreg/journald v1.0.0 h1:0YmTDPJXxcWDPba12qNMdO6TxvfkFSYpFIJ31CwmLcU=
|
||||
github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4JEP+oRt0=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
|
||||
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
|
||||
github.com/syndtr/goleveldb v0.0.0-20180307113352-169b1b37be73/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
|
||||
github.com/twmb/murmur3 v1.1.5/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
||||
github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
|
||||
github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU=
|
||||
github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
|
||||
|
@@ -480,12 +313,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
|||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
|
||||
github.com/yuin/gopher-lua v0.0.0-20191128022950-c6266f4fe8d7/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
|
||||
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
||||
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
|
@@ -511,38 +339,22 @@ go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLk
|
|||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
|
||||
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
|
||||
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
|
||||
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
|
||||
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@@ -553,8 +365,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
|
||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@@ -578,12 +390,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
|
@@ -591,7 +399,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
|||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
|
@@ -605,11 +412,9 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
|
|||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
|
@@ -617,12 +422,9 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v
|
|||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@@ -632,9 +434,7 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ
|
|||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@@ -646,36 +446,21 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
|
||||
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@@ -686,39 +471,28 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210429154555-c04ba851c2a4/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
|
||||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@@ -726,15 +500,13 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
|||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
|
||||
golang.org/x/tools v0.0.0-20180318012157-96caea41033d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
|
@ -750,7 +522,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
|
|||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
|
@ -777,14 +548,12 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
|
|||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||
golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@@ -853,8 +622,12 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D
|
|||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
|
||||
google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg=
|
||||
google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
|
@@ -874,10 +647,9 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5
|
|||
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
|
||||
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
|
||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
||||
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@ -891,29 +663,18 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
|
|||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
|
||||
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/abiosoft/ishell.v2 v2.0.0/go.mod h1:sFp+cGtH6o4s1FtpVPTMcHq2yue+c4DGOVohJCPUzwY=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
|
|
|
@@ -120,6 +120,11 @@ func (x *AuthmateFrostFS) CreateObject(ctx context.Context, prm tokens.PrmObject
		attributes = append(attributes, [2]string{accessBoxCRDTNameAttr, versions.Name()})
	}

	for _, attr := range prm.CustomAttributes {
		// we don't check attribute duplication since storage node does this
		attributes = append(attributes, [2]string{attr.Key(), attr.Value()})
	}

	return x.frostFS.CreateObject(ctx, layer.PrmObjectCreate{
		Container: prm.Container,
		Filepath:  prm.Filepath,
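The new loop above folds the caller-supplied custom attributes (see "Authmate: support custom attributes" in the changelog) into the same `[2]string` attribute list the access-box object is created with; duplicates are deliberately not filtered on the gateway side. A minimal standalone sketch of that merging step follows. The `kv` type and `mergeAttributes` helper are hypothetical names used only for illustration, not gateway code:

	package main

	import "fmt"

	// kv is a stand-in for the SDK attribute type: only Key/Value accessors are needed.
	type kv struct{ k, v string }

	func (a kv) Key() string   { return a.k }
	func (a kv) Value() string { return a.v }

	// mergeAttributes appends custom attributes after the system ones,
	// mirroring the loop added to AuthmateFrostFS.CreateObject.
	func mergeAttributes(system [][2]string, custom []kv) [][2]string {
		for _, attr := range custom {
			// duplicates are not filtered here; the storage node validates them
			system = append(system, [2]string{attr.Key(), attr.Value()})
		}
		return system
	}

	func main() {
		attrs := [][2]string{{"FilePath", "access-box"}}
		attrs = mergeAttributes(attrs, []kv{{"Department", "billing"}})
		fmt.Println(attrs)
	}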
@@ -126,6 +126,7 @@ func (x *FrostFS) CreateContainer(ctx context.Context, prm layer.PrmContainerCre
	if prm.Name != "" {
		var d container.Domain
		d.SetName(prm.Name)
		d.SetZone(prm.Zone)

		container.WriteDomain(&cnr, d)
		container.SetName(&cnr, prm.Name)
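The one-line change in this hunk is the extra `container.SetName(&cnr, prm.Name)` call: besides writing the NNS domain record, the friendly name is now also stored as a plain container attribute. A rough sketch of how these calls compose, assuming `cnr` is the frostfs-sdk-go `container.Container` value being prepared (that type and its import path are assumptions here, the individual calls are the ones visible above):

	// buildNamedContainer shows the domain + name wiring in isolation (sketch only).
	func buildNamedContainer(name, zone string) container.Container {
		var cnr container.Container

		var d container.Domain
		d.SetName(name) // NNS domain name
		d.SetZone(zone) // NNS zone

		container.WriteDomain(&cnr, d) // store the domain record
		container.SetName(&cnr, name)  // also keep the human-readable name attribute

		return cnr
	}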
118 internal/frostfs/frostfsid/frostfsid.go Normal file
@@ -0,0 +1,118 @@
package frostfsid

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/handler"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
	frostfsutil "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/wallet"
)

type FrostFSID struct {
	cli *client.Client
}

type Config struct {
	// RPCAddress is an endpoint to connect to neo rpc.
	RPCAddress string

	// Contract is hash of contract or its name in NNS.
	Contract string

	// ProxyContract is hash of proxy contract or its name in NNS to interact with frostfsid.
	ProxyContract string

	// Key is used to interact with frostfsid contract.
	// If this is nil than random key will be generated.
	Key *keys.PrivateKey
}

var (
	_ api.FrostFSID      = (*FrostFSID)(nil)
	_ authmate.FrostFSID = (*FrostFSID)(nil)
	_ handler.FrostFSID  = (*FrostFSID)(nil)
)

// New creates new FrostfsID contract wrapper that implements auth.FrostFSID interface.
func New(ctx context.Context, cfg Config) (*FrostFSID, error) {
	contractHash, err := frostfsutil.ResolveContractHash(cfg.Contract, cfg.RPCAddress)
	if err != nil {
		return nil, fmt.Errorf("resolve frostfs contract hash: %w", err)
	}

	key := cfg.Key
	if key == nil {
		if key, err = keys.NewPrivateKey(); err != nil {
			return nil, fmt.Errorf("generate anon private key for frostfsid: %w", err)
		}
	}

	rpcCli, err := rpcclient.New(ctx, cfg.RPCAddress, rpcclient.Options{})
	if err != nil {
		return nil, fmt.Errorf("init rpc client: %w", err)
	}

	var opt client.Options
	opt.ProxyContract, err = frostfsutil.ResolveContractHash(cfg.ProxyContract, cfg.RPCAddress)
	if err != nil {
		return nil, fmt.Errorf("resolve frostfs contract hash: %w", err)
	}

	cli, err := client.New(rpcCli, wallet.NewAccountFromPrivateKey(key), contractHash, opt)
	if err != nil {
		return nil, fmt.Errorf("init frostfsid client: %w", err)
	}

	return &FrostFSID{
		cli: cli,
	}, nil
}

func (f *FrostFSID) ValidatePublicKey(key *keys.PublicKey) error {
	_, err := f.cli.GetSubjectByKey(key)
	return err
}

func (f *FrostFSID) RegisterPublicKey(ns string, key *keys.PublicKey) error {
	_, err := f.cli.Wait(f.cli.CreateSubject(ns, key))
	if err != nil && !strings.Contains(err.Error(), "subject already exists") {
		return err
	}

	return nil
}

func (f *FrostFSID) GetUserAddress(namespace, name string) (string, error) {
	key, err := f.cli.GetSubjectKeyByName(namespace, name)
	if err != nil {
		return "", err
	}

	return key.Address(), nil
}

func (f *FrostFSID) GetUserGroupIDs(userHash util.Uint160) ([]string, error) {
	subjExt, err := f.cli.GetSubjectExtended(userHash)
	if err != nil {
		if strings.Contains(err.Error(), "not found") {
			return nil, nil
		}
		return nil, err
	}

	res := make([]string, len(subjExt.Groups))
	for i, group := range subjExt.Groups {
		res[i] = strconv.FormatInt(group.ID, 10)
	}

	return res, nil
}
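For context, a sketch of how a caller might wire this wrapper up; the endpoint and contract names below are placeholders and error handling is trimmed, so treat it as an illustration of the Config/New/RegisterPublicKey flow defined above rather than actual gateway wiring:

	// Hypothetical wiring of the frostfsid wrapper (all values are placeholders).
	key, _ := keys.NewPrivateKey()

	ffsid, err := frostfsid.New(ctx, frostfsid.Config{
		RPCAddress:    "http://morph-chain:30333", // neo rpc endpoint (placeholder)
		Contract:      "frostfsid.frostfs",        // contract hash or NNS name (placeholder)
		ProxyContract: "proxy.frostfs",            // proxy contract hash or NNS name (placeholder)
		Key:           key,                        // omit to use a random anonymous key
	})
	if err != nil {
		log.Fatal(err)
	}

	// Register a user key in the "ns" namespace; "subject already exists" is tolerated.
	if err := ffsid.RegisterPublicKey("ns", key.PublicKey()); err != nil {
		log.Fatal(err)
	}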
132 internal/frostfs/policy/contract/contract.go Normal file
@ -0,0 +1,132 @@
|
|||
package contract
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
policycontract "git.frostfs.info/TrueCloudLab/frostfs-contract/policy"
|
||||
policyclient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/policy"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/policy"
|
||||
frostfsutil "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/notary"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
)
|
||||
|
||||
type Client struct {
|
||||
actor *actor.Actor
|
||||
policyContract *policyclient.Contract
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
// RPCAddress is an endpoint to connect to neo rpc.
|
||||
RPCAddress string
|
||||
|
||||
// Contract is hash of contract or its name in NNS.
|
||||
Contract string
|
||||
|
||||
// ProxyContract is hash of proxy contract or its name in NNS to interact with policy.
|
||||
ProxyContract string
|
||||
|
||||
// Key is used to interact with policy contract.
|
||||
// If this is nil than random key will be generated.
|
||||
Key *keys.PrivateKey
|
||||
}
|
||||
|
||||
var _ policy.Contract = (*Client)(nil)
|
||||
|
||||
// New creates new Policy contract wrapper.
|
||||
func New(ctx context.Context, cfg Config) (*Client, error) {
|
||||
contractHash, err := frostfsutil.ResolveContractHash(cfg.Contract, cfg.RPCAddress)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("resolve frostfs contract hash: %w", err)
|
||||
}
|
||||
|
||||
key := cfg.Key
|
||||
if key == nil {
|
||||
if key, err = keys.NewPrivateKey(); err != nil {
|
||||
return nil, fmt.Errorf("generate anon private key for policy: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
rpcCli, err := rpcclient.New(ctx, cfg.RPCAddress, rpcclient.Options{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create policy rpc client: %w", err)
|
||||
}
|
||||
|
||||
proxyContractHash, err := frostfsutil.ResolveContractHash(cfg.ProxyContract, cfg.RPCAddress)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("resolve frostfs contract hash: %w", err)
|
||||
}
|
||||
|
||||
act, err := actor.New(rpcCli, getSigners(key, proxyContractHash, contractHash))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create new actor: %w", err)
|
||||
}
|
||||
|
||||
return &Client{
|
||||
actor: act,
|
||||
policyContract: policyclient.New(act, contractHash),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getSigners(key *keys.PrivateKey, proxyHash, contractHash util.Uint160) []actor.SignerAccount {
|
||||
acc := wallet.NewAccountFromPrivateKey(key)
|
||||
|
||||
return []actor.SignerAccount{
|
||||
{
|
||||
Signer: transaction.Signer{
|
||||
Account: proxyHash,
|
||||
Scopes: transaction.CustomContracts,
|
||||
AllowedContracts: []util.Uint160{contractHash},
|
||||
},
|
||||
Account: notary.FakeContractAccount(proxyHash),
|
||||
},
|
||||
{
|
||||
Signer: transaction.Signer{
|
||||
Account: acc.Contract.ScriptHash(),
|
||||
Scopes: transaction.CalledByEntry,
|
||||
},
|
||||
Account: acc,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) AddChain(kind policycontract.Kind, entity string, name []byte, chain []byte) (util.Uint256, uint32, error) {
|
||||
return c.policyContract.AddChain(big.NewInt(int64(kind)), entity, name, chain)
|
||||
}
|
||||
|
||||
func (c *Client) GetChain(kind policycontract.Kind, entity string, name []byte) ([]byte, error) {
|
||||
return c.policyContract.GetChain(big.NewInt(int64(kind)), entity, name)
|
||||
}
|
||||
|
||||
func (c *Client) RemoveChain(kind policycontract.Kind, entity string, name []byte) (util.Uint256, uint32, error) {
|
||||
return c.policyContract.RemoveChain(big.NewInt(int64(kind)), entity, name)
|
||||
}
|
||||
|
||||
func (c *Client) ListChains(kind policycontract.Kind, entity string, name []byte) ([][]byte, error) {
|
||||
items, err := c.policyContract.ListChainsByPrefix(big.NewInt(int64(kind)), entity, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := make([][]byte, len(items))
|
||||
for i, item := range items {
|
||||
res[i], err = item.TryBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (c *Client) Wait(tx util.Uint256, vub uint32, err error) error {
|
||||
_, err = c.actor.Wait(tx, vub, err)
|
||||
return err
|
||||
}
|
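A hedged sketch, not in the diff, of how a caller might use this wrapper: AddChain returns (tx, vub, err) and Wait blocks until the transaction persists, matching how the morph storages below chain the two calls. The RPC endpoint, NNS names, cnrID and chainBytes values are placeholders.

cli, err := contract.New(ctx, contract.Config{
	RPCAddress:    "http://morph-chain:30333",
	Contract:      "policy.frostfs",
	ProxyContract: "proxy.frostfs",
})
if err != nil {
	return fmt.Errorf("init policy contract: %w", err)
}

// Persist a serialized rule chain for a container-level entity.
if err := cli.Wait(cli.AddChain(policycontract.Container, cnrID.EncodeToString(), []byte("s3chain1"), chainBytes)); err != nil {
	return fmt.Errorf("add chain: %w", err)
}
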
internal/frostfs/policy/contract/inmemory.go (new file, 97 lines)
@@ -0,0 +1,97 @@
package contract

import (
	"errors"
	"strings"
	"sync"

	policycontract "git.frostfs.info/TrueCloudLab/frostfs-contract/policy"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/policy"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

type InMemoryContract struct {
	iamChains       *syncedMap
	containerChains *syncedMap
	namespaceChains *syncedMap
}

type syncedMap struct {
	mu   sync.RWMutex
	data map[string][]byte
}

var _ policy.Contract = (*InMemoryContract)(nil)

var ErrChainNotFound = errors.New("chain not found")

// NewInMemoryContract creates new inmemory Policy contract wrapper.
func NewInMemoryContract() *InMemoryContract {
	return &InMemoryContract{
		iamChains:       &syncedMap{data: map[string][]byte{}},
		containerChains: &syncedMap{data: map[string][]byte{}},
		namespaceChains: &syncedMap{data: map[string][]byte{}},
	}
}

func (c *InMemoryContract) AddChain(kind policycontract.Kind, entity string, name []byte, chain []byte) (util.Uint256, uint32, error) {
	syncMap := c.getMap(kind)
	syncMap.mu.Lock()
	syncMap.data[entity+string(name)] = chain
	syncMap.mu.Unlock()

	return util.Uint256{}, 0, nil
}

func (c *InMemoryContract) GetChain(kind policycontract.Kind, entity string, name []byte) ([]byte, error) {
	syncMap := c.getMap(kind)
	syncMap.mu.RLock()
	defer syncMap.mu.RUnlock()

	val, ok := syncMap.data[entity+string(name)]
	if !ok {
		return nil, ErrChainNotFound
	}
	return val, nil
}

func (c *InMemoryContract) RemoveChain(kind policycontract.Kind, entity string, name []byte) (util.Uint256, uint32, error) {
	syncMap := c.getMap(kind)
	syncMap.mu.Lock()
	delete(syncMap.data, entity+string(name))
	syncMap.mu.Unlock()

	return util.Uint256{}, 0, nil
}

func (c *InMemoryContract) ListChains(kind policycontract.Kind, entity string, name []byte) ([][]byte, error) {
	syncMap := c.getMap(kind)
	syncMap.mu.RLock()
	defer syncMap.mu.RUnlock()

	var res [][]byte
	for key, val := range syncMap.data {
		if strings.HasPrefix(key, entity+string(name)) {
			res = append(res, val)
		}
	}

	return res, nil
}

func (c *InMemoryContract) Wait(_ util.Uint256, _ uint32, err error) error {
	return err
}

func (c *InMemoryContract) getMap(kind policycontract.Kind) *syncedMap {
	switch kind {
	case policycontract.IAM:
		return c.iamChains
	case policycontract.Container:
		return c.containerChains
	case policycontract.Namespace:
		return c.namespaceChains
	default:
		return &syncedMap{data: map[string][]byte{}}
	}
}
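A short sketch, not in the diff: since both implementations satisfy policy.Contract, the in-memory contract can stand in for the morph-backed client in unit tests.

mem := contract.NewInMemoryContract()
if _, _, err := mem.AddChain(policycontract.Namespace, "ns1", []byte("chain-a"), []byte{0x01}); err != nil {
	t.Fatal(err)
}
got, err := mem.GetChain(policycontract.Namespace, "ns1", []byte("chain-a"))
// got == []byte{0x01}; asking for a missing key yields contract.ErrChainNotFound.
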
internal/frostfs/policy/morph_policy_storage.go (new file, 46 lines)
@@ -0,0 +1,46 @@
package policy

import (
	policycontract "git.frostfs.info/TrueCloudLab/frostfs-contract/policy"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/handler"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"go.uber.org/zap"
)

type MorphPolicyStorage struct {
	contract Contract
}

type MorphPolicyStorageConfig struct {
	Contract Contract
	Log      *zap.Logger
}

var _ handler.PolicyStorage = (*MorphPolicyStorage)(nil)

const policyStoragePrefix = 'b'

func NewMorphPolicyStorage(config *MorphPolicyStorageConfig) *MorphPolicyStorage {
	return &MorphPolicyStorage{
		contract: config.Contract,
	}
}

func (c *MorphPolicyStorage) PutPolicy(namespace string, cnrID cid.ID, policy []byte) error {
	name := getPolicyStorageName(cnrID)
	return c.contract.Wait(c.contract.AddChain(policycontract.IAM, namespace, name, policy))
}

func (c *MorphPolicyStorage) GetPolicy(namespace string, cnrID cid.ID) ([]byte, error) {
	name := getPolicyStorageName(cnrID)
	return c.contract.GetChain(policycontract.IAM, namespace, name)
}

func (c *MorphPolicyStorage) DeletePolicy(namespace string, cnrID cid.ID) error {
	name := getPolicyStorageName(cnrID)
	return c.contract.Wait(c.contract.RemoveChain(policycontract.IAM, namespace, name))
}

func getPolicyStorageName(cnrID cid.ID) []byte {
	return append([]byte{policyStoragePrefix}, cnrID[:]...)
}
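A hedged usage sketch, not in the diff: bucket policies are stored under the IAM kind with a per-bucket key, the byte 'b' followed by the raw container ID, so policies of different buckets never collide. The contractClient, namespace and policy document below are assumed to exist in the caller.

st := NewMorphPolicyStorage(&MorphPolicyStorageConfig{Contract: contractClient})
if err := st.PutPolicy("ns1", cnrID, policyJSON); err != nil {
	return fmt.Errorf("put bucket policy: %w", err)
}
raw, err := st.GetPolicy("ns1", cnrID) // returns the same bytes back
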
internal/frostfs/policy/morph_rule_chain_storage.go (new file, 101 lines)
@@ -0,0 +1,101 @@
package policy

import (
	"fmt"

	policycontract "git.frostfs.info/TrueCloudLab/frostfs-contract/policy"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/handler"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"go.uber.org/zap"
)

type MorphRuleChainStorage struct {
	contract Contract
	cache    *cache.MorphPolicyCache
	log      *zap.Logger
}

type MorphRuleChainStorageConfig struct {
	Contract Contract
	Cache    *cache.MorphPolicyCache
	Log      *zap.Logger
}

var (
	_ engine.MorphRuleChainStorage  = (*MorphRuleChainStorage)(nil)
	_ handler.MorphRuleChainStorage = (*MorphRuleChainStorage)(nil)
)

func NewMorphRuleChainStorage(config *MorphRuleChainStorageConfig) *MorphRuleChainStorage {
	return &MorphRuleChainStorage{
		contract: config.Contract,
		cache:    config.Cache,
		log:      config.Log,
	}
}

func (c *MorphRuleChainStorage) AddChain(target engine.Target, policyChain *chain.Chain) error {
	return c.contract.Wait(c.AddMorphRuleChain(chain.S3, target, policyChain))
}

func (c *MorphRuleChainStorage) RemoveChain(target engine.Target, chainID chain.ID) error {
	return c.contract.Wait(c.RemoveMorphRuleChain(chain.S3, target, chainID))
}

func (c *MorphRuleChainStorage) ListChains(target engine.Target) ([]*chain.Chain, error) {
	return c.ListMorphRuleChains(chain.S3, target)
}

func (c *MorphRuleChainStorage) AddMorphRuleChain(name chain.Name, target engine.Target, policyChain *chain.Chain) (util.Uint256, uint32, error) {
	c.cache.Delete(cache.MorphPolicyCacheKey{Target: target, Name: name})
	return c.contract.AddChain(getKind(target), target.Name, getName(name, policyChain.ID), policyChain.Bytes())
}

func (c *MorphRuleChainStorage) RemoveMorphRuleChain(name chain.Name, target engine.Target, chainID chain.ID) (util.Uint256, uint32, error) {
	c.cache.Delete(cache.MorphPolicyCacheKey{Target: target, Name: name})
	return c.contract.RemoveChain(getKind(target), target.Name, getName(name, chainID))
}

func (c *MorphRuleChainStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) {
	key := cache.MorphPolicyCacheKey{Target: target, Name: name}
	list := c.cache.Get(key)
	if list != nil {
		return list, nil
	}

	listChains, err := c.contract.ListChains(getKind(target), target.Name, []byte(name))
	if err != nil {
		return nil, err
	}

	list = make([]*chain.Chain, len(listChains))
	for i, listChain := range listChains {
		var item chain.Chain
		if err = item.DecodeBytes(listChain); err != nil {
			return nil, fmt.Errorf("unmarshal chain: %w", err)
		}
		list[i] = &item
	}

	if err = c.cache.Put(key, list); err != nil {
		c.log.Warn(logs.CouldntCacheListPolicyChains)
	}

	return list, nil
}

func getKind(target engine.Target) policycontract.Kind {
	var kind policycontract.Kind = policycontract.Container
	if target.Type != engine.Container {
		kind = policycontract.Namespace
	}

	return kind
}

func getName(name chain.Name, chainID chain.ID) []byte {
	return append([]byte(name), []byte(chainID)...)
}
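A hedged sketch, not in the diff: listing the S3 rule chains for a namespace target. Reads are served from the MorphPolicyCache when possible and the cache entry is invalidated on every add or remove, as shown above. contractClient, morphPolicyCache and logger are assumed to be constructed elsewhere in the gateway.

storage := NewMorphRuleChainStorage(&MorphRuleChainStorageConfig{
	Contract: contractClient,
	Cache:    morphPolicyCache,
	Log:      logger,
})

// Chains are looked up by the prefix "<chain name>" under the namespace entity.
chains, err := storage.ListMorphRuleChains(chain.S3, engine.NamespaceTarget("ns1"))
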
internal/frostfs/policy/storage.go (new file, 95 lines)
@@ -0,0 +1,95 @@
package policy

import (
	policycontract "git.frostfs.info/TrueCloudLab/frostfs-contract/policy"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/handler"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"go.uber.org/zap"
)

type Storage struct {
	router engine.ChainRouter

	morph handler.MorphRuleChainStorage

	local engine.LocalOverrideStorage

	policy handler.PolicyStorage
}

type StorageConfig struct {
	Contract Contract
	Cache    *cache.MorphPolicyCache
	Log      *zap.Logger
}

type Contract interface {
	AddChain(kind policycontract.Kind, entity string, name []byte, chain []byte) (util.Uint256, uint32, error)
	GetChain(kind policycontract.Kind, entity string, name []byte) ([]byte, error)
	RemoveChain(kind policycontract.Kind, entity string, name []byte) (util.Uint256, uint32, error)
	ListChains(kind policycontract.Kind, entity string, name []byte) ([][]byte, error)
	Wait(tx util.Uint256, vub uint32, err error) error
}

var _ handler.APE = (*Storage)(nil)

func NewStorage(cfg StorageConfig) *Storage {
	// todo use thread safe inmemory https://git.frostfs.info/TrueCloudLab/policy-engine/issues/35
	local := inmemory.NewInmemoryLocalStorage()

	morph := NewMorphRuleChainStorage(&MorphRuleChainStorageConfig{
		Contract: cfg.Contract,
		Cache:    cfg.Cache,
		Log:      cfg.Log,
	})

	policyStorage := NewMorphPolicyStorage(&MorphPolicyStorageConfig{
		Contract: cfg.Contract,
		Log:      cfg.Log,
	})

	return &Storage{
		router: engine.NewDefaultChainRouterWithLocalOverrides(morph, local),
		morph:  morph,
		local:  local,
		policy: policyStorage,
	}
}

func (s *Storage) IsAllowed(name chain.Name, target engine.RequestTarget, r resource.Request) (status chain.Status, found bool, err error) {
	return s.router.IsAllowed(name, target, r)
}

func (s *Storage) LocalStorage() engine.LocalOverrideStorage {
	return s.local
}

func (s *Storage) AddChain(target engine.Target, policyChain *chain.Chain) error {
	return s.morph.AddChain(target, policyChain)
}

func (s *Storage) RemoveChain(target engine.Target, chainID chain.ID) error {
	return s.morph.RemoveChain(target, chainID)
}

func (s *Storage) ListChains(target engine.Target) ([]*chain.Chain, error) {
	return s.morph.ListChains(target)
}

func (s *Storage) PutPolicy(namespace string, cnrID cid.ID, policy []byte) error {
	return s.policy.PutPolicy(namespace, cnrID, policy)
}

func (s *Storage) GetPolicy(namespace string, cnrID cid.ID) ([]byte, error) {
	return s.policy.GetPolicy(namespace, cnrID)
}

func (s *Storage) DeletePolicy(namespace string, cnrID cid.ID) error {
	return s.policy.DeletePolicy(namespace, cnrID)
}
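A hedged sketch, not in the diff, of how the composite storage might be wired and queried. The router combines the morph-backed chains with the local (control API) overrides, so a single IsAllowed call covers both. The request target, the resource.Request value, and the chain.Allow comparison are assumptions about the policy-engine API; the handlers build these values elsewhere.

ape := NewStorage(StorageConfig{
	Contract: contractClient,
	Cache:    morphPolicyCache,
	Log:      logger,
})

// target and req are assumed to be built by the handler layer for the incoming S3 call.
st, found, err := ape.IsAllowed(chain.S3, target, req)
if err != nil {
	return err
}
if !found || st != chain.Allow {
	// reject the S3 operation
}
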
@@ -124,6 +124,65 @@ func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo,
	return subtree, nil
}

type SubTreeStreamImpl struct {
	r      *treepool.SubTreeReader
	buffer []*grpcService.GetSubTreeResponse_Body
	eof    bool
	index  int
	ln     int
}

const bufSize = 1000

func (s *SubTreeStreamImpl) Next() (tree.NodeResponse, error) {
	if s.index != -1 {
		node := s.buffer[s.index]
		s.index++
		if s.index >= s.ln {
			s.index = -1
		}
		return GetSubTreeResponseBodyWrapper{response: node}, nil
	}
	if s.eof {
		return nil, io.EOF
	}

	var err error
	s.ln, err = s.r.Read(s.buffer)
	if err != nil {
		if err != io.EOF {
			return nil, fmt.Errorf("sub tree stream impl pool wrap: %w", handleError(err))
		}
		s.eof = true
	}
	if s.ln > 0 {
		s.index = 0
	}

	return s.Next()
}

func (w *PoolWrapper) GetSubTreeStream(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) (tree.SubTreeStream, error) {
	poolPrm := treepool.GetSubTreeParams{
		CID:         bktInfo.CID,
		TreeID:      treeID,
		RootID:      rootID,
		Depth:       depth,
		BearerToken: getBearer(ctx, bktInfo),
	}

	subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
	if err != nil {
		return nil, handleError(err)
	}

	return &SubTreeStreamImpl{
		r:      subTreeReader,
		buffer: make([]*grpcService.GetSubTreeResponse_Body, bufSize),
		index:  -1,
	}, nil
}

func (w *PoolWrapper) AddNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, parent uint64, meta map[string]string) (uint64, error) {
	nodeID, err := w.p.AddNode(ctx, treepool.AddNodeParams{
		CID: bktInfo.CID,
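A short consumer sketch, not in the diff: the stream contract implied by Next() above is "return nodes until io.EOF", buffered in batches of bufSize. The depth literal and the node handling are illustrative.

stream, err := wrapper.GetSubTreeStream(ctx, bktInfo, treeID, rootID, 2)
if err != nil {
	return err
}
for {
	node, err := stream.Next()
	if errors.Is(err, io.EOF) {
		break
	}
	if err != nil {
		return err
	}
	_ = node // process the tree.NodeResponse
}
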
internal/frostfs/util/util.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package util

import (
	"fmt"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// ResolveContractHash determines the contract hash, resolving the NNS name if the value is not a hash.
func ResolveContractHash(contractHash, rpcAddress string) (util.Uint160, error) {
	if hash, err := util.Uint160DecodeStringLE(contractHash); err == nil {
		return hash, nil
	}

	splitName := strings.Split(contractHash, ".")
	if len(splitName) != 2 {
		return util.Uint160{}, fmt.Errorf("invalid contract name: '%s'", contractHash)
	}

	var domain container.Domain
	domain.SetName(splitName[0])
	domain.SetZone(splitName[1])

	var nns ns.NNS
	if err := nns.Dial(rpcAddress); err != nil {
		return util.Uint160{}, fmt.Errorf("dial nns %s: %w", rpcAddress, err)
	}

	return nns.ResolveContractHash(domain)
}
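A one-line sketch, not in the diff: the same helper accepts either a 20-byte LE hex hash or a two-part NNS name such as "policy.frostfs"; only the latter triggers an NNS dial. The endpoint below is a placeholder.

hash, err := ResolveContractHash("policy.frostfs", "http://morph-chain:30333")
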
@@ -12,6 +12,7 @@ const (
	ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port" // Warn in ../../cmd/s3-gw/service.go
	ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" // Info in ../../cmd/s3-gw/service.go
	ShuttingDownService = "shutting down service" // Info in ../../cmd/s3-gw/service.go
	CantGracefullyShutDownService = "can't gracefully shut down service, force stop" // Error in ../../cmd/s3-gw/service.go
	ContainerResolverWillBeDisabled = "container resolver will be disabled because of resolvers 'resolver_order' is empty" // Info in ../../cmd/s3-gw/app.go
	FailedToInitializeTracing = "failed to initialize tracing" // Warn in ../../cmd/s3-gw/app.go
	TracingConfigUpdated = "tracing config updated" // Info in ../../cmd/s3-gw/app.go

@@ -21,6 +22,7 @@ const (
	ApplicationFinished = "application finished" // Info in ../../cmd/s3-gw/app.go
	FetchDomainsPrepareToUseAPI = "fetch domains, prepare to use API" // Info in ../../cmd/s3-gw/app.go
	StartingServer = "starting server" // Info in ../../cmd/s3-gw/app.go
	StartingControlAPI = "starting control API server" // Info in ../../cmd/s3-gw/app.go
	StoppingServer = "stopping server" // Info in ../../cmd/s3-gw/app.go
	SIGHUPConfigReloadStarted = "SIGHUP config reload started" // Info in ../../cmd/s3-gw/app.go
	FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed" // Warn in ../../cmd/s3-gw/app.go

@@ -32,6 +34,8 @@ const (
	FailedToAddServer = "failed to add server" // Warn in ../../cmd/s3-gw/app.go
	AddServer = "add server" // Info in ../../cmd/s3-gw/app.go
	ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver 'nns' won't be used since 'rpc_endpoint' isn't provided" // Warn in ../../cmd/s3-gw/app.go
	ControlAPICannotShutdownGracefully = "control API cannot shutdown gracefully, forcing stop" // Info in ../../cmd/s3-gw/app.go
	ControlAPIServiceStopped = "control API service stopped" // Info in ../../cmd/s3-gw/app.go
	InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)" // Error in ../../cmd/s3-gw/app_settings.go
	InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value" // Error in ../../cmd/s3-gw/app_settings.go
	FailedToParseDefaultLocationConstraint = "failed to parse 'default' location constraint, default one will be used" // Warn in cmd/s3-gw/app_settings.go

@@ -40,11 +44,17 @@ const (
	FailedToParseLocationConstraint = "failed to parse location constraint, it cannot be used" // Warn in cmd/s3-gw/app_settings.go
	FailedToParseDefaultCopiesNumbers = "failed to parse 'default' copies numbers, default one will be used" // Warn in cmd/s3-gw/app_settings.go
	FailedToParseCopiesNumbers = "failed to parse copies numbers, skip" // Warn in cmd/s3-gw/app_settings.go
	FailedToParsePublicKey = "failed to parse public key, skip" // Warn in cmd/s3-gw/app_settings.go
	DefaultNamespacesCannotBeEmpty = "default namespaces cannot be empty, defaults will be used" // Warn in cmd/s3-gw/app_settings.go
	FailedToParseNamespacesConfig = "failed to unmarshal namespaces config" // Warn in cmd/s3-gw/app_settings.go
	DefaultNamespaceConfigValuesBeOverwritten = "default namespace config value be overwritten by values from 'namespaces.config'" // Warn in cmd/s3-gw/app_settings.go
	MultipleDefaultOverridesFound = "multiple default overrides found, only one will be used" // Warn in cmd/s3-gw/app_settings.go
	FailedToParseDefaultDefaultLocationConstraint = "failed to parse default 'default' location constraint" // Fatal in cmd/s3-gw/app_settings.go
	ConstraintAdded = "constraint added" // Info in ../../cmd/s3-gw/app_settings.go
	SkipEmptyAddress = "skip, empty address" // Warn in ../../cmd/s3-gw/app_settings.go
	AddedStoragePeer = "added storage peer" // Info in ../../cmd/s3-gw/app_settings.go
	PrepareConnectionPool = "prepare connection pool" // Debug in ../../cmd/s3-authmate/modules/utils.go
	PrepareFrostfsIDClient = "prepare frostfsid client" // Debug in ../../cmd/s3-authmate/modules/utils.go
	InvalidCacheEntryType = "invalid cache entry type" // Warn in ../../api/cache/*
	InvalidCacheKeyType = "invalid cache key type" // Warn in ../../api/cache/objectslist.go
	ObjectIsCopied = "object is copied" // Info in ../../api/handler/copy.go

@@ -70,8 +80,6 @@ const (
	CouldntDeletePart = "couldn't delete part" // Warn in ../../api/layer/multipart_upload.go
	PartDetails = "part details" // Debug in ../../api/layer/multipart_upload.go
	GetObject = "get object" // Debug in ../../api/layer/layer.go
	ObjectAlreadyRemoved = "object already removed" // Debug in ../../api/layer/layer.go
	ObjectNotFound = "object not found" // Debug in ../../api/layer/layer.go
	ResolveBucket = "resolve bucket" // Info in ../../api/layer/layer.go
	CouldntDeleteCorsObject = "couldn't delete cors object" // Error in ../../api/layer/cors.go
	PutObject = "put object" // Debug in ../../api/layer/object.go

@@ -85,11 +93,13 @@ const (
	CouldntCacheAccessControlOperation = "couldn't cache access control operation" // Warn in ../../api/layer/cache.go
	CouldntPutObjAddressToNameCache = "couldn't put obj address to name cache" // Warn in ../../api/layer/cache.go
	CouldntCacheListOfObjects = "couldn't cache list of objects" // Warn in ../../api/layer/cache.go
	CouldntCacheListSession = "couldn't cache list session" // Warn in ../../api/layer/cache.go
	CouldntCacheTags = "couldn't cache tags" // Error in ../../api/layer/cache.go
	CouldntCacheLockInfo = "couldn't cache lock info" // Error in ../../api/layer/cache.go
	CouldntCacheBucketSettings = "couldn't cache bucket settings" // Warn in ../../api/layer/cache.go
	CouldntCacheCors = "couldn't cache cors" // Warn in ../../api/layer/cache.go
	CouldntCacheNotificationConfiguration = "couldn't cache notification configuration" // Warn in ../../api/layer/cache.go
	CouldntCacheListPolicyChains = "couldn't cache list policy chains" // Warn in ../../api/layer/cache.go
	RequestEnd = "request end" // Info in ../../api/middleware/response.go
	CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed = "couldn't receive access box for gate key, random key will be used" // Debug in ../../api/middleware/auth.go
	FailedToPassAuthentication = "failed to pass authentication" // Error in ../../api/middleware/auth.go

@@ -116,4 +126,19 @@ const (
	CouldNotInitializeAPIHandler = "could not initialize API handler" // Fatal in ../../cmd/s3-gw/app.go
	RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" // Warn in ../../cmd/s3-gw/app.go
	RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" // Info in ../../cmd/s3-gw/app.go
	AnonRequestSkipFrostfsIDValidation = "anon request, skip FrostfsID validation" // Debug in ../../api/middleware/auth.go
	FrostfsIDValidationFailed = "FrostfsID validation failed" // Error in ../../api/middleware/auth.go
	InitFrostfsIDContractFailed = "init frostfsid contract failed" // Fatal in ../../cmd/s3-gw/app.go
	InitPolicyContractFailed = "init policy contract failed" // Fatal in ../../cmd/s3-gw/app.go
	ControlAPIHealthcheck = "healthcheck request"
	ControlAPIPutPolicies = "put policies request"
	ControlAPIRemovePolicies = "remove policies request"
	ControlAPIGetPolicy = "get policy request"
	ControlAPIListPolicies = "list policies request"
	PolicyValidationFailed = "policy validation failed"
	ParseTreeNode = "parse tree node"
	FailedToGetRealObjectSize = "failed to get real object size"
	CouldntDeleteObjectFromStorageContinueDeleting = "couldn't delete object from storage, continue deleting from tree"
	CouldntPutAccessBoxIntoCache = "couldn't put accessbox into cache"
	InvalidAccessBoxCacheRemovingCheckInterval = "invalid accessbox check removing interval, using default value"
)
@@ -65,12 +65,12 @@ func (m *AppMetrics) Handler() http.Handler {
	return m.gate.Handler()
}

-func (m *AppMetrics) Update(user, bucket, cnrID string, reqType RequestType, in, out uint64) {
+func (m *AppMetrics) Update(user, bucket, cnrID, ns string, reqType RequestType, in, out uint64) {
	if !m.isEnabled() {
		return
	}

-	m.gate.Billing.apiStat.Update(user, bucket, cnrID, reqType, in, out)
+	m.gate.Billing.apiStat.Update(user, bucket, cnrID, ns, reqType, in, out)
}

func (m *AppMetrics) Statistic() *APIStatMetrics {

@@ -71,6 +71,7 @@ type (
	bucketKey struct {
		name      string
		cid       string
		namespace string
	}

	bucketStat struct {

@@ -88,6 +89,7 @@ type (
		User        string
		Bucket      string
		ContainerID string
		Namespace   string
	}

	UserMetricsInfo struct {

@@ -108,7 +110,7 @@ type (
	}
)

-func (u *UsersAPIStats) Update(user, bucket, cnrID string, reqType RequestType, in, out uint64) {
+func (u *UsersAPIStats) Update(user, bucket, cnrID, ns string, reqType RequestType, in, out uint64) {
	u.Lock()
	defer u.Unlock()

@@ -127,6 +129,7 @@ func (u *UsersAPIStats) Update(user, bucket, cnrID string, reqType RequestType,
	key := bucketKey{
		name:      bucket,
		cid:       cnrID,
		namespace: ns,
	}

	bktStat := usersStat.buckets[key]

@@ -151,6 +154,7 @@ func (u *UsersAPIStats) DumpMetrics() UserMetrics {
				User:        user,
				Bucket:      key.name,
				ContainerID: key.cid,
				Namespace:   key.namespace,
			}

			if bktStat.InTraffic != 0 {

@@ -232,6 +236,7 @@ func (b *billingMetrics) Collect(ch chan<- prometheus.Metric) {
			value.Bucket,
			value.ContainerID,
			value.Operation.String(),
			value.Namespace,
		)
	}

@@ -244,6 +249,7 @@ func (b *billingMetrics) Collect(ch chan<- prometheus.Metric) {
			value.Bucket,
			value.ContainerID,
			value.Type.String(),
			value.Namespace,
		)
	}
}

@@ -64,7 +64,7 @@ var appMetricsDesc = map[string]map[string]Description{
			Subsystem: billingSubsystem,
			Name:      userRequestsMetric,
			Help:      "Accumulated user requests",
-			VariableLabels: []string{"user", "bucket", "cid", "operation"},
+			VariableLabels: []string{"user", "bucket", "cid", "operation", "namespace"},
		},
		userTrafficMetric: Description{
			Type: dto.MetricType_GAUGE,

@@ -72,7 +72,7 @@ var appMetricsDesc = map[string]map[string]Description{
			Subsystem: billingSubsystem,
			Name:      userTrafficMetric,
			Help:      "Accumulated user traffic",
-			VariableLabels: []string{"user", "bucket", "cid", "direction"},
+			VariableLabels: []string{"user", "bucket", "cid", "direction", "namespace"},
		},
	},
	stateSubsystem: {
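A hedged sketch, not in the diff: the billing metrics now carry a "namespace" label, so callers pass it alongside the container ID. All argument values below are placeholders; reqType and the byte counters are assumed to come from the request middleware.

appMetrics.Update("owner-id", "bucket-name", "container-id", "namespace", reqType, bytesIn, bytesOut)
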
pkg/service/control/client/client.go (new file, 162 lines)
@@ -0,0 +1,162 @@
package client

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/control"
	controlSvc "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/control/server"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"go.uber.org/zap"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

type Client struct {
	svc control.ControlServiceClient
	key *keys.PrivateKey
}

type Config struct {
	Logger *zap.Logger
}

type PolicyData struct {
	Namespace string
	Chain     *chain.Chain
}

type PolicyInfo struct {
	Namespace string
	ChainID   chain.ID
}

func New(ctx context.Context, addr string, key *keys.PrivateKey) (*Client, error) {
	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, fmt.Errorf("failed to dial s3 gw control api: %w", err)
	}

	svc := control.NewControlServiceClient(conn)

	cli := &Client{
		svc: svc,
		key: key,
	}

	return cli, cli.Healthcheck(ctx)
}

func (c *Client) Healthcheck(ctx context.Context) error {
	req := &control.HealthCheckRequest{}
	if err := controlSvc.SignMessage(&c.key.PrivateKey, req); err != nil {
		return err
	}

	res, err := c.svc.HealthCheck(ctx, req)
	if err != nil {
		return err
	}

	if res.Body.HealthStatus != control.HealthStatus_READY {
		return fmt.Errorf("service isn't ready, status: %s", res.Body.HealthStatus)
	}

	return nil
}

func (c *Client) PutPolicies(ctx context.Context, policies []PolicyData) error {
	chainDatas := make([]*control.PutPoliciesRequest_ChainData, len(policies))
	for i := range policies {
		chainDatas[i] = &control.PutPoliciesRequest_ChainData{
			Namespace: policies[i].Namespace,
			Chain:     policies[i].Chain.Bytes(),
		}
	}

	req := &control.PutPoliciesRequest{
		Body: &control.PutPoliciesRequest_Body{
			ChainDatas: chainDatas,
		},
	}

	if err := controlSvc.SignMessage(&c.key.PrivateKey, req); err != nil {
		return err
	}

	_, err := c.svc.PutPolicies(ctx, req)
	return err
}

func (c *Client) RemovePolicies(ctx context.Context, policies []PolicyInfo) error {
	chainInfos := make([]*control.RemovePoliciesRequest_ChainInfo, len(policies))
	for i := range policies {
		chainInfos[i] = &control.RemovePoliciesRequest_ChainInfo{
			Namespace: policies[i].Namespace,
			ChainID:   []byte(policies[i].ChainID),
		}
	}

	req := &control.RemovePoliciesRequest{
		Body: &control.RemovePoliciesRequest_Body{
			ChainInfos: chainInfos,
		},
	}

	if err := controlSvc.SignMessage(&c.key.PrivateKey, req); err != nil {
		return err
	}

	_, err := c.svc.RemovePolicies(ctx, req)
	return err
}

func (c *Client) GetPolicy(ctx context.Context, namespace string, chainID chain.ID) (*chain.Chain, error) {
	req := &control.GetPolicyRequest{
		Body: &control.GetPolicyRequest_Body{
			Namespace: namespace,
			ChainID:   []byte(chainID),
		},
	}

	if err := controlSvc.SignMessage(&c.key.PrivateKey, req); err != nil {
		return nil, err
	}

	resp, err := c.svc.GetPolicy(ctx, req)
	if err != nil {
		return nil, err
	}

	var policyChain chain.Chain
	if err = policyChain.DecodeBytes(resp.GetBody().GetChain()); err != nil {
		return nil, err
	}

	return &policyChain, nil
}

func (c *Client) ListPolicies(ctx context.Context, namespace string) ([]chain.ID, error) {
	req := &control.ListPoliciesRequest{
		Body: &control.ListPoliciesRequest_Body{
			Namespace: namespace,
		},
	}

	if err := controlSvc.SignMessage(&c.key.PrivateKey, req); err != nil {
		return nil, err
	}

	resp, err := c.svc.ListPolicies(ctx, req)
	if err != nil {
		return nil, err
	}

	res := make([]chain.ID, len(resp.GetBody().GetChainIDs()))
	for i, chainID := range resp.GetBody().GetChainIDs() {
		res[i] = chain.ID(chainID)
	}

	return res, nil
}
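A hedged usage sketch, not in the diff: New performs a signed healthcheck before returning, and every subsequent call is signed with the same private key. The address is a placeholder and policyChain is assumed to be a *chain.Chain decoded or built elsewhere with the policy-engine package.

cli, err := client.New(ctx, "localhost:8083", key)
if err != nil {
	return fmt.Errorf("connect to control API: %w", err)
}

err = cli.PutPolicies(ctx, []client.PolicyData{{
	Namespace: "ns1",
	Chain:     policyChain,
}})
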
pkg/service/control/server/server.go (new file, 316 lines)
@@ -0,0 +1,316 @@
package server

import (
	"bytes"
	"context"
	"crypto/ecdsa"
	"encoding/hex"
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/control"
	frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
	frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
	"go.uber.org/zap"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type Server struct {
	*cfg
}

type Settings interface {
	ResolveNamespaceAlias(ns string) string
	FetchRawKeys() [][]byte
}

type defaultSettings struct{}

func (f defaultSettings) FetchRawKeys() [][]byte                 { return nil }
func (f defaultSettings) ResolveNamespaceAlias(ns string) string { return ns }

// Option of the Server's constructor.
type Option func(*cfg)

type cfg struct {
	log *zap.Logger

	settings Settings

	chainStorage engine.LocalOverrideStorage
}

func defaultCfg() *cfg {
	return &cfg{
		log:          zap.NewNop(),
		settings:     defaultSettings{},
		chainStorage: inmemory.NewInmemoryLocalStorage(),
	}
}

// New creates, initializes and returns new Server instance.
func New(opts ...Option) *Server {
	c := defaultCfg()

	for _, opt := range opts {
		opt(c)
	}

	c.log = c.log.With(zap.String("service", "control API"))

	return &Server{
		cfg: c,
	}
}

// WithSettings returns option to add settings to use Control service.
func WithSettings(settings Settings) Option {
	return func(c *cfg) {
		c.settings = settings
	}
}

// WithLogger returns option to set logger.
func WithLogger(log *zap.Logger) Option {
	return func(c *cfg) {
		c.log = log
	}
}

// WithChainStorage returns option to set chain storage.
func WithChainStorage(chainStorage engine.LocalOverrideStorage) Option {
	return func(c *cfg) {
		c.chainStorage = chainStorage
	}
}

// HealthCheck returns health status of the local node.
//
// If the request is unsigned or signed by a disallowed key, a permission error is returned.
func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest) (*control.HealthCheckResponse, error) {
	s.log.Info(logs.ControlAPIHealthcheck, zap.String("key", hex.EncodeToString(req.Signature.Key)))

	// verify request
	if err := s.isValidRequest(req); err != nil {
		return nil, status.Error(codes.PermissionDenied, err.Error())
	}

	resp := &control.HealthCheckResponse{
		Body: &control.HealthCheckResponse_Body{
			HealthStatus: control.HealthStatus_READY,
		},
	}

	return resp, nil
}

// PutPolicies replaces existing policies.
//
// If the request is unsigned or signed by a disallowed key, a permission error is returned.
func (s *Server) PutPolicies(_ context.Context, req *control.PutPoliciesRequest) (*control.PutPoliciesResponse, error) {
	s.log.Info(logs.ControlAPIPutPolicies, zap.String("key", hex.EncodeToString(req.Signature.Key)))

	// verify request
	if err := s.isValidRequest(req); err != nil {
		return nil, status.Error(codes.PermissionDenied, err.Error())
	}

	for _, data := range req.GetBody().GetChainDatas() {
		if err := s.putPolicy(data); err != nil {
			return nil, err
		}
	}

	return &control.PutPoliciesResponse{}, nil
}

func (s *Server) putPolicy(data *control.PutPoliciesRequest_ChainData) error {
	var overrideChain chain.Chain
	if err := overrideChain.DecodeBytes(data.GetChain()); err != nil {
		return status.Error(codes.InvalidArgument, fmt.Sprintf("failed to parse body: %s", err.Error()))
	}

	if len(overrideChain.ID) == 0 {
		return status.Error(codes.InvalidArgument, "missing chain id")
	}

	ns := s.settings.ResolveNamespaceAlias(data.GetNamespace())
	if _, err := s.chainStorage.AddOverride(chain.S3, engine.NamespaceTarget(ns), &overrideChain); err != nil {
		return status.Error(codes.Internal, err.Error())
	}

	return nil
}

// RemovePolicies removes existing policies.
//
// If the request is unsigned or signed by a disallowed key, a permission error is returned.
func (s *Server) RemovePolicies(_ context.Context, req *control.RemovePoliciesRequest) (*control.RemovePoliciesResponse, error) {
	s.log.Info(logs.ControlAPIRemovePolicies, zap.String("key", hex.EncodeToString(req.Signature.Key)))

	// verify request
	if err := s.isValidRequest(req); err != nil {
		return nil, status.Error(codes.PermissionDenied, err.Error())
	}

	for _, info := range req.GetBody().GetChainInfos() {
		if err := s.removePolicy(info); err != nil {
			return nil, err
		}
	}

	return &control.RemovePoliciesResponse{}, nil
}

func (s *Server) removePolicy(info *control.RemovePoliciesRequest_ChainInfo) error {
	ns := s.settings.ResolveNamespaceAlias(info.GetNamespace())
	err := s.chainStorage.RemoveOverride(chain.S3, engine.NamespaceTarget(ns), chain.ID(info.GetChainID()))
	if err != nil {
		if isNotFoundError(err) {
			return status.Error(codes.NotFound, err.Error())
		}
		return status.Error(codes.InvalidArgument, err.Error())
	}
	return nil
}

// GetPolicy returns existing policy.
//
// If the request is unsigned or signed by a disallowed key, a permission error is returned.
func (s *Server) GetPolicy(_ context.Context, req *control.GetPolicyRequest) (*control.GetPolicyResponse, error) {
	s.log.Info(logs.ControlAPIGetPolicy, zap.String("namespace", req.GetBody().GetNamespace()),
		zap.Binary("chainId", req.GetBody().GetChainID()), zap.String("key", hex.EncodeToString(req.Signature.Key)))

	// verify request
	if err := s.isValidRequest(req); err != nil {
		return nil, status.Error(codes.PermissionDenied, err.Error())
	}

	ns := s.settings.ResolveNamespaceAlias(req.GetBody().GetNamespace())
	overrideChain, err := s.chainStorage.GetOverride(chain.S3, engine.NamespaceTarget(ns), chain.ID(req.GetBody().GetChainID()))
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	return &control.GetPolicyResponse{Body: &control.GetPolicyResponse_Body{Chain: overrideChain.Bytes()}}, nil
}

// ListPolicies lists existing policies.
//
// If the request is unsigned or signed by a disallowed key, a permission error is returned.
func (s *Server) ListPolicies(_ context.Context, req *control.ListPoliciesRequest) (*control.ListPoliciesResponse, error) {
	s.log.Info(logs.ControlAPIListPolicies, zap.String("namespace", req.GetBody().GetNamespace()),
		zap.String("key", hex.EncodeToString(req.Signature.Key)))

	// verify request
	if err := s.isValidRequest(req); err != nil {
		return nil, status.Error(codes.PermissionDenied, err.Error())
	}

	ns := s.settings.ResolveNamespaceAlias(req.GetBody().GetNamespace())
	chains, err := s.chainStorage.ListOverrides(chain.S3, engine.NamespaceTarget(ns))
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	res := make([][]byte, len(chains))
	for i := range chains {
		res[i] = []byte(chains[i].ID)
	}

	return &control.ListPoliciesResponse{Body: &control.ListPoliciesResponse_Body{ChainIDs: res}}, nil
}

// SignedMessage is an interface of Control service message.
type SignedMessage interface {
	ReadSignedData([]byte) ([]byte, error)
	GetSignature() *control.Signature
	SetSignature(*control.Signature)
}

var errDisallowedKey = errors.New("key is not in the allowed list")
var errMissingSignature = errors.New("missing signature")
var errInvalidSignature = errors.New("invalid signature")

func (s *Server) isValidRequest(req SignedMessage) error {
	sign := req.GetSignature()
	if sign == nil {
		return errMissingSignature
	}

	var (
		key     = sign.GetKey()
		allowed = false
	)

	// check if key is allowed
	for _, authKey := range s.settings.FetchRawKeys() {
		if allowed = bytes.Equal(authKey, key); allowed {
			break
		}
	}

	if !allowed {
		return errDisallowedKey
	}

	// verify signature
	binBody, err := req.ReadSignedData(nil)
	if err != nil {
		return fmt.Errorf("marshal request body: %w", err)
	}

	var sigV2 refs.Signature
	sigV2.SetKey(sign.GetKey())
	sigV2.SetSign(sign.GetSign())
	sigV2.SetScheme(refs.ECDSA_SHA512)

	var sig frostfscrypto.Signature
	if err := sig.ReadFromV2(sigV2); err != nil {
		return fmt.Errorf("can't read signature: %w", err)
	}

	if !sig.Verify(binBody) {
		return errInvalidSignature
	}

	return nil
}

// SignMessage signs Control service message with private key.
func SignMessage(key *ecdsa.PrivateKey, msg SignedMessage) error {
	binBody, err := msg.ReadSignedData(nil)
	if err != nil {
		return fmt.Errorf("marshal request body: %w", err)
	}

	var sig frostfscrypto.Signature

	err = sig.Calculate(frostfsecdsa.Signer(*key), binBody)
	if err != nil {
		return fmt.Errorf("calculate signature: %w", err)
	}

	var sigV2 refs.Signature
	sig.WriteToV2(&sigV2)

	var sigControl control.Signature
	sigControl.Key = sigV2.GetKey()
	sigControl.Sign = sigV2.GetSign()

	msg.SetSignature(&sigControl)

	return nil
}

func isNotFoundError(err error) bool {
	return errors.Is(err, engine.ErrChainNameNotFound) ||
		errors.Is(err, engine.ErrChainNotFound) ||
		errors.Is(err, engine.ErrResourceNotFound)
}
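A hedged sketch, not in the diff, of how the gateway might expose this service over gRPC. The listen address and the appSettings value are illustrative; the registration helper name follows the usual protoc-gen-go-grpc convention for service ControlService and is an assumption about the generated code in service.pb.go.

lis, err := net.Listen("tcp", "localhost:8083")
if err != nil {
	return err
}

grpcSrv := grpc.NewServer()
control.RegisterControlServiceServer(grpcSrv, server.New(
	server.WithLogger(logger),
	server.WithSettings(appSettings), // must provide FetchRawKeys and ResolveNamespaceAlias
))

go func() {
	// Serve blocks; failures here are fatal for the control API only.
	if err := grpcSrv.Serve(lis); err != nil {
		logger.Error("control API server", zap.Error(err))
	}
}()
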
pkg/service/control/service.pb.go (new file, 1832 lines)
File diff suppressed because it is too large.
pkg/service/control/service.proto (new file, 180 lines)
@@ -0,0 +1,180 @@
syntax = "proto3";

package s3gw.control;

option go_package = "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/control";

// `ControlService` provides an interface for internal work with the S3 gateway.
service ControlService {
  // Performs health check of the S3 gateway.
  rpc HealthCheck (HealthCheckRequest) returns (HealthCheckResponse);

  rpc PutPolicies (PutPoliciesRequest) returns (PutPoliciesResponse);

  rpc RemovePolicies (RemovePoliciesRequest) returns (RemovePoliciesResponse);

  rpc GetPolicy (GetPolicyRequest) returns (GetPolicyResponse);

  rpc ListPolicies (ListPoliciesRequest) returns (ListPoliciesResponse);
}

// Signature of some message.
message Signature {
  // Public key used for signing.
  bytes key = 1 [json_name = "key"];

  // Binary signature.
  bytes sign = 2 [json_name = "signature"];
}

// Health check request.
message HealthCheckRequest {
  message Body {
  }

  // Body of health check request message.
  Body body = 1;

  // Body signature.
  Signature signature = 2;
}

// Health check response.
message HealthCheckResponse {
  // Health check response body.
  message Body {
    // Health status of the application.
    HealthStatus health_status = 1;
  }

  // Body of health check response message.
  Body body = 1;

  Signature signature = 2;
}

// Health status of the application.
enum HealthStatus {
  // Undefined status, default value.
  HEALTH_STATUS_UNDEFINED = 0;

  // Application is starting.
  STARTING = 1;

  // Application is started and serves all services.
  READY = 2;

  // Application is shutting down.
  SHUTTING_DOWN = 3;
}

// Put policies request.
message PutPoliciesRequest {
  message ChainData {
    // Namespace.
    string namespace = 1;
    // Chain rules.
    bytes chain = 2;
  }

  message Body {
    repeated ChainData chainDatas = 1;
  }

  Body body = 1;

  // Body signature.
  Signature signature = 2;
}

// Put policies response.
message PutPoliciesResponse {
  message Body {
  }

  Body body = 1;

  Signature signature = 2;
}

// Remove policies request.
message RemovePoliciesRequest {
  message ChainInfo {
    // Namespace.
    string namespace = 1;
    // Chain id to remove.
    bytes chainID = 2;
  }

  message Body {
    repeated ChainInfo chainInfos = 1;
  }

  Body body = 1;

  // Body signature.
  Signature signature = 2;
}

// Remove policies response.
message RemovePoliciesResponse {
  message Body {
  }

  Body body = 1;

  Signature signature = 2;
}

// Get policy request.
message GetPolicyRequest {
  message Body {
    // Namespace.
    string namespace = 1;
    // Chain id to get.
    bytes chainID = 2;
  }

  Body body = 1;

  // Body signature.
  Signature signature = 2;
}

// Get policy response.
message GetPolicyResponse {
  message Body {
    // Chain rules.
    bytes chain = 1;
  }

  Body body = 1;

  Signature signature = 2;
}

// List policies request.
message ListPoliciesRequest {
  message Body {
    // Namespace.
    string namespace = 1;
  }

  Body body = 1;

  // Body signature.
  Signature signature = 2;
}

// List policies response.
message ListPoliciesResponse {
  message Body {
    // Chain ids.
    repeated bytes chainIDs = 1;
  }

  Body body = 1;

  Signature signature = 2;
}
Some files were not shown because too many files have changed in this diff.