Compare commits
No commits in common. "master" and "master" have entirely different histories.
72 changed files with 759 additions and 2059 deletions
@@ -16,7 +16,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.23.6'
+          go-version: '1.23.5'

       - name: Install govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest

CHANGELOG.md

@@ -4,34 +4,6 @@ This document outlines major changes between releases.

 ## [Unreleased]

-## [0.32.9] - 2025-02-12
-
-### Fixed
-- Make `Content-Md5` header check optional (#612)
-
-## [0.32.8] - 2025-02-11
-
-### Fixed
-- Return 404 instead of 500 when object is missing in object storage and available in the tree (#626)
-
-### Added
-- `tree_stream_timeout` configuration parameter (#627)
-
-## [0.32.7] - 2025-02-06
-
-### Fixed
-- Correct passing copies number during multipart upload (#623)
-
-## [0.32.6] - 2025-02-05
-
-### Fixed
-- Connection leak when `feature.tree_pool_netmap_support` is enabled (#622)
-
-## [0.32.5] - 2025-02-04
-
-### Fixed
-- Support trailing headers signature during aws-chunk upload (#607)
-
 ## [0.32.4] - 2025-02-03

 ### Fixed
@@ -447,9 +419,4 @@ To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs
 [0.32.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.1...v0.32.2
 [0.32.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.2...v0.32.3
 [0.32.4]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.3...v0.32.4
-[0.32.5]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.4...v0.32.5
-[0.32.6]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.5...v0.32.6
-[0.32.7]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.6...v0.32.7
-[0.32.8]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.7...v0.32.8
-[0.32.9]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.8...v0.32.9
-[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.9...master
+[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.4...master

VERSION

@@ -1 +1 @@
-v0.32.9
+v0.32.4

@@ -1,6 +1,4 @@
 // This file is adopting https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go for sigv4a.
-// with changes
-// * add VerifyTrailerSignature

 package v4a

@@ -90,39 +88,6 @@ func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSi
 	}, "\n")
 }

-func (s *StreamSigner) VerifyTrailerSignature(payload []byte, signingTime time.Time, signature []byte) error {
-	prevSignature := s.prevSignature
-
-	st := v4Internal.NewSigningTime(signingTime)
-
-	scope := buildCredentialScope(st, s.service)
-
-	stringToSign := s.buildEventStreamStringToSignTrailer(payload, prevSignature, scope, &st)
-
-	ok, err := signerCrypto.VerifySignature(&s.credentials.PrivateKey.PublicKey, makeHash(sha256.New(), []byte(stringToSign)), signature)
-	if err != nil {
-		return err
-	}
-	if !ok {
-		return fmt.Errorf("v4a: invalid signature")
-	}
-
-	s.prevSignature = signature
-
-	return nil
-}
-
-func (s *StreamSigner) buildEventStreamStringToSignTrailer(payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
-	hash := sha256.New()
-	return strings.Join([]string{
-		"AWS4-ECDSA-P256-SHA256-TRAILER",
-		signingTime.TimeFormat(),
-		credentialScope,
-		hex.EncodeToString(previousSignature),
-		hex.EncodeToString(makeHash(hash, payload)),
-	}, "\n")
-}
-
 func buildCredentialScope(st v4Internal.SigningTime, service string) string {
 	return strings.Join([]string{
 		st.Format(shortTimeFormat),

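Aside: the removed `VerifyTrailerSignature` checks an ECDSA P-256 signature over the trailer string-to-sign shown above. A minimal self-contained sketch of that verification shape, standard library only — the scope, timestamp and hex fields are placeholder values, and the real code hashes via its own `makeHash` helper:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

// verifyTrailer mirrors the shape of the removed VerifyTrailerSignature:
// hash the trailer string-to-sign with SHA-256 and check an ASN.1 ECDSA
// signature against the signer's P-256 public key.
func verifyTrailer(pub *ecdsa.PublicKey, stringToSign string, sig []byte) bool {
	digest := sha256.Sum256([]byte(stringToSign))
	return ecdsa.VerifyASN1(pub, digest[:], sig)
}

func main() {
	// Self-contained demo: sign and verify with a throwaway P-256 key.
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	sts := "AWS4-ECDSA-P256-SHA256-TRAILER\n20130524T000000Z\n20130524/s3/aws4_request\n<prev-sig-hex>\n<payload-hash-hex>"
	digest := sha256.Sum256([]byte(sts))
	sig, _ := ecdsa.SignASN1(rand.Reader, key, digest[:])
	fmt.Println("valid:", verifyTrailer(&key.PublicKey, sts, sig))
}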
@@ -1,6 +1,4 @@
 // This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go
-// with changes
-// * add GetTrailingSignature

 package v4

@@ -89,32 +87,3 @@ func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSi
 		hex.EncodeToString(makeHash(hash, payload)),
 	}, "\n")
 }
-
-// GetTrailerSignature signs the provided header and payload bytes.
-func (s *StreamSigner) GetTrailerSignature(payload []byte, signingTime time.Time) ([]byte, error) {
-	prevSignature := s.prevSignature
-
-	st := v4Internal.NewSigningTime(signingTime)
-
-	sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)
-
-	scope := v4Internal.BuildCredentialScope(st, s.region, s.service)
-
-	stringToSign := s.buildEventStreamStringToSignTrailer(payload, prevSignature, scope, &st)
-
-	signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign))
-	s.prevSignature = signature
-
-	return signature, nil
-}
-
-func (s *StreamSigner) buildEventStreamStringToSignTrailer(payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
-	hash := sha256.New()
-	return strings.Join([]string{
-		"AWS4-HMAC-SHA256-TRAILER",
-		signingTime.TimeFormat(),
-		credentialScope,
-		hex.EncodeToString(previousSignature),
-		hex.EncodeToString(makeHash(hash, payload)),
-	}, "\n")
-}

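For reference, the deleted `GetTrailerSignature` implements the SigV4 trailing-header scheme: the string-to-sign chains the previous chunk signature with the SHA-256 of the trailer bytes under `AWS4-HMAC-SHA256-TRAILER`, and is then HMAC'd with the derived signing key. A standalone sketch — the signing key and scope here are illustrative stand-ins for the real `DeriveKey`/`BuildCredentialScope` outputs, and the previous-signature value is taken from the AWS example reproduced in the tests below:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// trailerStringToSign mirrors the layout produced by the removed
// buildEventStreamStringToSignTrailer: algorithm, timestamp, scope,
// previous chunk signature, and the SHA-256 of the trailer payload,
// joined by newlines.
func trailerStringToSign(payload, prevSignature []byte, scope, timestamp string) string {
	sum := sha256.Sum256(payload)
	return strings.Join([]string{
		"AWS4-HMAC-SHA256-TRAILER",
		timestamp,
		scope,
		hex.EncodeToString(prevSignature),
		hex.EncodeToString(sum[:]),
	}, "\n")
}

func main() {
	// Illustrative key only; a real signer derives it from the secret
	// key, date, region and service (see DeriveKey in the diff above).
	sigKey := []byte("example-derived-key")
	prev, _ := hex.DecodeString("2ca2aba2005185cf7159c6277faf83795951dd77a3a99e6e65d5c9f85863f992")
	sts := trailerStringToSign([]byte("x-amz-checksum-crc32c:sOO8/Q==\n"), prev,
		"20130524/us-east-1/s3/aws4_request", "20130524T000000Z")

	mac := hmac.New(sha256.New, sigKey)
	mac.Write([]byte(sts))
	fmt.Println("trailer signature:", hex.EncodeToString(mac.Sum(nil)))
}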
api/cache/access_control.go (vendored)

@@ -48,7 +48,7 @@ func (o *AccessControlCache) Get(owner user.ID, key string) bool {
 	result, ok := entry.(bool)
 	if !ok {
 		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return false
 	}

api/cache/accessbox.go (vendored)

@@ -67,7 +67,7 @@ func (o *AccessBoxCache) Get(accessKeyID string) *AccessBoxCacheValue {
 	result, ok := entry.(*AccessBoxCacheValue)
 	if !ok {
 		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

api/cache/buckets.go (vendored)

@@ -65,7 +65,7 @@ func (o *BucketCache) GetByCID(cnrID cid.ID) *data.BucketInfo {
 	key, ok := entry.(string)
 	if !ok {
 		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", key)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", key)))
 		return nil
 	}

@@ -81,7 +81,7 @@ func (o *BucketCache) get(key string) *data.BucketInfo {
 	result, ok := entry.(*data.BucketInfo)
 	if !ok {
 		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

api/cache/frostfsid.go (vendored)

@@ -69,7 +69,7 @@ func get[T any](c *FrostfsIDCache, key any) *T {
 	result, ok := entry.(*T)
 	if !ok {
 		c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

api/cache/listsession.go (vendored)

@@ -52,7 +52,7 @@ func NewListSessionCache(config *Config) *ListSessionCache {
 	session, ok := val.(*data.ListSession)
 	if !ok {
 		config.Logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", val)),
-			zap.String("expected", fmt.Sprintf("%T", session)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", session)))
 	}

 	if !session.Acquired.Load() {
@@ -72,7 +72,7 @@ func (l *ListSessionCache) GetListSession(key ListSessionKey) *data.ListSession
 	result, ok := entry.(*data.ListSession)
 	if !ok {
 		l.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

api/cache/names.go (vendored)

@@ -50,7 +50,7 @@ func (o *ObjectsNameCache) Get(key string) *oid.Address {
 	result, ok := entry.(oid.Address)
 	if !ok {
 		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

api/cache/network.go (vendored)

@@ -54,7 +54,7 @@ func (c *NetworkCache) GetNetworkInfo() *netmap.NetworkInfo {
 	result, ok := entry.(netmap.NetworkInfo)
 	if !ok {
 		c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

@@ -74,7 +74,7 @@ func (c *NetworkCache) GetNetmap() *netmap.NetMap {
 	result, ok := entry.(netmap.NetMap)
 	if !ok {
 		c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

api/cache/objects.go (vendored)

@@ -49,7 +49,7 @@ func (o *ObjectsCache) GetObject(address oid.Address) *data.ExtendedObjectInfo {
 	result, ok := entry.(*data.ExtendedObjectInfo)
 	if !ok {
 		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

api/cache/objectslist.go (vendored)

@@ -77,7 +77,7 @@ func (l *ObjectsListCache) GetVersions(key ObjectsListKey) []*data.NodeVersion {
 	result, ok := entry.([]*data.NodeVersion)
 	if !ok {
 		l.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

@@ -96,7 +96,7 @@ func (l *ObjectsListCache) CleanCacheEntriesContainingObject(objectName string,
 	k, ok := key.(ObjectsListKey)
 	if !ok {
 		l.logger.Warn(logs.InvalidCacheKeyType, zap.String("actual", fmt.Sprintf("%T", key)),
-			zap.String("expected", fmt.Sprintf("%T", k)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", k)))
 		continue
 	}
 	if cnr.Equals(k.cid) && strings.HasPrefix(objectName, k.prefix) {

api/cache/policy.go (vendored)

@@ -54,7 +54,7 @@ func (o *MorphPolicyCache) Get(key MorphPolicyCacheKey) []*chain.Chain {
 	result, ok := entry.([]*chain.Chain)
 	if !ok {
 		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

api/cache/system.go (vendored)

@@ -50,7 +50,7 @@ func (o *SystemCache) GetObject(key string) *data.ObjectInfo {
 	result, ok := entry.(*data.ObjectInfo)
 	if !ok {
 		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

@@ -81,7 +81,7 @@ func (o *SystemCache) GetCORS(key string) *data.CORSConfiguration {
 	result, ok := entry.(*data.CORSConfiguration)
 	if !ok {
 		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

@@ -97,7 +97,7 @@ func (o *SystemCache) GetLifecycleConfiguration(key string) *data.LifecycleConfi
 	result, ok := entry.(*data.LifecycleConfiguration)
 	if !ok {
 		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

@@ -113,7 +113,7 @@ func (o *SystemCache) GetSettings(key string) *data.BucketSettings {
 	result, ok := entry.(*data.BucketSettings)
 	if !ok {
 		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}

@@ -109,6 +109,7 @@ type MultipartInfo struct {
 	Owner         user.ID
 	Created       time.Time
 	Meta          map[string]string
 	CopiesNumbers []uint32
 	Finished      bool
+	CreationEpoch uint64
 }

@@ -60,6 +60,7 @@ const (
 	ErrMalformedACL
 	ErrMalformedXML
 	ErrMissingContentLength
+	ErrMissingContentMD5
 	ErrMissingRequestBodyError
 	ErrMissingSecurityHeader
 	ErrNoSuchBucket
@@ -477,6 +478,12 @@ var errorCodes = errorCodeMap{
 		Description:    "You must provide the Content-Length HTTP header.",
 		HTTPStatusCode: http.StatusLengthRequired,
 	},
+	ErrMissingContentMD5: {
+		ErrCode:        ErrMissingContentMD5,
+		Code:           "MissingContentMD5",
+		Description:    "Missing required header for this request: Content-Md5.",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	ErrMissingSecurityHeader: {
 		ErrCode: ErrMissingSecurityHeader,
 		Code:    "MissingSecurityHeader",

@@ -99,7 +99,7 @@ func (h *handler) encodePrivateCannedACL(ctx context.Context, bktInfo *data.Buck
 	ownerEncodedID := ownerDisplayName

 	if settings.OwnerKey == nil {
-		h.reqLogger(ctx).Warn(logs.BucketOwnerKeyIsMissing, zap.String("owner", bktInfo.Owner.String()), logs.TagField(logs.TagDatapath))
+		h.reqLogger(ctx).Warn(logs.BucketOwnerKeyIsMissing, zap.String("owner", bktInfo.Owner.String()))
 	} else {
 		ownerDisplayName = settings.OwnerKey.Address()
 		ownerEncodedID = hex.EncodeToString(settings.OwnerKey.Bytes())
@@ -150,7 +150,7 @@ func (h *handler) putBucketACLAPEHandler(w http.ResponseWriter, r *http.Request,

 	defer func() {
 		if errBody := r.Body.Close(); errBody != nil {
-			h.reqLogger(ctx).Warn(logs.CouldNotCloseRequestBody, zap.Error(errBody), logs.TagField(logs.TagDatapath))
+			h.reqLogger(ctx).Warn(logs.CouldNotCloseRequestBody, zap.Error(errBody))
 		}
 	}()

@@ -382,7 +382,7 @@ func (h *handler) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request)
 		h.logAndSendError(ctx, w, "could not convert s3 policy to native chain policy", reqInfo, err)
 		return
 	} else {
-		h.reqLogger(ctx).Warn(logs.PolicyCouldntBeConvertedToNativeRules, logs.TagField(logs.TagDatapath))
+		h.reqLogger(ctx).Warn(logs.PolicyCouldntBeConvertedToNativeRules)
 	}

 	chainsToSave := []*chain.Chain{s3Chain}

@@ -244,7 +244,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 		}
 	}

-	h.reqLogger(ctx).Info(logs.ObjectIsCopied, zap.Stringer("object_id", dstObjInfo.ID), logs.TagField(logs.TagExternalStorage))
+	h.reqLogger(ctx).Info(logs.ObjectIsCopied, zap.Stringer("object_id", dstObjInfo.ID))

 	if dstEncryptionParams.Enabled() {
 		addSSECHeaders(w.Header(), r.Header)

@@ -108,13 +108,13 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
 	}
 	bktInfo, err := h.getBucketInfo(ctx, reqInfo.BucketName)
 	if err != nil {
-		h.reqLogger(ctx).Warn(logs.GetBucketInfo, zap.Error(err), logs.TagField(logs.TagDatapath))
+		h.reqLogger(ctx).Warn(logs.GetBucketInfo, zap.Error(err))
 		return
 	}

 	cors, err := h.obj.GetBucketCORS(ctx, bktInfo, h.cfg.NewXMLDecoder)
 	if err != nil {
-		h.reqLogger(ctx).Warn(logs.GetBucketCors, zap.Error(err), logs.TagField(logs.TagDatapath))
+		h.reqLogger(ctx).Warn(logs.GetBucketCors, zap.Error(err))
 		return
 	}

@@ -131,6 +131,13 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
 	ctx := r.Context()
 	reqInfo := middleware.GetReqInfo(ctx)

+	// Content-Md5 is required and should be set
+	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
+	if _, ok := r.Header[api.ContentMD5]; !ok {
+		h.logAndSendError(ctx, w, "missing Content-MD5", reqInfo, errors.GetAPIError(errors.ErrMissingContentMD5))
+		return
+	}
+
 	// Content-Length is required and should be non-zero
 	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
 	if r.ContentLength <= 0 {

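Note for clients: the value this check (and the lifecycle handler below) expects in `Content-Md5` is the base64 encoding of the raw 16-byte MD5 digest of the request body, not a hex string, matching the AWS docs linked in the diff. A minimal sketch of computing it:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// contentMD5 returns the value S3 expects in the Content-Md5 header:
// base64 of the 16-byte MD5 digest of the request body.
func contentMD5(body []byte) string {
	sum := md5.Sum(body)
	return base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
	// Illustrative DeleteObjects body; any request body works the same way.
	body := []byte(`<Delete><Object><Key>obj</Key></Object></Delete>`)
	fmt.Println("Content-Md5:", contentMD5(body))
}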
@@ -542,6 +542,7 @@ func deleteObjectsBase(hc *handlerContext, bktName string, objVersions [][2]stri
 	}

 	w, r := prepareTestRequest(hc, bktName, "", req)
+	r.Header.Set(api.ContentMD5, "")
 	hc.Handler().DeleteMultipleObjectsHandler(w, r)
 	assertStatus(hc.t, w, http.StatusOK)

@@ -296,12 +296,12 @@ func parseConditionalHeaders(headers http.Header, log *zap.Logger) *conditionalA
 	if httpTime, err := parseHTTPTime(headers.Get(api.IfModifiedSince)); err == nil {
 		args.IfModifiedSince = httpTime
 	} else {
-		log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfModifiedSince, headers.Get(api.IfModifiedSince)), zap.Error(err), logs.TagField(logs.TagDatapath))
+		log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfModifiedSince, headers.Get(api.IfModifiedSince)), zap.Error(err))
 	}
 	if httpTime, err := parseHTTPTime(headers.Get(api.IfUnmodifiedSince)); err == nil {
 		args.IfUnmodifiedSince = httpTime
 	} else {
-		log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfUnmodifiedSince, headers.Get(api.IfUnmodifiedSince)), zap.Error(err), logs.TagField(logs.TagDatapath))
+		log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfUnmodifiedSince, headers.Get(api.IfUnmodifiedSince)), zap.Error(err))
 	}

 	return args

@@ -197,33 +197,6 @@ func TestGetObject(t *testing.T) {
 	getObjectAssertS3Error(hc, bktName, objName, emptyVersion, apierr.ErrNoSuchKey)
 }

-func TestGetDeletedObject(t *testing.T) {
-	hc := prepareHandlerContextWithMinCache(t)
-	bktName, objName := "bucket", "obj"
-	bktInfo, objInfo := createVersionedBucketAndObject(hc.t, hc, bktName, objName)
-
-	putObject(hc, bktName, objName)
-
-	checkFound(hc.t, hc, bktName, objName, objInfo.VersionID())
-	checkFound(hc.t, hc, bktName, objName, emptyVersion)
-
-	addr := getAddressOfLastVersion(hc, bktInfo, objName)
-	t.Run("not found error", func(_ *testing.T) {
-		hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
-		hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{})
-
-		getObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), apierr.ErrNoSuchVersion)
-		getObjectAssertS3Error(hc, bktName, objName, emptyVersion, apierr.ErrNoSuchKey)
-	})
-	t.Run("already removed error", func(_ *testing.T) {
-		hc.tp.SetObjectError(addr, &apistatus.ObjectAlreadyRemoved{})
-		hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectAlreadyRemoved{})
-
-		getObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), apierr.ErrNoSuchVersion)
-		getObjectAssertS3Error(hc, bktName, objName, emptyVersion, apierr.ErrNoSuchKey)
-	})
-}
-
 func TestGetObjectEnabledMD5(t *testing.T) {
 	hc := prepareHandlerContext(t)
 	bktName, objName := "bucket", "obj"

@@ -55,29 +55,34 @@ func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Reque
 	ctx := r.Context()
 	reqInfo := middleware.GetReqInfo(ctx)

+	// Content-Md5 is required and should be set
+	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+	if _, ok := r.Header[api.ContentMD5]; !ok {
+		h.logAndSendError(ctx, w, "missing Content-MD5", reqInfo, apierr.GetAPIError(apierr.ErrMissingContentMD5))
+		return
+	}
+
+	headerMD5, err := base64.StdEncoding.DecodeString(r.Header.Get(api.ContentMD5))
+	if err != nil {
+		h.logAndSendError(ctx, w, "invalid Content-MD5", reqInfo, apierr.GetAPIError(apierr.ErrInvalidDigest))
+		return
+	}
+
 	cfg := new(data.LifecycleConfiguration)
-	if err := h.cfg.NewXMLDecoder(tee, r.UserAgent()).Decode(cfg); err != nil {
+	if err = h.cfg.NewXMLDecoder(tee, r.UserAgent()).Decode(cfg); err != nil {
 		h.logAndSendError(ctx, w, "could not decode body", reqInfo, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
 		return
 	}

-	if _, ok := r.Header[api.ContentMD5]; ok {
-		headerMD5, err := base64.StdEncoding.DecodeString(r.Header.Get(api.ContentMD5))
-		if err != nil {
-			h.logAndSendError(ctx, w, "invalid Content-MD5", reqInfo, apierr.GetAPIError(apierr.ErrInvalidDigest))
-			return
-		}
-
-		bodyMD5, err := getContentMD5(&buf)
-		if err != nil {
-			h.logAndSendError(ctx, w, "could not get content md5", reqInfo, err)
-			return
-		}
-
-		if !bytes.Equal(headerMD5, bodyMD5) {
-			h.logAndSendError(ctx, w, "Content-MD5 does not match", reqInfo, apierr.GetAPIError(apierr.ErrInvalidDigest))
-			return
-		}
+	bodyMD5, err := getContentMD5(&buf)
+	if err != nil {
+		h.logAndSendError(ctx, w, "could not get content md5", reqInfo, err)
+		return
+	}
+
+	if !bytes.Equal(headerMD5, bodyMD5) {
+		h.logAndSendError(ctx, w, "Content-MD5 does not match", reqInfo, apierr.GetAPIError(apierr.ErrInvalidDigest))
+		return
 	}

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)

@@ -29,8 +29,6 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 	for _, tc := range []struct {
 		name      string
 		body      *data.LifecycleConfiguration
-		headers   map[string]string
-		addMD5    bool
 		errorCode apierr.ErrorCode
 	}{
 		{
@@ -72,22 +70,6 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 				},
 			},
 		},
-		{
-			name: "correct Content-Md5 header",
-			body: &data.LifecycleConfiguration{
-				Rules: []data.LifecycleRule{
-					{
-						ID:     "rule",
-						Status: data.LifecycleStatusEnabled,
-						Expiration: &data.LifecycleExpiration{
-							Days: ptr(21),
-						},
-					},
-				},
-			},
-			addMD5: true,
-		},
 		{
 			name: "too many rules",
 			body: func() *data.LifecycleConfiguration {
@@ -425,44 +407,14 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 			},
 			errorCode: apierr.ErrInvalidRequest,
 		},
-		{
-			name: "invalid Content-Md5 header",
-			body: &data.LifecycleConfiguration{
-				Rules: []data.LifecycleRule{
-					{
-						Status: data.LifecycleStatusEnabled,
-						Expiration: &data.LifecycleExpiration{
-							Days: ptr(21),
-						},
-					},
-				},
-			},
-			headers:   map[string]string{api.ContentMD5: "invalid"},
-			errorCode: apierr.ErrInvalidDigest,
-		},
-		{
-			name: "Content-Md5 header does not match body md5 hash",
-			body: &data.LifecycleConfiguration{
-				Rules: []data.LifecycleRule{
-					{
-						Status: data.LifecycleStatusEnabled,
-						Expiration: &data.LifecycleExpiration{
-							Days: ptr(21),
-						},
-					},
-				},
-			},
-			headers:   map[string]string{api.ContentMD5: base64.StdEncoding.EncodeToString([]byte("some-hash"))},
-			errorCode: apierr.ErrInvalidDigest,
-		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			if tc.errorCode > 0 {
-				putBucketLifecycleConfigurationErr(hc, bktName, tc.body, tc.headers, apierr.GetAPIError(tc.errorCode))
+				putBucketLifecycleConfigurationErr(hc, bktName, tc.body, apierr.GetAPIError(tc.errorCode))
 				return
 			}

-			putBucketLifecycleConfiguration(hc, bktName, tc.body, tc.headers, tc.addMD5)
+			putBucketLifecycleConfiguration(hc, bktName, tc.body)

 			cfg := getBucketLifecycleConfiguration(hc, bktName)
 			require.Equal(t, tc.body.Rules, cfg.Rules)
@@ -496,13 +448,45 @@ func TestPutBucketLifecycleIDGeneration(t *testing.T) {
 		},
 	}

-	putBucketLifecycleConfiguration(hc, bktName, lifecycle, nil, false)
+	putBucketLifecycleConfiguration(hc, bktName, lifecycle)
 	cfg := getBucketLifecycleConfiguration(hc, bktName)
 	require.Len(t, cfg.Rules, 2)
 	require.NotEmpty(t, cfg.Rules[0].ID)
 	require.NotEmpty(t, cfg.Rules[1].ID)
 }

+func TestPutBucketLifecycleInvalidMD5(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName := "bucket-lifecycle-md5"
+	createBucket(hc, bktName)
+
+	lifecycle := &data.LifecycleConfiguration{
+		Rules: []data.LifecycleRule{
+			{
+				Status: data.LifecycleStatusEnabled,
+				Expiration: &data.LifecycleExpiration{
+					Days: ptr(21),
+				},
+			},
+		},
+	}
+
+	w, r := prepareTestRequest(hc, bktName, "", lifecycle)
+	hc.Handler().PutBucketLifecycleHandler(w, r)
+	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrMissingContentMD5))
+
+	w, r = prepareTestRequest(hc, bktName, "", lifecycle)
+	r.Header.Set(api.ContentMD5, "")
+	hc.Handler().PutBucketLifecycleHandler(w, r)
+	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))
+
+	w, r = prepareTestRequest(hc, bktName, "", lifecycle)
+	r.Header.Set(api.ContentMD5, "some-hash")
+	hc.Handler().PutBucketLifecycleHandler(w, r)
+	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))
+}
+
 func TestPutBucketLifecycleInvalidXML(t *testing.T) {
 	hc := prepareHandlerContext(t)

@@ -521,32 +505,25 @@ func TestPutBucketLifecycleInvalidXML(t *testing.T) {
 	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrMalformedXML))
 }

-func putBucketLifecycleConfiguration(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration, headers map[string]string, addMD5 bool) {
-	w := putBucketLifecycleConfigurationBase(hc, bktName, cfg, headers, addMD5)
+func putBucketLifecycleConfiguration(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration) {
+	w := putBucketLifecycleConfigurationBase(hc, bktName, cfg)
 	assertStatus(hc.t, w, http.StatusOK)
 }

-func putBucketLifecycleConfigurationErr(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration, headers map[string]string, err apierr.Error) {
-	w := putBucketLifecycleConfigurationBase(hc, bktName, cfg, headers, false)
+func putBucketLifecycleConfigurationErr(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration, err apierr.Error) {
+	w := putBucketLifecycleConfigurationBase(hc, bktName, cfg)
 	assertS3Error(hc.t, w, err)
 }

-func putBucketLifecycleConfigurationBase(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration, headers map[string]string, addMD5 bool) *httptest.ResponseRecorder {
+func putBucketLifecycleConfigurationBase(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration) *httptest.ResponseRecorder {
 	w, r := prepareTestRequest(hc, bktName, "", cfg)

-	for k, v := range headers {
-		r.Header.Set(k, v)
-	}
-
-	if addMD5 {
-		rawBody, err := xml.Marshal(cfg)
-		require.NoError(hc.t, err)
-
-		hash := md5.New()
-		hash.Write(rawBody)
-		r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString(hash.Sum(nil)))
-	}
+	rawBody, err := xml.Marshal(cfg)
+	require.NoError(hc.t, err)
+
+	hash := md5.New()
+	hash.Write(rawBody)
+	r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString(hash.Sum(nil)))
 	hc.Handler().PutBucketLifecycleHandler(w, r)
 	return w
 }

@@ -152,6 +152,12 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
 		p.Header[api.ContentLanguage] = contentLanguage
 	}

+	p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, reqInfo.Namespace, bktInfo.LocationConstraint)
+	if err != nil {
+		h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err, additional...)
+		return
+	}
+
 	if err = h.obj.CreateMultipartUpload(ctx, p); err != nil {
 		h.logAndSendError(ctx, w, "could create multipart upload", reqInfo, err, additional...)
 		return
@@ -223,12 +229,6 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
-	if err != nil {
-		h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err, additional...)
-		return
-	}
-
 	hash, err := h.obj.UploadPart(ctx, p)
 	if err != nil {
 		h.logAndSendError(ctx, w, "could not upload a part", reqInfo, err, additional...)
@@ -354,12 +354,6 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
-	if err != nil {
-		h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err, additional...)
-		return
-	}
-
 	info, err := h.obj.UploadPartCopy(ctx, p)
 	if err != nil {
 		h.logAndSendError(ctx, w, "could not upload part copy", reqInfo, err, additional...)
@@ -422,12 +416,6 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
 		Parts: reqBody.Parts,
 	}

-	c.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
-	if err != nil {
-		h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err, additional...)
-		return
-	}
-
 	// Start complete multipart upload which may take some time to fetch object
 	// and re-upload it part by part.
 	objInfo, err := h.completeMultipartUpload(r, c, bktInfo)

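The hunks above move the copies-number resolution from the per-part handlers to upload creation. `pickCopiesNumbers` itself is not shown in this diff; based on the `copiesNumbers` map visible in the tests below, a hypothetical sketch of such a lookup — names and fallback behavior are assumptions, not the gateway's actual implementation:

package main

import "fmt"

// pickCopiesNumbers is a hypothetical stand-in for the handler helper of
// the same name: resolve the replication vector for a bucket's location
// constraint, falling back to the "default" entry when none matches.
func pickCopiesNumbers(copiesNumbers map[string][]uint32, location string) ([]uint32, error) {
	if v, ok := copiesNumbers[location]; ok {
		return v, nil
	}
	if v, ok := copiesNumbers["default"]; ok {
		return v, nil
	}
	return nil, fmt.Errorf("no copies numbers configured for location %q", location)
}

func main() {
	// Mirrors the test configuration: hc.config.copiesNumbers = {"default": {2, 0}}.
	cfg := map[string][]uint32{"default": {2, 0}}
	v, err := pickCopiesNumbers(cfg, "REP 1")
	fmt.Println(v, err) // [2 0] <nil>
}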
@@ -81,26 +81,6 @@ func TestDeleteMultipartAllParts(t *testing.T) {
 	require.Empty(t, hc.tp.Objects())
 }

-func TestMultipartCopiesNumber(t *testing.T) {
-	hc := prepareHandlerContext(t)
-
-	bktName, objName := "bucket", "object"
-
-	createTestBucket(hc, bktName)
-
-	copies := []uint32{2, 0}
-
-	hc.config.copiesNumbers = map[string][]uint32{"default": copies}
-
-	multipartInfo := createMultipartUpload(hc, bktName, objName, nil)
-	uploadPart(hc, bktName, objName, multipartInfo.UploadID, 1, layer.UploadMinSize)
-
-	objs := hc.tp.Objects()
-	require.Len(t, objs, 1)
-
-	require.EqualValues(t, copies, hc.tp.CopiesNumbers(addrFromObject(objs[0]).EncodeToString()))
-}
-
 func TestSpecialMultipartName(t *testing.T) {
 	hc := prepareHandlerContextWithMinCache(t)

@@ -812,14 +792,3 @@ func listPartsBase(hc *handlerContext, bktName, objName string, encrypted bool,

 	return listPartsResponse
 }
-
-func addrFromObject(obj *object.Object) oid.Address {
-	var addr oid.Address
-	cnrID, _ := obj.ContainerID()
-	objID, _ := obj.ID()
-
-	addr.SetContainer(cnrID)
-	addr.SetObject(objID)
-
-	return addr
-}

@@ -142,7 +142,7 @@ func parsePatchConditionalHeaders(headers http.Header, log *zap.Logger) *conditi
 	if httpTime, err := parseHTTPTime(headers.Get(api.IfUnmodifiedSince)); err == nil {
 		args.IfUnmodifiedSince = httpTime
 	} else {
-		log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfUnmodifiedSince, headers.Get(api.IfUnmodifiedSince)), zap.Error(err), logs.TagField(logs.TagDatapath))
+		log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfUnmodifiedSince, headers.Get(api.IfUnmodifiedSince)), zap.Error(err))
 	}

 	return args

@@ -311,23 +311,10 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 	}
 }

-type BodyReader interface {
-	io.ReadCloser
-	TrailerHeaders() map[string]string
-}
-
-type noTrailerBodyReader struct {
-	io.ReadCloser
-}
-
-func (r *noTrailerBodyReader) TrailerHeaders() map[string]string {
-	return nil
-}
-
-func (h *handler) getBodyReader(r *http.Request) (BodyReader, error) {
+func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
 	shaType, streaming := api.IsSignedStreamingV4(r)
 	if !streaming {
-		return &noTrailerBodyReader{r.Body}, nil
+		return r.Body, nil
 	}

 	encodings := r.Header.Values(api.ContentEncoding)
@@ -363,15 +350,12 @@ func (h *handler) getBodyReader(r *http.Request) (BodyReader, error) {

 	var (
 		err         error
-		chunkReader BodyReader
+		chunkReader io.ReadCloser
 	)
-	switch shaType {
-	case api.StreamingContentSHA256, api.StreamingContentSHA256Trailer:
-		chunkReader, err = newSignV4ChunkedReader(r)
-	case api.StreamingContentV4aSHA256, api.StreamingContentV4aSHA256Trailer:
+	if shaType == api.StreamingContentV4aSHA256 {
 		chunkReader, err = newSignV4aChunkedReader(r)
-	default:
-		chunkReader, err = newUnsignedChunkedReader(r.Body)
+	} else {
+		chunkReader, err = newSignV4ChunkedReader(r)
 	}

 	if err != nil {
@@ -466,7 +450,7 @@ func (h *handler) isTLSCheckRequired(r *http.Request) bool {

 	tlsTermination, err := strconv.ParseBool(tlsTerminationStr)
 	if err != nil {
-		h.reqLogger(r.Context()).Warn(logs.WarnInvalidTypeTLSTerminationHeader, zap.String("header", tlsTerminationStr), zap.Error(err), logs.TagField(logs.TagDatapath))
+		h.reqLogger(r.Context()).Warn(logs.WarnInvalidTypeTLSTerminationHeader, zap.String("header", tlsTerminationStr), zap.Error(err))
 		return true
 	}

@@ -828,7 +812,7 @@ func (h *handler) createBucketHandlerPolicy(w http.ResponseWriter, r *http.Reque
 		h.logAndSendError(ctx, w, "could not create bucket", reqInfo, err)
 		return
 	}
-	h.reqLogger(ctx).Info(logs.BucketIsCreated, zap.Stringer("container_id", bktInfo.CID), logs.TagField(logs.TagExternalStorage))
+	h.reqLogger(ctx).Info(logs.BucketIsCreated, zap.Stringer("container_id", bktInfo.CID))

 	chains := bucketCannedACLToAPERules(cannedACL, reqInfo, bktInfo.CID)
 	if err = h.ape.SaveACLChains(bktInfo.CID.EncodeToString(), chains); err != nil {
@@ -1041,7 +1025,7 @@ func isLockEnabled(log *zap.Logger, header http.Header) bool {

 	lockEnabled, err := strconv.ParseBool(lockEnabledStr)
 	if err != nil {
-		log.Warn(logs.InvalidBucketObjectLockEnabledHeader, zap.String("header", lockEnabledStr), zap.Error(err), logs.TagField(logs.TagDatapath))
+		log.Warn(logs.InvalidBucketObjectLockEnabledHeader, zap.String("header", lockEnabledStr), zap.Error(err))
 	}

 	return lockEnabled

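The removed `BodyReader` abstraction is small enough to restate whole: it lets `PutObjectHandler` read any body uniformly and still ask for trailers collected by a chunked reader. A self-contained rendering of that pattern, lifted directly from the lines deleted above:

package main

import (
	"fmt"
	"io"
	"strings"
)

// BodyReader mirrors the interface removed in the diff: a request body
// that can also expose any trailing headers collected while reading.
type BodyReader interface {
	io.ReadCloser
	TrailerHeaders() map[string]string
}

// noTrailerBodyReader adapts a plain body to BodyReader for requests
// without aws-chunked trailers; it always reports no trailers.
type noTrailerBodyReader struct {
	io.ReadCloser
}

func (r *noTrailerBodyReader) TrailerHeaders() map[string]string { return nil }

func main() {
	var br BodyReader = &noTrailerBodyReader{io.NopCloser(strings.NewReader("payload"))}
	data, _ := io.ReadAll(br)
	fmt.Println(string(data), br.TrailerHeaders()) // payload map[]
}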
@@ -377,77 +377,6 @@ func TestPutObjectCheckContentSHA256(t *testing.T) {
 	}
 }

-func TestPutObjectWithStreamUnsignedBodySmall(t *testing.T) {
-	hc := prepareHandlerContext(t)
-
-	bktName, objName := "test2", "tmp.txt"
-	createTestBucket(hc, bktName)
-
-	w, req, chunk := getChunkedRequestUnsignedTrailingSmall(hc.context, t, bktName, objName)
-	hc.Handler().PutObjectHandler(w, req)
-	assertStatus(t, w, http.StatusOK)
-
-	w, req = prepareTestRequest(hc, bktName, objName, nil)
-	hc.Handler().HeadObjectHandler(w, req)
-	assertStatus(t, w, http.StatusOK)
-	require.Equal(t, "5", w.Header().Get(api.ContentLength))
-
-	data := getObjectRange(t, hc, bktName, objName, 0, 5)
-	for i := range chunk {
-		require.Equal(t, chunk[i], data[i])
-	}
-}
-
-func TestPutObjectWithStreamUnsignedBody(t *testing.T) {
-	hc := prepareHandlerContext(t)
-
-	bktName, objName := "examplebucket", "chunkObject.txt"
-	createTestBucket(hc, bktName)
-
-	w, req, chunk := getChunkedRequestUnsignedTrailing(hc.context, t, bktName, objName)
-	hc.Handler().PutObjectHandler(w, req)
-	assertStatus(t, w, http.StatusOK)
-
-	w, req = prepareTestRequest(hc, bktName, objName, nil)
-	hc.Handler().HeadObjectHandler(w, req)
-	assertStatus(t, w, http.StatusOK)
-	require.Equal(t, strconv.Itoa(awsChunkedRequestExampleDecodedContentLength), w.Header().Get(api.ContentLength))
-
-	data := getObjectRange(t, hc, bktName, objName, 0, awsChunkedRequestExampleDecodedContentLength)
-	for i := range chunk {
-		require.Equal(t, chunk[i], data[i])
-	}
-}
-
-func TestPutObjectWithStreamBodyAWSExampleTrailing(t *testing.T) {
-	hc := prepareHandlerContext(t)
-
-	bktName, objName := "examplebucket", "chunkObject.txt"
-	createTestBucket(hc, bktName)
-
-	t.Run("valid trailer signature", func(t *testing.T) {
-		w, req, chunk := getChunkedRequestTrailing(hc.context, t, bktName, objName)
-		hc.Handler().PutObjectHandler(w, req)
-		assertStatus(t, w, http.StatusOK)
-
-		w, req = prepareTestRequest(hc, bktName, objName, nil)
-		hc.Handler().HeadObjectHandler(w, req)
-		assertStatus(t, w, http.StatusOK)
-		require.Equal(t, strconv.Itoa(awsChunkedRequestExampleDecodedContentLength), w.Header().Get(api.ContentLength))
-
-		data := getObjectRange(t, hc, bktName, objName, 0, awsChunkedRequestExampleDecodedContentLength)
-		equalDataSlices(t, chunk, data)
-	})
-
-	t.Run("invalid trailer signature", func(t *testing.T) {
-		w, req, _ := getChunkedRequestTrailing(hc.context, t, bktName, objName)
-		body := req.Body.(*customNopCloser)
-		body.Bytes()[body.Len()-2] = 'a'
-		hc.Handler().PutObjectHandler(w, req)
-		assertStatus(t, w, http.StatusForbidden)
-	})
-}
-
 func TestPutObjectWithStreamBodyAWSExample(t *testing.T) {
 	hc := prepareHandlerContext(t)

@@ -547,9 +476,9 @@ func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName strin

 	req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil)
 	require.NoError(t, err)
-	req.Header.Set("content-encoding", api.AwsChunked)
+	req.Header.Set("content-encoding", "aws-chunked")
 	req.Header.Set("content-length", strconv.Itoa(awsChunkedRequestExampleContentLength))
-	req.Header.Set("x-amz-content-sha256", api.StreamingContentSHA256)
+	req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
 	req.Header.Set("x-amz-decoded-content-length", strconv.Itoa(awsChunkedRequestExampleDecodedContentLength))
 	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")

@@ -581,202 +510,6 @@ func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName strin
 	return w, req, chunk
 }

-type customNopCloser struct {
-	*bytes.Buffer
-}
-
-func (c *customNopCloser) Close() error {
-	return nil
-}
-
-// getChunkedRequestTrailing implements request example from
-// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html
-func getChunkedRequestTrailing(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
-	chunk := make([]byte, 65*1024)
-	for i := range chunk {
-		chunk[i] = 'a'
-	}
-	chunk1 := chunk[:64*1024]
-	chunk2 := chunk[64*1024:]
-
-	AWSAccessKeyID := "AKIAIOSFODNN7EXAMPLE"
-	AWSSecretAccessKey := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
-
-	awsCreds := aws.Credentials{AccessKeyID: AWSAccessKeyID, SecretAccessKey: AWSSecretAccessKey}
-	signer := v4.NewSigner()
-
-	reqBody := bytes.NewBufferString("10000;chunk-signature=b474d8862b1487a5145d686f57f013e54db672cee1c953b3010fb58501ef5aa2\r\n")
-	_, err := reqBody.Write(chunk1)
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("\r\n400;chunk-signature=1c1344b170168f8e65b41376b44b20fe354e373826ccbbe2c1d40a8cae51e5c7\r\n")
-	require.NoError(t, err)
-	_, err = reqBody.Write(chunk2)
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("\r\n0;chunk-signature=2ca2aba2005185cf7159c6277faf83795951dd77a3a99e6e65d5c9f85863f992\r\n")
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("x-amz-checksum-crc32c:sOO8/Q==\n")
-	require.NoError(t, err)
-
-	// original signature is 63bddb248ad2590c92712055f51b8e78ab024eead08276b24f010b0efd74843f,
-	// but we use d81f82fc3505edab99d459891051a732e8730629a2e4a59689829ca17fe2e435
-	// because original signature is incorrect
-	// it was calculated using the `AWS4-HMAC-SHA256-PAYLOAD` constant in canonical string instead of
-	// `AWS4-HMAC-SHA256-TRAILER` that actually must be used by spec
-	// (java sdk use correct `AWS4-HMAC-SHA256-TRAILER` string).
-	_, err = reqBody.WriteString("x-amz-trailer-signature:d81f82fc3505edab99d459891051a732e8730629a2e4a59689829ca17fe2e435")
-	require.NoError(t, err)
-
-	req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil)
-	require.NoError(t, err)
-	req.Header.Set("content-encoding", api.AwsChunked)
-	req.Header.Set("content-length", strconv.Itoa(awsChunkedRequestExampleContentLength))
-	req.Header.Set("x-amz-content-sha256", api.StreamingContentSHA256Trailer)
-	req.Header.Set("x-amz-decoded-content-length", strconv.Itoa(awsChunkedRequestExampleDecodedContentLength))
-	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
-	req.Header.Set("x-amz-trailer", "x-amz-checksum-crc32c")
-
-	signTime, err := time.Parse("20060102T150405Z", "20130524T000000Z")
-	require.NoError(t, err)
-
-	err = signer.SignHTTP(ctx, awsCreds, req, api.StreamingContentSHA256Trailer, "s3", "us-east-1", signTime)
-	require.NoError(t, err)
-
-	req.Body = &customNopCloser{Buffer: reqBody}
-
-	w := httptest.NewRecorder()
-	reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
-	req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
-	req = req.WithContext(middleware.SetBox(req.Context(), &middleware.Box{
-		ClientTime: signTime,
-		AuthHeaders: &middleware.AuthHeader{
-			AccessKeyID: AWSAccessKeyID,
-			SignatureV4: "106e2a8a18243abcf37539882f36619c00e2dfc72633413f02d3b74544bfeb8e",
-			Region:      "us-east-1",
-		},
-		AccessBox: &accessbox.Box{
-			Gate: &accessbox.GateData{
-				SecretKey: AWSSecretAccessKey,
-			},
-		},
-	}))
-
-	return w, req, chunk
-}
-
-func getChunkedRequestUnsignedTrailing(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
-	chunk := make([]byte, 65*1024)
-	for i := range chunk {
-		chunk[i] = 'a'
-	}
-	//chunk1 := chunk[:64*1024]
-	//chunk2 := chunk[64*1024:]
-
-	AWSAccessKeyID := "9uEm8zMrGWsEDWiPCnVuQLKTiGtCEXpYXt8eBG7agupw0JDySJZMFuej7PTcPzRqBUyPtFowNu1RtvHULU8XHjie6"
-	AWSSecretAccessKey := "9f546428957ed7e189b7be928906ce7d1d9cb3042dd4d2d5194e28ce8c4c3b8e"
-
-	awsCreds := aws.Credentials{AccessKeyID: AWSAccessKeyID, SecretAccessKey: AWSSecretAccessKey}
-	signer := v4.NewSigner()
-
-	reqBody := bytes.NewBufferString("10400\r\n")
-	_, err := reqBody.Write(chunk)
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("\r\n0\r\n")
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("\r\nx-amz-checksum-crc64nvme:pRf+emrnL+A=\r\n\r\n")
-	require.NoError(t, err)
-
-	req, err := http.NewRequest("PUT", "https://localhost:8184/"+bktName+"/"+objName, nil)
-	//req, err := http.NewRequest("PUT", "https://localhost:8184/test2/body", nil)
-	require.NoError(t, err)
-	req.Header.Set("x-amz-sdk-checksum-algorithm", "CRC64NVME")
-	req.Header.Set("content-encoding", api.AwsChunked)
-	req.Header.Set("x-amz-trailer", "x-amz-checksum-crc64nvme")
-	req.Header.Set("x-amz-content-sha256", api.StreamingUnsignedPayloadTrailer)
-	req.Header.Set("x-amz-decoded-content-length", strconv.Itoa(awsChunkedRequestExampleDecodedContentLength))
-	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
-
-	signTime, err := time.Parse("20060102T150405Z", "20250131T140527Z")
-	require.NoError(t, err)
-
-	err = signer.SignHTTP(ctx, awsCreds, req, api.StreamingContentSHA256Trailer, "s3", "ru", signTime)
-	require.NoError(t, err)
-
-	req.Body = io.NopCloser(reqBody)
-
-	w := httptest.NewRecorder()
-	reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
-	req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
-	req = req.WithContext(middleware.SetBox(req.Context(), &middleware.Box{
-		ClientTime: signTime,
-		AuthHeaders: &middleware.AuthHeader{
-			AccessKeyID: AWSAccessKeyID,
-			SignatureV4: "a075c83779d1c3c02254fbe4c9eff0a21556d15556fc6a25db69147c4838226b",
-			Region:      "ru",
-		},
-		AccessBox: &accessbox.Box{
-			Gate: &accessbox.GateData{
-				SecretKey: AWSSecretAccessKey,
-			},
-		},
-	}))
-
-	return w, req, chunk
-}
-
-func getChunkedRequestUnsignedTrailingSmall(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
-	AWSAccessKeyID := "9uEm8zMrGWsEDWiPCnVuQLKTiGtCEXpYXt8eBG7agupw0JDySJZMFuej7PTcPzRqBUyPtFowNu1RtvHULU8XHjie6"
-	AWSSecretAccessKey := "9f546428957ed7e189b7be928906ce7d1d9cb3042dd4d2d5194e28ce8c4c3b8e"
-
-	awsCreds := aws.Credentials{AccessKeyID: AWSAccessKeyID, SecretAccessKey: AWSSecretAccessKey}
-	signer := v4.NewSigner()
-
-	chunk := "tmp2\n"
-
-	reqBody := bytes.NewBufferString("5\r\n")
-	_, err := reqBody.WriteString(chunk)
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("\r\n0\r\n")
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("x-amz-checksum-crc64nvme:q1EYl4rI0TU=\r\n\r\n")
-	require.NoError(t, err)
-
-	req, err := http.NewRequest("PUT", "https://localhost:8184/"+bktName+"/"+objName, nil)
-	require.NoError(t, err)
-	req.Header.Set("x-amz-sdk-checksum-algorithm", "CRC64NVME")
-	req.Header.Set("content-encoding", api.AwsChunked)
-	req.Header.Set("x-amz-trailer", "x-amz-checksum-crc64nvme")
-	req.Header.Set("x-amz-content-sha256", api.StreamingUnsignedPayloadTrailer)
-	req.Header.Set("x-amz-decoded-content-length", "5")
-	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
-
-	signTime, err := time.Parse("20060102T150405Z", "20250203T063745Z")
-	require.NoError(t, err)
-
-	err = signer.SignHTTP(ctx, awsCreds, req, api.StreamingContentSHA256Trailer, "s3", "ru", signTime)
-	require.NoError(t, err)
-
-	req.Body = io.NopCloser(reqBody)
-
-	w := httptest.NewRecorder()
-	reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
-	req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
-	req = req.WithContext(middleware.SetBox(req.Context(), &middleware.Box{
-		ClientTime: signTime,
-		AuthHeaders: &middleware.AuthHeader{
-			AccessKeyID: AWSAccessKeyID,
-			SignatureV4: "a075c83779d1c3c02254fbe4c9eff0a21556d15556fc6a25db69147c4838226b",
-			Region:      "ru",
-		},
-		AccessBox: &accessbox.Box{
-			Gate: &accessbox.GateData{
-				SecretKey: AWSSecretAccessKey,
-			},
-		},
-	}))
-
-	return w, req, []byte(chunk)
-}
-
 func getEmptyChunkedRequest(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request) {
 	AWSAccessKeyID := "48c1K4PLVb7SvmV3PjDKEuXaMh8yZMXZ8Wx9msrkKcYw06dZeaxeiPe8vyFm2WsoeVaNt7UWEjNsVkagDs8oX4XXh"
 	AWSSecretAccessKey := "09260955b4eb0279dc017ba20a1ddac909cbd226c86cbb2d868e55534c8e64b0"

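All the removed helpers hand-assemble `aws-chunked` bodies with the same wire format: `<hex-size>[;chunk-signature=...]\r\n<data>\r\n`, a terminating zero-size chunk, then optional trailer lines. A sketch reproducing the unsigned-trailer framing used by `getChunkedRequestUnsignedTrailingSmall` — the checksum value is copied from the test, not recomputed:

package main

import (
	"bytes"
	"fmt"
)

// buildUnsignedChunkedBody frames payload as a single aws-chunked chunk
// followed by the zero-length terminator and one trailer header line,
// matching the STREAMING-UNSIGNED-PAYLOAD-TRAILER layout in the tests.
func buildUnsignedChunkedBody(payload []byte, trailerName, trailerValue string) *bytes.Buffer {
	buf := new(bytes.Buffer)
	fmt.Fprintf(buf, "%x\r\n", len(payload)) // chunk size in hex, no signature
	buf.Write(payload)
	buf.WriteString("\r\n0\r\n") // zero-size chunk terminates the stream
	fmt.Fprintf(buf, "%s:%s\r\n\r\n", trailerName, trailerValue)
	return buf
}

func main() {
	body := buildUnsignedChunkedBody([]byte("tmp2\n"), "x-amz-checksum-crc64nvme", "q1EYl4rI0TU=")
	fmt.Printf("%q\n", body.String()) // "5\r\ntmp2\n\r\n0\r\nx-amz-checksum-crc64nvme:q1EYl4rI0TU=\r\n\r\n"
}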
@ -8,8 +8,6 @@ import (
|
|||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
|
||||
|
@ -29,19 +27,16 @@ type (
|
|||
reader *bufio.Reader
|
||||
streamSigner *v4.StreamSigner
|
||||
|
||||
trailerHeaders []string
|
||||
trailers map[string]string
|
||||
requestTime time.Time
|
||||
buffer []byte
|
||||
offset int
|
||||
err error
|
||||
requestTime time.Time
|
||||
buffer []byte
|
||||
offset int
|
||||
err error
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
errGiantChunk = errors.New("chunk too big: choose chunk size <= 16MiB")
|
||||
errMalformedChunkedEncoding = errors.New("malformed chunked encoding")
|
||||
errMalformedTrailerHeaders = errors.New("malformed trailer headers")
|
||||
)
|
||||
|
||||
func (c *s3ChunkReader) Close() (err error) {
|
||||
|
@ -112,9 +107,29 @@ func (c *s3ChunkReader) Read(buf []byte) (num int, err error) {
|
|||
c.err = errMalformedChunkedEncoding
|
||||
return num, c.err
|
||||
}
|
||||
|
||||
if err = c.readCRLF(); err != nil {
|
||||
return num, err
|
||||
b, err := c.reader.ReadByte()
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return num, c.err
|
||||
}
|
||||
if b != '\r' {
|
||||
c.err = errMalformedChunkedEncoding
|
||||
return num, c.err
|
||||
}
|
||||
b, err = c.reader.ReadByte()
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return num, c.err
|
||||
}
|
||||
if b != '\n' {
|
||||
c.err = errMalformedChunkedEncoding
|
||||
return num, c.err
|
||||
}
|
||||
|
||||
if cap(c.buffer) < size {
|
||||
|
@ -132,6 +147,23 @@ func (c *s3ChunkReader) Read(buf []byte) (num int, err error) {
|
|||
c.err = err
|
||||
return num, c.err
|
||||
}
|
||||
b, err = c.reader.ReadByte()
|
||||
if b != '\r' || err != nil {
|
||||
c.err = errMalformedChunkedEncoding
|
||||
return num, c.err
|
||||
}
|
||||
b, err = c.reader.ReadByte()
|
||||
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
c.err = err
return num, c.err
}
if b != '\n' {
c.err = errMalformedChunkedEncoding
return num, c.err
}

// Once we have read the entire chunk successfully, we verify
// that the received signature matches our computed signature.
@@ -149,99 +181,16 @@ func (c *s3ChunkReader) Read(buf []byte) (num int, err error) {
// If the chunk size is zero we return io.EOF. As specified by AWS,
// only the last chunk is zero-sized.
if size == 0 {
if len(c.trailerHeaders) != 0 {
if err = c.readTrailers(); err != nil {
c.err = err
return num, c.err
}
} else if err = c.readCRLF(); err != nil {
return num, err
}

c.err = io.EOF
return num, c.err
}

if err = c.readCRLF(); err != nil {
return num, err
}

c.offset = copy(buf, c.buffer)
num += c.offset
return num, err
}

func (c *s3ChunkReader) readCRLF() error {
for _, ch := range [2]byte{'\r', '\n'} {
b, err := c.reader.ReadByte()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}

if err != nil {
c.err = err
return c.err
}
if b != ch {
c.err = errMalformedChunkedEncoding
return c.err
}
}

return nil
}

func (c *s3ChunkReader) readTrailers() error {
var k, v []byte
var err error
for err == nil {
k, err = c.reader.ReadBytes(':')
if err != nil {
if err == io.EOF {
break
}
c.err = errMalformedTrailerHeaders
return c.err
}
v, err = c.reader.ReadBytes('\n')
if err != nil && err != io.EOF {
c.err = errMalformedTrailerHeaders
return c.err
}
if len(v) >= 2 && v[len(v)-2] == '\r' {
v[len(v)-2] = '\n'
v = v[:len(v)-1]
}

switch {
case slices.Contains(c.trailerHeaders, string(k[:len(k)-1])):
c.buffer = append(append(c.buffer, k...), v...) // todo use copy
case string(k) == "x-amz-trailer-signature:":
calculatedSignature, err := c.streamSigner.GetTrailerSignature(c.buffer, c.requestTime)
if err != nil {
c.err = err
return c.err
}
if string(v[:64]) != hex.EncodeToString(calculatedSignature) {
c.err = errs.GetAPIError(errs.ErrSignatureDoesNotMatch)
return c.err
}
default:
c.err = errMalformedTrailerHeaders
return c.err
}

c.trailers[string(k[:len(k)-1])] = string(v[:len(v)-1])
}

return nil
}

func (c *s3ChunkReader) TrailerHeaders() map[string]string {
return c.trailers
}

func newSignV4ChunkedReader(req *http.Request) (*s3ChunkReader, error) {
func newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, error) {
ctx := req.Context()
box, err := middleware.GetBoxData(ctx)
if err != nil {
@@ -265,19 +214,11 @@ func newSignV4ChunkedReader(req *http.Request) (*s3ChunkReader, error) {
}
newStreamSigner := v4.NewStreamSigner(currentCredentials, "s3", authHeaders.Region, seed)

var trailerHeaders []string
trailer := req.Header.Get("x-amz-trailer")
if trailer != "" {
trailerHeaders = strings.Split(trailer, ";")
}

return &s3ChunkReader{
ctx: ctx,
reader: bufio.NewReader(req.Body),
streamSigner: newStreamSigner,
requestTime: reqTime,
buffer: make([]byte, 64*1024),
trailerHeaders: trailerHeaders,
trailers: make(map[string]string, len(trailerHeaders)),
ctx: ctx,
reader: bufio.NewReader(req.Body),
streamSigner: newStreamSigner,
requestTime: reqTime,
buffer: make([]byte, 64*1024),
}, nil
}
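For reference, the aws-chunked framing this reader consumes looks like the following. A minimal sketch assembled from the test fixtures below; the signature values are hypothetical placeholders, not real HMACs:

// Sketch of a STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER body.
// Each chunk is "<hex-size>;chunk-signature=<sig>\r\n<payload>\r\n";
// a zero-sized chunk terminates the stream, followed by the declared
// trailers and the x-amz-trailer-signature line.
const sig1, sigLast, sigTrailer = "<sig1>", "<sigLast>", "<sigTrailer>" // placeholders
body := "1b;chunk-signature=" + sig1 + "\r\n" + // 0x1b = 27 bytes of payload
	"Testing with the {sdk-java}" +
	"\r\n0;chunk-signature=" + sigLast + "\r\n" +
	"x-amz-checksum-crc32:Np6zMg==\r\n" + // trailer named in the x-amz-trailer header
	"x-amz-trailer-signature:" + sigTrailer + "\r\n\r\n"
_ = body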
@@ -2,12 +2,8 @@ package handler

import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"testing"
"time"

@@ -16,102 +12,22 @@ import (
"github.com/stretchr/testify/require"
)

func TestSigV4AChunkedReader(t *testing.T) {
t.Run("with trailers", func(t *testing.T) {
accessKeyID := "9uEm8zMrGWsEDWiPCnVuQLKTiGtCEXpYXt8eBG7agupw0JDySJZMFuej7PTcPzRqBUyPtFowNu1RtvHULU8XHjie6"
secretKey := "9f546428957ed7e189b7be928906ce7d1d9cb3042dd4d2d5194e28ce8c4c3b8e"
func TestSigV4AStreaming(t *testing.T) {
accessKeyID := "2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1"
secretKey := "00637f53f842573aaa06c2164c598973cd986880987111416cf71f1619def537"

chunk1 := "Testing with the {sdk-java}"
body := "1b;chunk-signature=3045022100956ca03d2166100b455b532de542892f73925fbcea2f6498674a39a61bb4860902202977c1d47aea548d434540f89640ce97e605d18353cbbd75a619874f02e3dd22**\r\n" +
chunk1 +
"\r\n0;chunk-signature=304502210097dcc1721675469910ef8712fc2af0678eb90c12216dd6228c6b621fb6f805a0022047d27d21ae2af8a8172f2ef83c81ce9d4746aa88fc9ee0ca783eaa5e71aaef6c**\r\n" +
"x-amz-checksum-crc32:Np6zMg==\r\n" +
"x-amz-trailer-signature:304502200ecacd9aa2c432af5a2327c22a2ff9b32f44ab8559de00309219aef105eaaac102210092cbc0e78c4bcd56490a73da8ceed1934be80f3affeffb14d8c743fc292dda4f**\r\n\r\n"
chunk1 := "Testing with the {sdk-java}"
reqBody := bytes.NewBufferString("1b;chunk-signature=3045022100b63692a1b20759bdabd342011823427a8952df75c93174d98ad043abca8052e002201695228a91ba986171b8d0ad20856d3d94ca3614d0a90a50a531ba8e52447b9b**\r\n")
_, err := reqBody.WriteString(chunk1)
require.NoError(t, err)
_, err = reqBody.WriteString("\r\n0;chunk-signature=30440220455885a2d4e9f705256ca6b0a5a22f7f784780ccbd1c0a371e5db3059c91745b022073259dd44746cbd63261d628a04d25be5a32a974c077c5c2d83c8157fb323b9f****\r\n\r\n")
require.NoError(t, err)

reqBody := bytes.NewBufferString(body)
req, err := http.NewRequest("PUT", "https://localhost:8184/test2/tmp", reqBody)
require.NoError(t, err)
req.Header.Set("x-amz-trailer", "x-amz-checksum-crc32")
req, err := http.NewRequest("PUT", "http://localhost:8084/test/tmp", reqBody)
require.NoError(t, err)

signature := "3045022100ddbc6ab11785d7f23d299de7db97379116f543377a44e38170a4e43b38b0d62b02201d8dca13c67f04f45491345152db4b704768eb8bb89b5215fd59bb4a4d9d7b61"
signingTime, err := time.Parse("20060102T150405Z", "20250203T144621Z")
require.NoError(t, err)

key, err := keys.NewPrivateKey()
require.NoError(t, err)

accessBox, err := newTestAccessBox(key)
require.NoError(t, err)
accessBox.Gate.SecretKey = secretKey

ctx := middleware.SetBox(req.Context(), &middleware.Box{
AccessBox: accessBox,
AuthHeaders: &middleware.AuthHeader{
AccessKeyID: accessKeyID,
SignatureV4: signature,
},
ClientTime: signingTime,
})
req = req.WithContext(ctx)

r, err := newSignV4aChunkedReader(req)
require.NoError(t, err)

data, err := io.ReadAll(r)
require.NoError(t, err)
require.Equal(t, chunk1, string(data))
})

t.Run("without trailers", func(t *testing.T) {
accessKeyID := "2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1"
secretKey := "00637f53f842573aaa06c2164c598973cd986880987111416cf71f1619def537"

chunk1 := "Testing with the {sdk-java}"
reqBody := bytes.NewBufferString("1b;chunk-signature=3045022100b63692a1b20759bdabd342011823427a8952df75c93174d98ad043abca8052e002201695228a91ba986171b8d0ad20856d3d94ca3614d0a90a50a531ba8e52447b9b**\r\n")
_, err := reqBody.WriteString(chunk1)
require.NoError(t, err)
_, err = reqBody.WriteString("\r\n0;chunk-signature=30440220455885a2d4e9f705256ca6b0a5a22f7f784780ccbd1c0a371e5db3059c91745b022073259dd44746cbd63261d628a04d25be5a32a974c077c5c2d83c8157fb323b9f****\r\n\r\n")
require.NoError(t, err)

req, err := http.NewRequest("PUT", "http://localhost:8084/test/tmp", reqBody)
require.NoError(t, err)

signature := "30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7"
signingTime, err := time.Parse("20060102T150405Z", "20240904T133253Z")
require.NoError(t, err)

key, err := keys.NewPrivateKey()
require.NoError(t, err)

accessBox, err := newTestAccessBox(key)
require.NoError(t, err)
accessBox.Gate.SecretKey = secretKey

ctx := middleware.SetBox(req.Context(), &middleware.Box{
AccessBox: accessBox,
AuthHeaders: &middleware.AuthHeader{
AccessKeyID: accessKeyID,
SignatureV4: signature,
},
ClientTime: signingTime,
})
req = req.WithContext(ctx)

r, err := newSignV4aChunkedReader(req)
require.NoError(t, err)

data, err := io.ReadAll(r)
require.NoError(t, err)
require.Equal(t, chunk1, string(data))
})
}

func TestSigV4ChunkedReader(t *testing.T) {
accessKeyID := "9uEm8zMrGWsEDWiPCnVuQLKTiGtCEXpYXt8eBG7agupw0JDySJZMFuej7PTcPzRqBUyPtFowNu1RtvHULU8XHjie6"
secretKey := "9f546428957ed7e189b7be928906ce7d1d9cb3042dd4d2d5194e28ce8c4c3b8e"

signature := "b740b3b2a08c541c3fc4bd155a448e25408b509a29af98a86356b894930b93e8"
signingTime, err := time.Parse("20060102T150405Z", "20250203T134442Z")
signature := "30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7"
signingTime, err := time.Parse("20060102T150405Z", "20240904T133253Z")
require.NoError(t, err)

key, err := keys.NewPrivateKey()
@@ -121,117 +37,21 @@ func TestSigV4ChunkedReader(t *testing.T) {
require.NoError(t, err)
accessBox.Gate.SecretKey = secretKey

setBoxFn := func(ctx context.Context) context.Context {
return middleware.SetBox(ctx, &middleware.Box{
AccessBox: accessBox,
AuthHeaders: &middleware.AuthHeader{
AccessKeyID: accessKeyID,
SignatureV4: signature,
Region: "us-east-1",
},
ClientTime: signingTime,
})
}

chunk1 := "Testing with the {sdk-java}"

t.Run("with trailers", func(t *testing.T) {
body := "1b;chunk-signature=a6a9be5fff05db0b542aedb2203d892b4162250885d06b1422b173ee0ea92ba5\r\n" +
chunk1 +
"\r\n0;chunk-signature=31afd083a57c416c46afaf101649d7f0c6c0627cfa60c0f93d1f7ea84396ee42\r\n" +
"x-amz-checksum-crc32:Np6zMg==\r\n" +
"x-amz-trailer-signature:40ec0046ac730fa27a1451d00d849056c49553ee753f5d158306d05671a42125\r\n\r\n"

reqBody := bytes.NewBufferString(body)
req, err := http.NewRequest("PUT", "https://localhost:8184/test2/tmp", reqBody)
require.NoError(t, err)
req.Header.Set("x-amz-trailer", "x-amz-checksum-crc32")

req = req.WithContext(setBoxFn(req.Context()))

r, err := newSignV4ChunkedReader(req)
require.NoError(t, err)

data, err := io.ReadAll(r)
require.NoError(t, err)
require.Equal(t, chunk1, string(data))
ctx := middleware.SetBox(req.Context(), &middleware.Box{
AccessBox: accessBox,
AuthHeaders: &middleware.AuthHeader{
AccessKeyID: accessKeyID,
SignatureV4: signature,
},
ClientTime: signingTime,
})
req = req.WithContext(ctx)

t.Run("without trailers", func(t *testing.T) {
body := "1b;chunk-signature=a6a9be5fff05db0b542aedb2203d892b4162250885d06b1422b173ee0ea92ba5\r\n" +
chunk1 +
"\r\n0;chunk-signature=31afd083a57c416c46afaf101649d7f0c6c0627cfa60c0f93d1f7ea84396ee42\r\n\r\n"
reqBody := bytes.NewBufferString(body)
req, err := http.NewRequest("PUT", "https://localhost:8184/test2/tmp", reqBody)
require.NoError(t, err)

req = req.WithContext(setBoxFn(req.Context()))

r, err := newSignV4ChunkedReader(req)
require.NoError(t, err)

data, err := io.ReadAll(r)
require.NoError(t, err)
require.Equal(t, chunk1, string(data))
})
}

func TestUnsignedChunkReader(t *testing.T) {
chunk1 := "chunk1"
chunk2 := "chunk2"

t.Run("with trailer", func(t *testing.T) {
chunks := []string{chunk1, chunk2}
trailer := map[string]string{"x-amz-checksum-crc64nvme": "q1EYl4rI0TU="}
body, expected := getChunkedBody(t, chunks, trailer)

r, err := newUnsignedChunkedReader(body)
require.NoError(t, err)

data, err := io.ReadAll(r)
require.NoError(t, err)
require.Equal(t, expected, string(data))

require.EqualValues(t, trailer, r.TrailerHeaders())
})

t.Run("without trailer", func(t *testing.T) {
chunks := []string{chunk1, chunk2}
body, expected := getChunkedBody(t, chunks, nil)

r, err := newUnsignedChunkedReader(body)
require.NoError(t, err)

data, err := io.ReadAll(r)
require.NoError(t, err)
require.Equal(t, expected, string(data))
})
}

func getChunkedBody(t *testing.T, chunks []string, trailers map[string]string) (*bytes.Buffer, string) {
res := bytes.NewBufferString("")

for i, chunk := range chunks {
meta := strconv.FormatInt(int64(len(chunk)), 16) + "\r\n"
if i != 0 {
meta = "\r\n" + meta
}
_, err := res.WriteString(meta)
require.NoError(t, err)
_, err = res.WriteString(chunk)
require.NoError(t, err)
}

_, err := res.WriteString("\r\n0\r\n")
r, err := newSignV4aChunkedReader(req)
require.NoError(t, err)

for k, v := range trailers {
_, err := res.WriteString(fmt.Sprintf("%s:%s\n", k, v))
require.NoError(t, err)
}

_, err = res.WriteString("\r\n")
data, err := io.ReadAll(r)
require.NoError(t, err)

return res, strings.Join(chunks, "")
require.Equal(t, chunk1, string(data))
}
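The trailer check exercised by these tests reduces to: the reader signs the canonical trailer bytes it buffered and compares hex encodings. A condensed sketch of that step, using the same names as readTrailers above (received is an assumed local holding the value from the x-amz-trailer-signature line):

// c.buffer holds the canonical trailer bytes, e.g. "x-amz-checksum-crc32:Np6zMg==\n".
calculated, err := c.streamSigner.GetTrailerSignature(c.buffer, c.requestTime)
if err != nil {
	return err
}
// received is the 64-char hex HMAC-SHA256 signature sent by the client.
if received != hex.EncodeToString(calculated) {
	return errs.GetAPIError(errs.ErrSignatureDoesNotMatch)
}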
@@ -1,161 +0,0 @@
package handler

import (
"bufio"
"io"
)

type (
s3UnsignedChunkReader struct {
reader *bufio.Reader

trailers map[string]string
buffer []byte
offset int
err error
}
)

func (c *s3UnsignedChunkReader) Close() (err error) {
return nil
}

func (c *s3UnsignedChunkReader) Read(buf []byte) (num int, err error) {
if c.offset > 0 {
num = copy(buf, c.buffer[c.offset:])
if num == len(buf) {
c.offset += num
return num, nil
}
c.offset = 0
buf = buf[num:]
}

var size int
var b byte
for {
b, err = c.reader.ReadByte()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
c.err = err
return num, c.err
}
if b == '\r' {
break
}

// Manually deserialize the size since AWS specified
// the chunk size to be of variable width. In particular,
// a size of 16 is encoded as `10` while a size of 64 KB
// is `10000`.
switch {
case b >= '0' && b <= '9':
size = size<<4 | int(b-'0')
case b >= 'a' && b <= 'f':
size = size<<4 | int(b-('a'-10))
case b >= 'A' && b <= 'F':
size = size<<4 | int(b-('A'-10))
default:
c.err = errMalformedChunkedEncoding
return num, c.err
}
if size > maxChunkSize {
c.err = errGiantChunk
return num, c.err
}
}

if b != '\r' {
c.err = errMalformedChunkedEncoding
return num, c.err
}
b, err = c.reader.ReadByte()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
c.err = err
return num, c.err
}
if b != '\n' {
c.err = errMalformedChunkedEncoding
return num, c.err
}

if cap(c.buffer) < size {
c.buffer = make([]byte, size)
} else {
c.buffer = c.buffer[:size]
}

// Now, we read the payload and compute its SHA-256 hash.
_, err = io.ReadFull(c.reader, c.buffer)
if err == io.EOF && size != 0 {
err = io.ErrUnexpectedEOF
}
if err != nil && err != io.EOF {
c.err = err
return num, c.err
}

// If the chunk size is zero we return io.EOF. As specified by AWS,
// only the last chunk is zero-sized.
if size == 0 {
var k, v string
for err == nil {
k, err = c.reader.ReadString(':')
if err != nil {
if err == io.EOF {
break
}
c.err = errMalformedTrailerHeaders
return num, c.err
}
v, err = c.reader.ReadString('\n')
if err != nil {
c.err = errMalformedTrailerHeaders
return num, c.err
}
c.trailers[k[:len(k)-1]] = v[:len(v)-1]
}

c.err = io.EOF
return num, c.err
}

b, err = c.reader.ReadByte()
if b != '\r' || err != nil {
c.err = errMalformedChunkedEncoding
return num, c.err
}
b, err = c.reader.ReadByte()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
c.err = err
return num, c.err
}
if b != '\n' {
c.err = errMalformedChunkedEncoding
return num, c.err
}

c.offset = copy(buf, c.buffer)
num += c.offset
return num, err
}

func (c *s3UnsignedChunkReader) TrailerHeaders() map[string]string {
return c.trailers
}

func newUnsignedChunkedReader(body io.Reader) (*s3UnsignedChunkReader, error) {
return &s3UnsignedChunkReader{
reader: bufio.NewReader(body),
trailers: map[string]string{},
buffer: make([]byte, 64*1024),
}, nil
}
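The byte-wise hex parsing in the deleted reader above (and in the signed readers) can be isolated into a small helper. A sketch under stated assumptions: parseChunkSize is a hypothetical name, and the digit logic mirrors the switch above, so 16 parses from "10" and 65536 from "10000":

func parseChunkSize(r *bufio.Reader) (int, error) {
	var size int
	for {
		b, err := r.ReadByte()
		if err != nil {
			return 0, err
		}
		// The unsigned reader stops at '\r'; the signed readers stop
		// earlier, at the ';' that introduces the chunk-signature extension.
		if b == '\r' || b == ';' {
			return size, nil
		}
		switch {
		case b >= '0' && b <= '9':
			size = size<<4 | int(b-'0')
		case b >= 'a' && b <= 'f':
			size = size<<4 | int(b-('a'-10))
		case b >= 'A' && b <= 'F':
			size = size<<4 | int(b-('A'-10))
		default:
			return 0, errMalformedChunkedEncoding
		}
	}
}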
@@ -7,8 +7,6 @@ import (
"fmt"
"io"
"net/http"
"slices"
"strings"
"time"

v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
@@ -22,12 +20,10 @@ type (
reader *bufio.Reader
streamSigner *v4a.StreamSigner

trailerHeaders []string
trailers map[string]string
requestTime time.Time
buffer []byte
offset int
err error
requestTime time.Time
buffer []byte
offset int
err error
}
)

@@ -91,9 +87,21 @@ func (c *s3v4aChunkReader) Read(buf []byte) (num int, err error) {
c.err = errMalformedChunkedEncoding
return num, c.err
}

if err = c.readCRLF(); err != nil {
return num, err
b, err := c.reader.ReadByte()
if err != nil {
return c.handleErr(num, err)
}
if b != '\r' {
c.err = errMalformedChunkedEncoding
return num, c.err
}
b, err = c.reader.ReadByte()
if err != nil {
return c.handleErr(num, err)
}
if b != '\n' {
c.err = errMalformedChunkedEncoding
return num, c.err
}

if cap(c.buffer) < size {
@@ -111,6 +119,19 @@ func (c *s3v4aChunkReader) Read(buf []byte) (num int, err error) {
c.err = err
return num, c.err
}
b, err = c.reader.ReadByte()
if b != '\r' || err != nil {
c.err = errMalformedChunkedEncoding
return num, c.err
}
b, err = c.reader.ReadByte()
if err != nil {
return c.handleErr(num, err)
}
if b != '\n' {
c.err = errMalformedChunkedEncoding
return num, c.err
}

// Once we have read the entire chunk successfully, we verify
// that the received signature is valid.
@@ -129,23 +150,10 @@ func (c *s3v4aChunkReader) Read(buf []byte) (num int, err error) {
// If the chunk size is zero we return io.EOF. As specified by AWS,
// only the last chunk is zero-sized.
if size == 0 {
if len(c.trailerHeaders) != 0 {
if err = c.readTrailers(); err != nil {
c.err = err
return num, c.err
}
} else if err = c.readCRLF(); err != nil {
return num, err
}

c.err = io.EOF
return num, c.err
}

if err = c.readCRLF(); err != nil {
return num, err
}

c.offset = copy(buf, c.buffer)
num += c.offset
return num, err
@@ -160,78 +168,7 @@ func (c *s3v4aChunkReader) handleErr(num int, err error) (int, error) {
return num, c.err
}

func (c *s3v4aChunkReader) readCRLF() error {
for _, ch := range [2]byte{'\r', '\n'} {
b, err := c.reader.ReadByte()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}

if err != nil {
c.err = err
return c.err
}
if b != ch {
c.err = errMalformedChunkedEncoding
return c.err
}
}

return nil
}

func (c *s3v4aChunkReader) readTrailers() error {
var k, v []byte
var err error
for err == nil {
k, err = c.reader.ReadBytes(':')
if err != nil {
if err == io.EOF {
break
}
c.err = errMalformedTrailerHeaders
return c.err
}
v, err = c.reader.ReadBytes('\n')
if err != nil && err != io.EOF {
c.err = errMalformedTrailerHeaders
return c.err
}
if len(v) >= 2 && v[len(v)-2] == '\r' {
v[len(v)-2] = '\n'
v = v[:len(v)-1]
}

switch {
case slices.Contains(c.trailerHeaders, string(k[:len(k)-1])):
c.buffer = append(append(c.buffer, k...), v...) // todo use copy
case string(k) == "x-amz-trailer-signature:":
n, err := hex.Decode(v[:], bytes.TrimRight(v[:], "*\n"))
if err != nil {
c.err = errMalformedChunkedEncoding
return c.err
}

if err = c.streamSigner.VerifyTrailerSignature(c.buffer, c.requestTime, v[:n]); err != nil {
c.err = fmt.Errorf("%w: %s", errs.GetAPIError(errs.ErrSignatureDoesNotMatch), err.Error())
return c.err
}
default:
c.err = errMalformedTrailerHeaders
return c.err
}

c.trailers[string(k[:len(k)-1])] = string(v[:len(v)-1])
}

return nil
}

func (c *s3v4aChunkReader) TrailerHeaders() map[string]string {
return c.trailers
}

func newSignV4aChunkedReader(req *http.Request) (*s3v4aChunkReader, error) {
func newSignV4aChunkedReader(req *http.Request) (io.ReadCloser, error) {
box, err := middleware.GetBoxData(req.Context())
if err != nil {
return nil, errs.GetAPIError(errs.ErrAuthorizationHeaderMalformed)
@@ -263,18 +200,10 @@ func newSignV4aChunkedReader(req *http.Request) (*s3v4aChunkReader, error) {

newStreamSigner := v4a.NewStreamSigner(creds, "s3", seed)

var trailerHeaders []string
trailer := req.Header.Get("x-amz-trailer")
if trailer != "" {
trailerHeaders = strings.Split(trailer, ";")
}

return &s3v4aChunkReader{
reader: bufio.NewReader(req.Body),
streamSigner: newStreamSigner,
requestTime: reqTime,
buffer: make([]byte, 64*1024),
trailerHeaders: trailerHeaders,
trailers: make(map[string]string, len(trailerHeaders)),
reader: bufio.NewReader(req.Body),
streamSigner: newStreamSigner,
requestTime: reqTime,
buffer: make([]byte, 64*1024),
}, nil
}
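Unlike sigv4, the v4a trailer signature is a variable-length ECDSA signature padded to a fixed width with '*', which is why the reader above trims the padding before hex-decoding. A condensed sketch of that verification path, reusing the names from readTrailers:

// v holds the raw bytes after "x-amz-trailer-signature:".
n, err := hex.Decode(v[:], bytes.TrimRight(v[:], "*\n"))
if err != nil {
	return errMalformedChunkedEncoding
}
if err = c.streamSigner.VerifyTrailerSignature(c.buffer, c.requestTime, v[:n]); err != nil {
	return fmt.Errorf("%w: %s", errs.GetAPIError(errs.ErrSignatureDoesNotMatch), err.Error())
}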
@@ -39,10 +39,9 @@ func (h *handler) logAndSendError(ctx context.Context, w http.ResponseWriter, lo
zap.String("object", reqInfo.ObjectName),
zap.String("description", logText),
zap.String("user", reqInfo.User),
zap.Error(err),
}
zap.Error(err)}
fields = append(fields, additional...)
h.reqLogger(ctx).Error(logs.RequestFailed, append(fields, logs.TagField(logs.TagDatapath))...)
h.reqLogger(ctx).Error(logs.RequestFailed, fields...)
}

func handleDeleteMarker(w http.ResponseWriter, err error) error {
@@ -94,11 +94,8 @@ const (

DefaultLocationConstraint = "default"

StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
StreamingContentSHA256Trailer = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
StreamingContentV4aSHA256 = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"
StreamingContentV4aSHA256Trailer = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"
StreamingUnsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
StreamingContentV4aSHA256 = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"

DefaultStorageClass = "STANDARD"
)
@@ -132,8 +129,6 @@ var SystemMetadata = map[string]struct{}{
func IsSignedStreamingV4(r *http.Request) (string, bool) {
shaHeader := r.Header.Get(AmzContentSha256)
return shaHeader,
(shaHeader == StreamingContentSHA256 || shaHeader == StreamingContentSHA256Trailer ||
shaHeader == StreamingContentV4aSHA256 || shaHeader == StreamingContentV4aSHA256Trailer ||
shaHeader == StreamingUnsignedPayloadTrailer) &&
(shaHeader == StreamingContentSHA256 || shaHeader == StreamingContentV4aSHA256) &&
r.Method == http.MethodPut
}
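Taken together, these constants let the gateway pick a body reader from the x-amz-content-sha256 value. A hedged sketch of that dispatch; chunkedBodyReader is a hypothetical helper, while the constants and constructors are the ones defined in the diffs above:

func chunkedBodyReader(r *http.Request) (io.ReadCloser, error) {
	switch r.Header.Get(AmzContentSha256) {
	case StreamingContentSHA256, StreamingContentSHA256Trailer:
		rc, err := newSignV4ChunkedReader(r) // sigv4 HMAC per-chunk signatures
		return rc, err
	case StreamingContentV4aSHA256, StreamingContentV4aSHA256Trailer:
		rc, err := newSignV4aChunkedReader(r) // sigv4a ECDSA per-chunk signatures
		return rc, err
	case StreamingUnsignedPayloadTrailer:
		rc, err := newUnsignedChunkedReader(r.Body) // framing only, no signatures
		return rc, err
	default:
		return r.Body, nil // not aws-chunked
	}
}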
@@ -76,8 +76,7 @@ func (c *Cache) PutBucket(bktInfo *data.BucketInfo) {
zap.String("zone", bktInfo.Zone),
zap.String("bucket name", bktInfo.Name),
zap.Stringer("bucket cid", bktInfo.CID),
zap.Error(err),
logs.TagField(logs.TagDatapath))
zap.Error(err))
}
}

@@ -119,12 +118,11 @@ func (c *Cache) PutObject(owner user.ID, extObjInfo *data.ExtendedObjectInfo) {
if err := c.objCache.PutObject(extObjInfo); err != nil {
c.logger.Warn(logs.CouldntAddObjectToCache, zap.Error(err),
zap.String("object_name", extObjInfo.ObjectInfo.Name), zap.String("bucket_name", extObjInfo.ObjectInfo.Bucket),
zap.String("cid", extObjInfo.ObjectInfo.CID.EncodeToString()), zap.String("oid", extObjInfo.ObjectInfo.ID.EncodeToString()),
logs.TagField(logs.TagDatapath))
zap.String("cid", extObjInfo.ObjectInfo.CID.EncodeToString()), zap.String("oid", extObjInfo.ObjectInfo.ID.EncodeToString()))
}

if err := c.accessCache.Put(owner, extObjInfo.ObjectInfo.Address().EncodeToString()); err != nil {
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
}
}

@@ -134,8 +132,7 @@ func (c *Cache) PutObjectWithName(owner user.ID, extObjInfo *data.ExtendedObject
if err := c.namesCache.Put(extObjInfo.ObjectInfo.NiceName(), extObjInfo.ObjectInfo.Address()); err != nil {
c.logger.Warn(logs.CouldntPutObjAddressToNameCache,
zap.String("obj nice name", extObjInfo.ObjectInfo.NiceName()),
zap.Error(err),
logs.TagField(logs.TagDatapath))
zap.Error(err))
}
}

@@ -149,11 +146,11 @@ func (c *Cache) GetList(owner user.ID, key cache.ObjectsListKey) []*data.NodeVer

func (c *Cache) PutList(owner user.ID, key cache.ObjectsListKey, list []*data.NodeVersion) {
if err := c.listsCache.PutVersions(key, list); err != nil {
c.logger.Warn(logs.CouldntCacheListOfObjects, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheListOfObjects, zap.Error(err))
}

if err := c.accessCache.Put(owner, key.String()); err != nil {
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
}
}

@@ -167,11 +164,11 @@ func (c *Cache) GetListSession(owner user.ID, key cache.ListSessionKey) *data.Li

func (c *Cache) PutListSession(owner user.ID, key cache.ListSessionKey, session *data.ListSession) {
if err := c.sessionListCache.PutListSession(key, session); err != nil {
c.logger.Warn(logs.CouldntCacheListSession, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheListSession, zap.Error(err))
}

if err := c.accessCache.Put(owner, key.String()); err != nil {
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
}
}

@@ -190,11 +187,11 @@ func (c *Cache) GetTagging(owner user.ID, key string) map[string]string {

func (c *Cache) PutTagging(owner user.ID, key string, tags map[string]string) {
if err := c.systemCache.PutTagging(key, tags); err != nil {
c.logger.Error(logs.CouldntCacheTags, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Error(logs.CouldntCacheTags, zap.Error(err))
}

if err := c.accessCache.Put(owner, key); err != nil {
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
}
}

@@ -212,11 +209,11 @@ func (c *Cache) GetLockInfo(owner user.ID, key string) *data.LockInfo {

func (c *Cache) PutLockInfo(owner user.ID, key string, lockInfo *data.LockInfo) {
if err := c.systemCache.PutLockInfo(key, lockInfo); err != nil {
c.logger.Error(logs.CouldntCacheLockInfo, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Error(logs.CouldntCacheLockInfo, zap.Error(err))
}

if err := c.accessCache.Put(owner, key); err != nil {
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
}
}

@@ -233,11 +230,11 @@ func (c *Cache) GetSettings(owner user.ID, bktInfo *data.BucketInfo) *data.Bucke
func (c *Cache) PutSettings(owner user.ID, bktInfo *data.BucketInfo, settings *data.BucketSettings) {
key := bktInfo.Name + bktInfo.SettingsObjectName()
if err := c.systemCache.PutSettings(key, settings); err != nil {
c.logger.Warn(logs.CouldntCacheBucketSettings, zap.String("bucket", bktInfo.Name), zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheBucketSettings, zap.String("bucket", bktInfo.Name), zap.Error(err))
}

if err := c.accessCache.Put(owner, key); err != nil {
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
}
}

@@ -255,11 +252,11 @@ func (c *Cache) PutCORS(owner user.ID, bkt *data.BucketInfo, cors *data.CORSConf
key := bkt.CORSObjectName()

if err := c.systemCache.PutCORS(key, cors); err != nil {
c.logger.Warn(logs.CouldntCacheCors, zap.String("bucket", bkt.Name), zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheCors, zap.String("bucket", bkt.Name), zap.Error(err))
}

if err := c.accessCache.Put(owner, key); err != nil {
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
}
}

@@ -281,11 +278,11 @@ func (c *Cache) PutLifecycleConfiguration(owner user.ID, bkt *data.BucketInfo, c
key := bkt.LifecycleConfigurationObjectName()

if err := c.systemCache.PutLifecycleConfiguration(key, cfg); err != nil {
c.logger.Warn(logs.CouldntCacheLifecycleConfiguration, zap.String("bucket", bkt.Name), zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheLifecycleConfiguration, zap.String("bucket", bkt.Name), zap.Error(err))
}

if err := c.accessCache.Put(owner, key); err != nil {
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
}
}

@@ -299,7 +296,7 @@ func (c *Cache) GetNetworkInfo() *netmap.NetworkInfo {

func (c *Cache) PutNetworkInfo(info netmap.NetworkInfo) {
if err := c.networkCache.PutNetworkInfo(info); err != nil {
c.logger.Warn(logs.CouldntCacheNetworkInfo, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheNetworkInfo, zap.Error(err))
}
}

@@ -309,7 +306,7 @@ func (c *Cache) GetNetmap() *netmap.NetMap {

func (c *Cache) PutNetmap(nm netmap.NetMap) {
if err := c.networkCache.PutNetmap(nm); err != nil {
c.logger.Warn(logs.CouldntCacheNetmap, zap.Error(err), logs.TagField(logs.TagDatapath))
c.logger.Warn(logs.CouldntCacheNetmap, zap.Error(err))
}
}
@@ -64,7 +64,6 @@ func (n *Layer) containerInfo(ctx context.Context, prm frostfs.PrmContainer) (*d
log.Error(logs.CouldNotParseContainerObjectLockEnabledAttribute,
zap.String("lock_enabled", attrLockEnabled),
zap.Error(err),
logs.TagField(logs.TagDatapath),
)
}
}
@@ -89,7 +88,7 @@ func (n *Layer) containerList(ctx context.Context, listParams ListBucketsParams)

res, err := n.frostFS.UserContainers(ctx, prm)
if err != nil {
n.reqLogger(ctx).Error(logs.CouldNotListUserContainers, zap.Error(err), logs.TagField(logs.TagExternalStorage))
n.reqLogger(ctx).Error(logs.CouldNotListUserContainers, zap.Error(err))
return nil, err
}

@@ -101,7 +100,7 @@ func (n *Layer) containerList(ctx context.Context, listParams ListBucketsParams)
}
info, err := n.containerInfo(ctx, getPrm)
if err != nil {
n.reqLogger(ctx).Error(logs.CouldNotFetchContainerInfo, zap.Error(err), logs.TagField(logs.TagExternalStorage))
n.reqLogger(ctx).Error(logs.CouldNotFetchContainerInfo, zap.Error(err))
continue
}
@@ -92,8 +92,7 @@ func (n *Layer) deleteCORSObject(ctx context.Context, bktInfo *data.BucketInfo,
if err := n.objectDeleteWithAuth(ctx, corsBkt, addr.Object(), prmAuth); err != nil {
n.reqLogger(ctx).Error(logs.CouldntDeleteCorsObject, zap.Error(err),
zap.String("cnrID", corsBkt.CID.EncodeToString()),
zap.String("objID", addr.Object().EncodeToString()),
logs.TagField(logs.TagExternalStorage))
zap.String("objID", addr.Object().EncodeToString()))
}
}
@@ -76,7 +76,6 @@ var _ frostfs.FrostFS = (*TestFrostFS)(nil)

type TestFrostFS struct {
objects map[string]*object.Object
copiesNumbers map[string][]uint32
objectErrors map[string]error
objectPutErrors map[string]error
containers map[string]*container.Container
@@ -89,7 +88,6 @@ type TestFrostFS struct {
func NewTestFrostFS(key *keys.PrivateKey) *TestFrostFS {
return &TestFrostFS{
objects: make(map[string]*object.Object),
copiesNumbers: make(map[string][]uint32),
objectErrors: make(map[string]error),
objectPutErrors: make(map[string]error),
containers: make(map[string]*container.Container),
@@ -128,10 +126,6 @@ func (t *TestFrostFS) Objects() []*object.Object {
return res
}

func (t *TestFrostFS) CopiesNumbers(addr string) []uint32 {
return t.copiesNumbers[addr]
}

func (t *TestFrostFS) ObjectExists(objID oid.ID) bool {
for _, obj := range t.objects {
if id, _ := obj.ID(); id.Equals(objID) {
@@ -352,8 +346,6 @@ func (t *TestFrostFS) CreateObject(ctx context.Context, prm frostfs.PrmObjectCre

addr := newAddress(cnrID, objID)
t.objects[addr.EncodeToString()] = obj
t.copiesNumbers[addr.EncodeToString()] = prm.CopiesNumber

return &frostfs.CreateObjectResult{
ObjectID: objID,
CreationEpoch: t.currentEpoch - 1,
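The copiesNumbers bookkeeping added to the mock above exists so tests can assert which copies-number vector actually reached the storage layer. A hypothetical assertion using the new accessor (addr and the expected vector are placeholders for whatever the test configured):

// addr is the string-encoded address of the object the test created.
require.Equal(t, []uint32{2, 3, 4}, mockFrostFS.CopiesNumbers(addr))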
@@ -543,9 +543,7 @@ func (n *Layer) GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams)

n.reqLogger(ctx).Debug(logs.GetObject,
zap.Stringer("cid", p.BktInfo.CID),
zap.Stringer("oid", objInfo.ObjectInfo.ID),
logs.TagField(logs.TagDatapath),
)
zap.Stringer("oid", objInfo.ObjectInfo.ID))

return objInfo, nil
}
@@ -598,8 +596,8 @@ func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
if !client.IsErrObjectAlreadyRemoved(obj.Error) && !client.IsErrObjectNotFound(obj.Error) {
return obj
}
n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting, zap.Stringer("cid", bkt.CID),
zap.String("oid", obj.VersionID), zap.Error(obj.Error), logs.TagField(logs.TagExternalStorage))
n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting,
zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID), zap.Error(obj.Error))
}

if obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeVersion.ID); obj.Error != nil {
@@ -637,8 +635,8 @@ func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
if !client.IsErrObjectAlreadyRemoved(obj.Error) && !client.IsErrObjectNotFound(obj.Error) {
return obj
}
n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting, zap.Stringer("cid", bkt.CID),
zap.String("oid", obj.VersionID), zap.Error(obj.Error), logs.TagField(logs.TagExternalStorage))
n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting,
zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID), zap.Error(obj.Error))
}
}

@@ -753,7 +751,7 @@ func (n *Layer) getNodeVersionsToDelete(ctx context.Context, bkt *data.BucketInf
return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apierr.GetAPIError(apierr.ErrNoSuchVersion))
}

n.reqLogger(ctx).Debug(logs.GetTreeNodeToDelete, zap.Stringer("cid", bkt.CID), zap.Strings("oids", oids), logs.TagField(logs.TagDatapath))
n.reqLogger(ctx).Debug(logs.GetTreeNodeToDelete, zap.Stringer("cid", bkt.CID), zap.Strings("oids", oids))

return versionsToDelete, nil
}
@@ -823,7 +821,7 @@ func (n *Layer) DeleteObjects(ctx context.Context, p *DeleteObjectParams) []*Ver
for i, obj := range p.Objects {
p.Objects[i] = n.deleteObject(ctx, p.BktInfo, p.Settings, obj, p.NetworkInfo)
if p.IsMultiple && p.Objects[i].Error != nil {
n.reqLogger(ctx).Error(logs.CouldntDeleteObject, zap.String("object", obj.String()), zap.Error(p.Objects[i].Error), logs.TagField(logs.TagExternalStorage))
n.reqLogger(ctx).Error(logs.CouldntDeleteObject, zap.String("object", obj.String()), zap.Error(p.Objects[i].Error))
}
}

@@ -853,7 +851,7 @@ func (n *Layer) ResolveBucket(ctx context.Context, zone, name string) (cid.ID, e
return cid.ID{}, err
}

n.reqLogger(ctx).Info(logs.ResolveBucket, zap.Stringer("cid", cnrID), logs.TagField(logs.TagDatapath))
n.reqLogger(ctx).Info(logs.ResolveBucket, zap.Stringer("cid", cnrID))
}

return cnrID, nil
@@ -878,12 +876,12 @@ func (n *Layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {

corsObj, err := n.treeService.GetBucketCORS(ctx, p.BktInfo)
if err != nil {
n.reqLogger(ctx).Error(logs.GetBucketCorsFromTree, zap.Error(err), logs.TagField(logs.TagExternalStorageTree))
n.reqLogger(ctx).Error(logs.GetBucketCors, zap.Error(err))
}

lifecycleObj, treeErr := n.treeService.GetBucketLifecycleConfiguration(ctx, p.BktInfo)
if treeErr != nil {
n.reqLogger(ctx).Error(logs.GetBucketLifecycle, zap.Error(treeErr), logs.TagField(logs.TagExternalStorageTree))
n.reqLogger(ctx).Error(logs.GetBucketLifecycle, zap.Error(treeErr))
}

err = n.frostFS.DeleteContainer(ctx, p.BktInfo.CID, p.SessionToken)
@@ -79,9 +79,7 @@ func (n *Layer) deleteLifecycleObject(ctx context.Context, bktInfo *data.BucketI
if err := n.objectDeleteWithAuth(ctx, lifecycleBkt, addr.Object(), prmAuth); err != nil {
n.reqLogger(ctx).Error(logs.CouldntDeleteLifecycleObject, zap.Error(err),
zap.String("cid", lifecycleBkt.CID.EncodeToString()),
zap.String("oid", addr.Object().EncodeToString()),
logs.TagField(logs.TagExternalStorage),
)
zap.String("oid", addr.Object().EncodeToString()))
}
}
@@ -541,7 +541,7 @@ func (n *Layer) initWorkerPool(ctx context.Context, size int, p commonVersionsLi

realSize, err := GetObjectSize(oi)
if err != nil {
reqLog.Debug(logs.FailedToGetRealObjectSize, zap.Error(err), logs.TagField(logs.TagDatapath))
reqLog.Debug(logs.FailedToGetRealObjectSize, zap.Error(err))
realSize = oi.Size
}

@@ -554,7 +554,7 @@ func (n *Layer) initWorkerPool(ctx context.Context, size int, p commonVersionsLi
})
if err != nil {
wg.Done()
reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath))
reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
}
}(node)
}
@@ -645,7 +645,7 @@ func (n *Layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo

meta, err := n.objectHead(ctx, bktInfo, node.OID)
if err != nil {
n.reqLogger(ctx).Warn(logs.CouldNotFetchObjectMeta, zap.Error(err), logs.TagField(logs.TagExternalStorage))
n.reqLogger(ctx).Warn(logs.CouldNotFetchObjectMeta, zap.Error(err))
return nil
}
@@ -58,9 +58,10 @@ type (
}

CreateMultipartParams struct {
Info *UploadInfoParams
Header map[string]string
Data *UploadData
Info *UploadInfoParams
Header map[string]string
Data *UploadData
CopiesNumbers []uint32
}

UploadData struct {
@@ -74,7 +75,6 @@ type (
Reader io.Reader
ContentMD5 string
ContentSHA256Hash string
CopiesNumbers []uint32
}

UploadCopyParams struct {
@@ -85,13 +85,11 @@ type (
SrcEncryption encryption.Params
PartNumber int
Range *RangeParams
CopiesNumbers []uint32
}

CompleteMultipartParams struct {
Info *UploadInfoParams
Parts []*CompletedPart
CopiesNumbers []uint32
Info *UploadInfoParams
Parts []*CompletedPart
}

CompletedPart struct {
@@ -167,6 +165,7 @@ func (n *Layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
Owner: n.gateOwner,
Created: TimeNow(ctx),
Meta: make(map[string]string, metaSize),
CopiesNumbers: p.CopiesNumbers,
CreationEpoch: networkInfo.CurrentEpoch(),
}

@@ -213,7 +212,7 @@ func (n *Layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, er
func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInfo, p *UploadPartParams) (*data.ObjectInfo, error) {
encInfo := FormEncryptionInfo(multipartInfo.Meta)
if err := p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err), logs.TagField(logs.TagDatapath))
n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err))
return nil, apierr.GetAPIError(apierr.ErrInvalidEncryptionParameters)
}

@@ -223,7 +222,7 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
Attributes: make([][2]string, 2),
Payload: p.Reader,
CreationTime: TimeNow(ctx),
CopiesNumber: p.CopiesNumbers,
CopiesNumber: multipartInfo.CopiesNumbers,
}

decSize := p.Size
@@ -266,10 +265,7 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
err = n.frostFS.DeleteObject(ctx, prm)
if err != nil {
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject,
zap.Stringer("cid", bktInfo.CID),
zap.Stringer("oid", createdObj.ID),
logs.TagField(logs.TagExternalStorage))
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID))
}
return nil, apierr.GetAPIError(apierr.ErrInvalidDigest)
}
@@ -286,10 +282,7 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
if !bytes.Equal(contentHashBytes, createdObj.HashSum) {
err = n.objectDelete(ctx, bktInfo, createdObj.ID)
if err != nil {
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject,
zap.Stringer("cid", bktInfo.CID),
zap.Stringer("oid", createdObj.ID),
logs.TagField(logs.TagExternalStorage))
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID))
}
return nil, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch)
}
@@ -297,7 +290,7 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf

n.reqLogger(ctx).Debug(logs.UploadPart,
zap.String("multipart upload", p.Info.UploadID), zap.Int("part number", p.PartNumber),
zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID), logs.TagField(logs.TagDatapath))
zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID))

partInfo := &data.PartInfo{
Key: p.Info.Key,
@@ -320,8 +313,7 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil {
n.reqLogger(ctx).Error(logs.CouldntDeleteOldPartObject, zap.Error(err),
zap.String("cid", bktInfo.CID.EncodeToString()),
zap.String("oid", oldPartID.EncodeToString()),
logs.TagField(logs.TagExternalStorage))
zap.String("oid", oldPartID.EncodeToString()))
}
}
}
@@ -380,11 +372,10 @@ func (n *Layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.
}

params := &UploadPartParams{
Info: p.Info,
PartNumber: p.PartNumber,
Size: size,
Reader: objPayload,
CopiesNumbers: p.CopiesNumbers,
Info: p.Info,
PartNumber: p.PartNumber,
Size: size,
Reader: objPayload,
}

return n.uploadPart(ctx, multipartInfo, params)
@@ -481,15 +472,14 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
Header: initMetadata,
Size: &multipartObjetSize,
Encryption: p.Info.Encryption,
CopiesNumbers: p.CopiesNumbers,
CopiesNumbers: multipartInfo.CopiesNumbers,
CompleteMD5Hash: hex.EncodeToString(md5Hash.Sum(nil)) + "-" + strconv.Itoa(len(p.Parts)),
})
if err != nil {
n.reqLogger(ctx).Error(logs.CouldNotPutCompletedObject,
zap.String("uploadID", p.Info.UploadID),
zap.String("uploadKey", p.Info.Key),
zap.Error(err),
logs.TagField(logs.TagExternalStorage))
zap.Error(err))

return nil, nil, apierr.GetAPIError(apierr.ErrInternalError)
}
@@ -501,8 +491,7 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil {
n.reqLogger(ctx).Warn(logs.CouldNotDeleteUploadPart,
zap.Stringer("cid", p.Info.Bkt.CID), zap.Stringer("oid", &partInfo.OID),
zap.Error(err),
logs.TagField(logs.TagExternalStorage))
zap.Error(err))
}
addr.SetObject(partInfo.OID)
n.cache.DeleteObject(addr)
@@ -596,7 +585,7 @@ func (n *Layer) deleteUploadedParts(ctx context.Context, bkt *data.BucketInfo, p
oids, err := relations.ListAllRelations(ctx, n.frostFS.Relations(), bkt.CID, info.OID, tokens)
if err != nil {
n.reqLogger(ctx).Warn(logs.FailedToListAllObjectRelations, zap.String("cid", bkt.CID.EncodeToString()),
zap.String("oid", info.OID.EncodeToString()), zap.Error(err), logs.TagField(logs.TagExternalStorage))
zap.String("oid", info.OID.EncodeToString()), zap.Error(err))
continue
}
members = append(members, append(oids, info.OID)...)

@@ -605,7 +594,7 @@ func (n *Layer) deleteUploadedParts(ctx context.Context, bkt *data.BucketInfo, p

err := n.putTombstones(ctx, bkt, networkInfo, members)
if err != nil {
n.reqLogger(ctx).Warn(logs.FailedToPutTombstones, zap.Error(err), logs.TagField(logs.TagExternalStorage))
n.reqLogger(ctx).Warn(logs.FailedToPutTombstones, zap.Error(err))
}
}

@@ -618,7 +607,7 @@ func (n *Layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn

encInfo := FormEncryptionInfo(multipartInfo.Meta)
if err = p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err), logs.TagField(logs.TagDatapath))
n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err))
return nil, apierr.GetAPIError(apierr.ErrInvalidEncryptionParameters)
}

@@ -710,8 +699,7 @@ func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.
zap.Stringer("cid", p.Bkt.CID),
zap.String("upload id", p.UploadID),
zap.Ints("part numbers", partsNumbers),
zap.Strings("oids", oids),
logs.TagField(logs.TagDatapath))
zap.Strings("oids", oids))

return multipartInfo, res, nil
}
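The net effect of the multipart hunks above: copies numbers are captured once in CreateMultipartUpload, persisted on the MultipartInfo record, and reused by every later part upload and by completion, instead of being re-resolved per request. A compressed sketch of the flow, using only names that appear in the diff:

info := &data.MultipartInfo{
	Meta:          make(map[string]string, metaSize),
	CopiesNumbers: p.CopiesNumbers, // resolved once at create time
}
// later, in uploadPart and CompleteMultipartUpload:
prm.CopiesNumber = multipartInfo.CopiesNumbers // reused, not recomputed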
@@ -295,11 +295,7 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
if !bytes.Equal(headerMd5Hash, createdObj.MD5Sum) {
err = n.objectDelete(ctx, p.BktInfo, createdObj.ID)
if err != nil {
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject,
zap.Stringer("cid", p.BktInfo.CID),
zap.Stringer("oid", createdObj.ID),
logs.TagField(logs.TagExternalStorage),
)
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID))
}
return nil, apierr.GetAPIError(apierr.ErrBadDigest)
}
@@ -313,17 +309,13 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
if !bytes.Equal(contentHashBytes, createdObj.HashSum) {
err = n.objectDelete(ctx, p.BktInfo, createdObj.ID)
if err != nil {
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject,
zap.Stringer("cid", p.BktInfo.CID),
zap.Stringer("oid", createdObj.ID),
logs.TagField(logs.TagExternalStorage))
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID))
}
return nil, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch)
}
}

n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID),
zap.Stringer("oid", createdObj.ID), logs.TagField(logs.TagExternalStorage))
n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID))
now := TimeNow(ctx)
newVersion := &data.NodeVersion{
BaseNodeVersion: data.BaseNodeVersion{
@@ -418,7 +410,7 @@ func (n *Layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.Bucke

meta, err := n.objectHead(ctx, bkt, node.OID)
if err != nil {
if client.IsErrObjectNotFound(err) || client.IsErrObjectAlreadyRemoved(err) {
if client.IsErrObjectNotFound(err) {
return nil, fmt.Errorf("%w: %s; %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error(), node.OID.EncodeToString())
}
return nil, err
@@ -475,7 +467,7 @@ func (n *Layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb

meta, err := n.objectHead(ctx, bkt, foundVersion.OID)
if err != nil {
if client.IsErrObjectNotFound(err) || client.IsErrObjectAlreadyRemoved(err) {
if client.IsErrObjectNotFound(err) {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchVersion), err.Error())
}
return nil, err
@@ -548,7 +540,7 @@ func (n *Layer) objectPutAndHash(ctx context.Context, prm frostfs.PrmObjectCreat
func (n *Layer) payloadDiscard(ctx context.Context, payload io.Reader) {
if payload != nil {
if _, errDiscard := io.Copy(io.Discard, payload); errDiscard != nil {
n.reqLogger(ctx).Warn(logs.FailedToDiscardPutPayloadProbablyGoroutineLeaks, zap.Error(errDiscard), logs.TagField(logs.TagDatapath))
n.reqLogger(ctx).Warn(logs.FailedToDiscardPutPayloadProbablyGoroutineLeaks, zap.Error(errDiscard))
}
}
}
@@ -558,7 +550,7 @@ type logWrapper struct {
}

func (l *logWrapper) Printf(format string, args ...interface{}) {
l.log.Info(fmt.Sprintf(format, args...), logs.TagField(logs.TagDatapath))
l.log.Info(fmt.Sprintf(format, args...))
}

func IsSystemHeader(key string) bool {
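The headLastVersionIfNotDeleted and headVersion hunks above widen (and, in this revert, narrow back) the set of storage errors that map to S3 NoSuchKey/NoSuchVersion. The broader form looks like this, reassembled from the diff:

meta, err := n.objectHead(ctx, bkt, node.OID)
if err != nil {
	// Treat "already removed" like "not found": surface a 404 (NoSuchKey)
	// instead of a 500 when the tree still references a gone object.
	if client.IsErrObjectNotFound(err) || client.IsErrObjectAlreadyRemoved(err) {
		return nil, fmt.Errorf("%w: %s; %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error(), node.OID.EncodeToString())
	}
	return nil, err
}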
@@ -217,7 +217,7 @@ func (n *Layer) GetBucketSettings(ctx context.Context, bktInfo *data.BucketInfo)
return nil, err
}
settings = &data.BucketSettings{Versioning: data.VersioningUnversioned}
n.reqLogger(ctx).Debug(logs.BucketSettingsNotFoundUseDefaults, logs.TagField(logs.TagDatapath))
n.reqLogger(ctx).Debug(logs.BucketSettingsNotFoundUseDefaults)
}

n.cache.PutSettings(owner, bktInfo, settings)
@@ -168,8 +168,7 @@ func (n *Layer) getNodeVersion(ctx context.Context, objVersion *data.ObjectVersi

if err == nil && version != nil && !version.IsDeleteMarker {
n.reqLogger(ctx).Debug(logs.GetTreeNode,
zap.Stringer("cid", objVersion.BktInfo.CID),
zap.Stringer("oid", version.OID), logs.TagField(logs.TagExternalStorageTree))
zap.Stringer("cid", objVersion.BktInfo.CID), zap.Stringer("oid", version.OID))
}

return version, err
@@ -65,13 +65,13 @@ func (n *Layer) submitPutTombstone(ctx context.Context, bkt *data.BucketInfo, me
defer wg.Done()

if err := n.putTombstoneObject(ctx, tomb, bkt); err != nil {
n.reqLogger(ctx).Warn(logs.FailedToPutTombstoneObject, zap.String("cid", bkt.CID.EncodeToString()), zap.Error(err), logs.TagField(logs.TagExternalStorage))
n.reqLogger(ctx).Warn(logs.FailedToPutTombstoneObject, zap.String("cid", bkt.CID.EncodeToString()), zap.Error(err))
errCh <- fmt.Errorf("put tombstone object: %w", err)
}
})
if err != nil {
wg.Done()
n.reqLogger(ctx).Warn(logs.FailedToSubmitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath))
n.reqLogger(ctx).Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
errCh <- fmt.Errorf("submit task to pool: %w", err)
}
}
@@ -106,7 +106,7 @@ func (n *Layer) getMembers(ctx context.Context, cnrID cid.ID, objID oid.ID, toke
}

n.reqLogger(ctx).Warn(logs.FailedToListAllObjectRelations, zap.String("cid", cnrID.EncodeToString()),
zap.String("oid", objID.EncodeToString()), zap.Error(err), logs.TagField(logs.TagExternalStorage))
zap.String("oid", objID.EncodeToString()), zap.Error(err))
return nil, nil
}
return append(oids, objID), nil
@@ -110,7 +110,7 @@ func preparePathStyleAddress(reqInfo *ReqInfo, r *http.Request, reqLogger *zap.L
// https://github.com/go-chi/chi/issues/641
// https://github.com/go-chi/chi/issues/642
if obj, err := url.PathUnescape(reqInfo.ObjectName); err != nil {
reqLogger.Warn(logs.FailedToUnescapeObjectName, zap.Error(err), logs.TagField(logs.TagDatapath))
reqLogger.Warn(logs.FailedToUnescapeObjectName, zap.Error(err))
} else {
reqInfo.ObjectName = obj
}
@@ -53,15 +53,15 @@ func Auth(center Center, log *zap.Logger) Func {
box, err := center.Authenticate(r)
if err != nil {
if errors.Is(err, ErrNoAuthorizationHeader) {
reqLogOrDefault(ctx, log).Debug(logs.CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed, zap.Error(err), logs.TagField(logs.TagDatapath))
reqLogOrDefault(ctx, log).Debug(logs.CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed, zap.Error(err))
} else {
reqLogOrDefault(ctx, log).Error(logs.FailedToPassAuthentication, zap.Error(err), logs.TagField(logs.TagDatapath))
reqLogOrDefault(ctx, log).Error(logs.FailedToPassAuthentication, zap.Error(err))
err = apierr.TransformToS3Error(err)
if err.(apierr.Error).ErrCode == apierr.ErrInternalError {
err = apierr.GetAPIError(apierr.ErrAccessDenied)
}
if _, wrErr := WriteErrorResponse(w, GetReqInfo(r.Context()), err); wrErr != nil {
reqLogOrDefault(ctx, log).Error(logs.FailedToWriteResponse, zap.Error(wrErr), logs.TagField(logs.TagDatapath))
reqLogOrDefault(ctx, log).Error(logs.FailedToWriteResponse, zap.Error(wrErr))
}
return
}
@@ -71,7 +71,7 @@ func Auth(center Center, log *zap.Logger) Func {
if box.AccessBox.Gate.BearerToken != nil {
reqInfo.User = bearer.ResolveIssuer(*box.AccessBox.Gate.BearerToken).String()
}
reqLogOrDefault(ctx, log).Debug(logs.SuccessfulAuth, zap.String("accessKeyID", box.AuthHeaders.AccessKeyID), logs.TagField(logs.TagDatapath))
reqLogOrDefault(ctx, log).Debug(logs.SuccessfulAuth, zap.String("accessKeyID", box.AuthHeaders.AccessKeyID))
}

h.ServeHTTP(w, r.WithContext(ctx))
@@ -89,15 +89,15 @@ func FrostfsIDValidation(frostfsID FrostFSIDValidator, log *zap.Logger) Func {
ctx := r.Context()
bd, err := GetBoxData(ctx)
if err != nil || bd.Gate.BearerToken == nil {
reqLogOrDefault(ctx, log).Debug(logs.AnonRequestSkipFrostfsIDValidation, logs.TagField(logs.TagDatapath))
reqLogOrDefault(ctx, log).Debug(logs.AnonRequestSkipFrostfsIDValidation)
h.ServeHTTP(w, r)
return
}

if err = validateBearerToken(frostfsID, bd.Gate.BearerToken); err != nil {
reqLogOrDefault(ctx, log).Error(logs.FrostfsIDValidationFailed, zap.Error(err), logs.TagField(logs.TagDatapath))
reqLogOrDefault(ctx, log).Error(logs.FrostfsIDValidationFailed, zap.Error(err))
if _, wrErr := WriteErrorResponse(w, GetReqInfo(r.Context()), err); wrErr != nil {
reqLogOrDefault(ctx, log).Error(logs.FailedToWriteResponse, zap.Error(wrErr), logs.TagField(logs.TagDatapath))
reqLogOrDefault(ctx, log).Error(logs.FailedToWriteResponse, zap.Error(wrErr))
}
return
}
@ -23,7 +23,7 @@ type (
|
|||
)
|
||||
|
||||
func LogHTTP(l *zap.Logger, _ LogHTTPSettings) Func {
|
||||
l.Warn(logs.LogHTTPDisabledInThisBuild, logs.TagField(logs.TagApp))
|
||||
l.Warn(logs.LogHTTPDisabledInThisBuild)
|
||||
return func(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
h.ServeHTTP(w, r)
|
||||
|
|
|
@ -139,7 +139,7 @@ func resolveCID(log *zap.Logger, resolveContainerID ContainerIDResolveFunc) cidR
|
|||
|
||||
containerID, err := resolveContainerID(ctx, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
reqLogOrDefault(ctx, log).Debug(logs.FailedToResolveCID, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
reqLogOrDefault(ctx, log).Debug(logs.FailedToResolveCID, zap.Error(err))
|
||||
return ""
|
||||
}
|
||||
|
||||
|
|
|
@ -90,10 +90,10 @@ func PolicyCheck(cfg PolicyConfig) Func {
|
|||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
if err := policyCheck(r, cfg); err != nil {
|
||||
reqLogOrDefault(ctx, cfg.Log).Error(logs.PolicyValidationFailed, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
reqLogOrDefault(ctx, cfg.Log).Error(logs.PolicyValidationFailed, zap.Error(err))
|
||||
err = apierr.TransformToS3Error(err)
|
||||
if _, wrErr := WriteErrorResponse(w, GetReqInfo(ctx), err); wrErr != nil {
|
||||
reqLogOrDefault(ctx, cfg.Log).Error(logs.FailedToWriteResponse, zap.Error(wrErr), logs.TagField(logs.TagDatapath))
|
||||
reqLogOrDefault(ctx, cfg.Log).Error(logs.FailedToWriteResponse, zap.Error(wrErr))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -200,9 +200,7 @@ func getPolicyRequest(r *http.Request, cfg PolicyConfig, reqType ReqType, bktNam
|
|||
|
||||
reqLogOrDefault(r.Context(), cfg.Log).Debug(logs.PolicyRequest, zap.String("action", op),
|
||||
zap.String("resource", res), zap.Any("request properties", requestProps),
|
||||
zap.Any("resource properties", resourceProps),
|
||||
logs.TagField(logs.TagDatapath),
|
||||
)
|
||||
zap.Any("resource properties", resourceProps))
|
||||
|
||||
return testutil.NewRequest(op, testutil.NewResource(res, resourceProps), requestProps), pk, groups, nil
|
||||
}
|
||||
|
|
|
@ -166,7 +166,7 @@ func Request(log *zap.Logger, settings RequestSettings) Func {
|
|||
// generate random UUIDv4
|
||||
id, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
log.Error(logs.FailedToGenerateRequestID, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
log.Error(logs.FailedToGenerateRequestID, zap.Error(err))
|
||||
}
|
||||
|
||||
// set request id into response header
|
||||
|
@ -198,8 +198,7 @@ func Request(log *zap.Logger, settings RequestSettings) Func {
|
|||
r = r.WithContext(SetReqLogger(ctx, reqLogger))
|
||||
|
||||
reqLogger.Info(logs.RequestStart, zap.String("host", r.Host),
|
||||
zap.String("remote_host", reqInfo.RemoteHost), zap.String("namespace", reqInfo.Namespace),
|
||||
logs.TagField(logs.TagDatapath))
|
||||
zap.String("remote_host", reqInfo.RemoteHost), zap.String("namespace", reqInfo.Namespace))
|
||||
|
||||
// continue execution
|
||||
h.ServeHTTP(lw, r)
|
||||
|
|
|
@ -331,7 +331,7 @@ func LogSuccessResponse(l *zap.Logger) Func {
|
|||
fields = append(fields, zap.String("user", reqInfo.User))
|
||||
}
|
||||
|
||||
reqLogger.Info(logs.RequestEnd, append(fields, logs.TagField(logs.TagDatapath))...)
|
||||
reqLogger.Info(logs.RequestEnd, fields...)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -245,14 +245,13 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
|
|||
zap.String("method", reqInfo.API),
|
||||
zap.String("http method", r.Method),
|
||||
zap.String("url", r.RequestURI),
|
||||
logs.TagField(logs.TagDatapath),
|
||||
}
|
||||
|
||||
if wrErr != nil {
|
||||
fields = append(fields, zap.NamedError("write_response_error", wrErr))
|
||||
}
|
||||
|
||||
log.Error(logs.RequestUnmatched, append(fields, logs.TagField(logs.TagDatapath))...)
|
||||
log.Error(logs.RequestUnmatched, fields...)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -267,14 +266,13 @@ func notSupportedHandler() http.HandlerFunc {
|
|||
fields := []zap.Field{
|
||||
zap.String("http method", r.Method),
|
||||
zap.String("url", r.RequestURI),
|
||||
logs.TagField(logs.TagDatapath),
|
||||
}
|
||||
|
||||
if wrErr != nil {
|
||||
fields = append(fields, zap.NamedError("write_response_error", wrErr))
|
||||
}
|
||||
|
||||
log.Error(logs.NotSupported, append(fields, logs.TagField(logs.TagDatapath))...)
|
||||
log.Error(logs.NotSupported, fields...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
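Every hunk above follows the same shape: the tagged side attaches a field such as `logs.TagField(logs.TagDatapath)` to each zap call so a core wrapper can later filter records per tag. A minimal, self-contained sketch of that pattern — the `TagField` helper below is a stand-in for the gateway's `logs` package, not the real one:

```go
package main

import "go.uber.org/zap"

// TagField mimics the gateway's logs.TagField helper: it is an ordinary
// zap string field stored under a well-known key ("tag").
func TagField(tag string) zap.Field { return zap.String("tag", tag) }

func main() {
	log, _ := zap.NewProduction()
	defer log.Sync()

	// Untagged call (the plain side of the hunks above).
	log.Info("request start")

	// Tagged call (the tagged side): the extra field lets a filtering
	// zapcore.Core decide per tag whether to keep the record.
	log.Info("request start", TagField("datapath"))
}
```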
cmd/s3-gw/app.go
@@ -53,7 +53,6 @@ import (
"github.com/panjf2000/ants/v2"
"github.com/spf13/viper"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"golang.org/x/exp/slices"
"golang.org/x/text/encoding/ianaindex"
"google.golang.org/grpc"

@@ -92,10 +91,6 @@ type (
wrkDone chan struct{}
}
tagsConfig struct {
tagLogs sync.Map
}
loggerSettings struct {
mu sync.RWMutex
appMetrics *metrics.AppMetrics

@@ -104,7 +99,6 @@ type (
appSettings struct {
logLevel zap.AtomicLevel
httpLogging s3middleware.LogHTTPConfig
tagsConfig *tagsConfig
maxClient maxClientsConfig
defaultMaxAge int
reconnectInterval time.Duration

@@ -144,54 +138,13 @@ type (
deadline time.Duration
count int
}
Logger struct {
logger *zap.Logger
lvl zap.AtomicLevel
}
)
func (t *tagsConfig) LevelEnabled(tag string, tgtLevel zapcore.Level) bool {
lvl, ok := t.tagLogs.Load(tag)
if !ok {
return false
}
return lvl.(zapcore.Level).Enabled(tgtLevel)
}
func (t *tagsConfig) update(cfg *viper.Viper) error {
tags, err := fetchLogTagsConfig(cfg)
if err != nil {
return err
}
t.tagLogs.Range(func(key, value any) bool {
k := key.(string)
v := value.(zapcore.Level)
if lvl, ok := tags[k]; ok {
if lvl != v {
t.tagLogs.Store(key, lvl)
}
} else {
t.tagLogs.Delete(key)
delete(tags, k)
}
return true
})
for k, v := range tags {
t.tagLogs.Store(k, v)
}
return nil
}
func newTagsConfig(v *viper.Viper) *tagsConfig {
var t tagsConfig
if err := t.update(v); err != nil {
// panic here is analogue of the similar panic during common log level initialization.
panic(err.Error())
}
return &t
}
func (s *loggerSettings) DroppedLogsInc() {
s.mu.RLock()
defer s.mu.RUnlock()

@@ -210,10 +163,8 @@ func (s *loggerSettings) setMetrics(appMetrics *metrics.AppMetrics) {
func newApp(ctx context.Context, cfg *appCfg) *App {
logSettings := &loggerSettings{}
tagConfig := newTagsConfig(cfg.config())
log := pickLogger(cfg.config(), logSettings, tagConfig)
log := pickLogger(cfg.config(), logSettings)
settings := newAppSettings(log, cfg.config())
settings.tagsConfig = tagConfig
appCache := layer.NewCache(getCacheOptions(cfg.config(), log.logger))
app := &App{

@@ -254,7 +205,7 @@ func (a *App) initAuthCenter(ctx context.Context) {
if a.config().IsSet(cfgContainersAccessBox) {
cnrID, err := a.resolveContainerID(ctx, cfgContainersAccessBox)
if err != nil {
a.log.Fatal(logs.CouldNotFetchAccessBoxContainerInfo, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.CouldNotFetchAccessBoxContainerInfo, zap.Error(err))
}
a.settings.accessbox = &cnrID
}

@@ -273,7 +224,7 @@ func (a *App) initLayer(ctx context.Context) {
// prepare random key for anonymous requests
randomKey, err := keys.NewPrivateKey()
if err != nil {
a.log.Fatal(logs.CouldntGenerateRandomKey, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.CouldntGenerateRandomKey, zap.Error(err))
}
var gateOwner user.ID

@@ -283,7 +234,7 @@ func (a *App) initLayer(ctx context.Context) {
if a.config().IsSet(cfgContainersCORS) {
corsCnrInfo, err = a.fetchContainerInfo(ctx, cfgContainersCORS)
if err != nil {
a.log.Fatal(logs.CouldNotFetchCORSContainerInfo, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.CouldNotFetchCORSContainerInfo, zap.Error(err))
}
}

@@ -291,7 +242,7 @@ func (a *App) initLayer(ctx context.Context) {
if a.config().IsSet(cfgContainersLifecycle) {
lifecycleCnrInfo, err = a.fetchContainerInfo(ctx, cfgContainersLifecycle)
if err != nil {
a.log.Fatal(logs.CouldNotFetchLifecycleContainerInfo, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.CouldNotFetchLifecycleContainerInfo, zap.Error(err))
}
}

@@ -317,7 +268,7 @@ func (a *App) initLayer(ctx context.Context) {
func (a *App) initWorkerPool() *ants.Pool {
workerPool, err := ants.NewPool(a.settings.workerPoolSize)
if err != nil {
a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err))
}
return workerPool
}

@@ -326,7 +277,6 @@ func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
settings := &appSettings{
logLevel: log.lvl,
httpLogging: s3middleware.LogHTTPConfig{},
tagsConfig: newTagsConfig(v),
maxClient: newMaxClients(v),
defaultMaxAge: fetchDefaultMaxAge(v, log.logger),
reconnectInterval: fetchReconnectInterval(v),

@@ -673,7 +623,7 @@ func (a *App) initFrostfsID(ctx context.Context) {
},
})
if err != nil {
a.log.Fatal(logs.InitFrostfsIDFailed, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.InitFrostfsIDContractFailed, zap.Error(err))
}
a.frostfsid, err = frostfsid.NewFrostFSID(frostfsid.Config{

@@ -682,7 +632,7 @@ func (a *App) initFrostfsID(ctx context.Context) {
Logger: a.log,
})
if err != nil {
a.log.Fatal(logs.InitFrostfsIDFailed, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.InitFrostfsIDContractFailed, zap.Error(err))
}
}

@@ -698,7 +648,7 @@ func (a *App) initPolicyStorage(ctx context.Context) {
},
})
if err != nil {
a.log.Fatal(logs.InitPolicyContractFailed, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.InitPolicyContractFailed, zap.Error(err))
}
a.policyStorage = policy.NewStorage(policy.StorageConfig{

@@ -712,7 +662,7 @@ func (a *App) initResolver() {
var err error
a.bucketResolver, err = resolver.NewBucketResolver(a.getResolverOrder(), a.getResolverConfig())
if err != nil {
a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err))
}
}

@@ -727,11 +677,11 @@ func (a *App) getResolverOrder() []string {
order := a.config().GetStringSlice(cfgResolveOrder)
if a.config().GetString(cfgRPCEndpoint) == "" {
order = remove(order, resolver.NNSResolver)
a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided, logs.TagField(logs.TagApp))
a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided)
}
if len(order) == 0 {
a.log.Info(logs.ContainerResolverWillBeDisabled, logs.TagField(logs.TagApp))
a.log.Info(logs.ContainerResolverWillBeDisabled)
}
return order

@@ -754,13 +704,13 @@ func (a *App) initTracing(ctx context.Context) {
if trustedCa := a.config().GetString(cfgTracingTrustedCa); trustedCa != "" {
caBytes, err := os.ReadFile(trustedCa)
if err != nil {
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
return
}
certPool := x509.NewCertPool()
ok := certPool.AppendCertsFromPEM(caBytes)
if !ok {
a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"))
return
}
cfg.ServerCaCertPool = certPool

@@ -768,17 +718,17 @@ func (a *App) initTracing(ctx context.Context) {
attributes, err := fetchTracingAttributes(a.config())
if err != nil {
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
return
}
cfg.Attributes = attributes
updated, err := tracing.Setup(ctx, cfg)
if err != nil {
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
}
if updated {
a.log.Info(logs.TracingConfigUpdated, logs.TagField(logs.TagApp))
a.log.Info(logs.TracingConfigUpdated)
}
}

@@ -788,7 +738,7 @@ func (a *App) shutdownTracing() {
defer cancel()
if err := tracing.Shutdown(shdnCtx); err != nil {
a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err))
}
}

@@ -805,7 +755,7 @@ func newMaxClients(cfg *viper.Viper) maxClientsConfig {
func getDialerSource(logger *zap.Logger, cfg *viper.Viper) *internalnet.DialerSource {
source, err := internalnet.NewDialerSource(fetchMultinetConfig(cfg, logger))
if err != nil {
logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err), logs.TagField(logs.TagApp))
logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err))
}
return source
}

@@ -817,12 +767,12 @@ func (a *App) initPools(ctx context.Context) {
password := wallet.GetPassword(a.config(), cfgWalletPassphrase)
key, err := wallet.GetKeyFromPath(a.config().GetString(cfgWalletPath), a.config().GetString(cfgWalletAddress), password)
if err != nil {
a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err))
}
prm.SetKey(&key.PrivateKey)
prmTree.SetKey(key)
a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())), logs.TagField(logs.TagApp))
a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))
for _, peer := range fetchPeers(a.log, a.config()) {
prm.AddNode(peer)

@@ -833,8 +783,9 @@ func (a *App) initPools(ctx context.Context) {
prm.SetNodeDialTimeout(connTimeout)
prmTree.SetNodeDialTimeout(connTimeout)
prm.SetNodeStreamTimeout(fetchStreamTimeout(a.config(), cfgStreamTimeout))
prmTree.SetNodeStreamTimeout(fetchStreamTimeout(a.config(), cfgTreeStreamTimeout))
streamTimeout := fetchStreamTimeout(a.config())
prm.SetNodeStreamTimeout(streamTimeout)
prmTree.SetNodeStreamTimeout(streamTimeout)
healthCheckTimeout := fetchHealthCheckTimeout(a.config())
prm.SetHealthcheckTimeout(healthCheckTimeout)

@@ -849,8 +800,8 @@ func (a *App) initPools(ctx context.Context) {
prm.SetGracefulCloseOnSwitchTimeout(fetchSetGracefulCloseOnSwitchTimeout(a.config()))
prm.SetLogger(a.log.With(logs.TagField(logs.TagDatapath)))
prmTree.SetLogger(a.log.With(logs.TagField(logs.TagDatapath)))
prm.SetLogger(a.log)
prmTree.SetLogger(a.log)
prmTree.SetMaxRequestAttempts(a.config().GetInt(cfgTreePoolMaxAttempts))

@@ -864,11 +815,11 @@ func (a *App) initPools(ctx context.Context) {
p, err := pool.NewPool(prm)
if err != nil {
a.log.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err))
}
if err = p.Dial(ctx); err != nil {
a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err))
}
if a.config().GetBool(cfgTreePoolNetmapSupport) {

@@ -877,10 +828,10 @@ func (a *App) initPools(ctx context.Context) {
treePool, err := treepool.NewPool(prmTree)
if err != nil {
a.log.Fatal(logs.FailedToCreateTreePool, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.FailedToCreateTreePool, zap.Error(err))
}
if err = treePool.Dial(ctx); err != nil {
a.log.Fatal(logs.FailedToDialTreePool, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.FailedToDialTreePool, zap.Error(err))
}
a.treePool = treePool

@@ -906,7 +857,6 @@ func (a *App) Wait() {
a.log.Info(logs.ApplicationStarted,
zap.String("name", "frostfs-s3-gw"),
zap.String("version", version.Version),
logs.TagField(logs.TagApp),
)
a.metrics.State().SetVersion(version.Version)

@@ -914,7 +864,7 @@ func (a *App) Wait() {
<-a.webDone // wait for web-server to be stopped
a.log.Info(logs.ApplicationFinished, logs.TagField(logs.TagApp))
a.log.Info(logs.ApplicationFinished)
}
func (a *App) setHealthStatus() {

@@ -960,11 +910,11 @@ func (a *App) Serve(ctx context.Context) {
for i := range servs {
go func(i int) {
a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()), logs.TagField(logs.TagApp))
a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()))
if err := srv.Serve(servs[i].Listener()); err != nil && err != http.ErrServerClosed {
a.metrics.MarkUnhealthy(servs[i].Address())
a.log.Fatal(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.ListenAndServe, zap.Error(err))
}
}(i)
}

@@ -989,7 +939,7 @@ LOOP:
ctx, cancel := shutdownContext()
defer cancel()
a.log.Info(logs.StoppingServer, zap.Error(srv.Shutdown(ctx)), logs.TagField(logs.TagApp))
a.log.Info(logs.StoppingServer, zap.Error(srv.Shutdown(ctx)))
a.metrics.Shutdown()
a.stopServices()

@@ -1003,23 +953,23 @@ func shutdownContext() (context.Context, context.CancelFunc) {
}
func (a *App) configReload(ctx context.Context) {
a.log.Info(logs.SIGHUPConfigReloadStarted, logs.TagField(logs.TagApp))
a.log.Info(logs.SIGHUPConfigReloadStarted)
if !a.config().IsSet(cmdConfig) && !a.config().IsSet(cmdConfigDir) {
a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed, logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed)
return
}
if err := a.cfg.reload(); err != nil {
a.log.Warn(logs.FailedToReloadConfig, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToReloadConfig, zap.Error(err))
return
}
if err := a.bucketResolver.UpdateResolvers(a.getResolverOrder()); err != nil {
a.log.Warn(logs.FailedToReloadResolvers, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToReloadResolvers, zap.Error(err))
}
if err := a.updateServers(); err != nil {
a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err))
}
a.setRuntimeParameters()

@@ -1033,22 +983,18 @@ func (a *App) configReload(ctx context.Context) {
a.initTracing(ctx)
a.setHealthStatus()
a.log.Info(logs.SIGHUPConfigReloadCompleted, logs.TagField(logs.TagApp))
a.log.Info(logs.SIGHUPConfigReloadCompleted)
}
func (a *App) updateSettings() {
if lvl, err := getLogLevel(a.config()); err != nil {
a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err))
} else {
a.settings.logLevel.SetLevel(lvl)
}
if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.config(), a.log)); err != nil {
a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
}
if err := a.settings.tagsConfig.update(a.config()); err != nil {
a.log.Warn(logs.TagsLogConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err))
}
a.settings.update(a.config(), a.log)

@@ -1079,17 +1025,17 @@ func (a *App) initServers(ctx context.Context) {
if err != nil {
a.unbindServers = append(a.unbindServers, serverInfo)
a.metrics.MarkUnhealthy(serverInfo.Address)
a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err), logs.TagField(logs.TagApp))...)
a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err))...)
continue
}
a.metrics.MarkHealthy(serverInfo.Address)
a.servers = append(a.servers, srv)
a.log.Info(logs.AddServer, append(fields, logs.TagField(logs.TagApp))...)
a.log.Info(logs.AddServer, fields...)
}
if len(a.servers) == 0 {
a.log.Fatal(logs.NoHealthyServers, logs.TagField(logs.TagApp))
a.log.Fatal(logs.NoHealthyServers)
}
}

@@ -1193,7 +1139,7 @@ func (a *App) initHandler() {
a.api, err = handler.New(a.log, a.obj, a.settings, a.policyStorage, a.frostfsid)
if err != nil {
a.log.Fatal(logs.CouldNotInitializeAPIHandler, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.CouldNotInitializeAPIHandler, zap.Error(err))
}
}

@@ -1225,7 +1171,7 @@ func (a *App) getServers() []Server {
func (a *App) setRuntimeParameters() {
if len(os.Getenv("GOMEMLIMIT")) != 0 {
// default limit < yaml limit < app env limit < GOMEMLIMIT
a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT, logs.TagField(logs.TagApp))
a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
return
}

@@ -1234,9 +1180,7 @@ func (a *App) setRuntimeParameters() {
if softMemoryLimit != previous {
a.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
zap.Int64("new_value", softMemoryLimit),
zap.Int64("old_value", previous),
logs.TagField(logs.TagApp),
)
zap.Int64("old_value", previous))
}
}

@@ -1262,7 +1206,7 @@ func (a *App) tryReconnect(ctx context.Context, sr *http.Server) bool {
a.mu.Lock()
defer a.mu.Unlock()
a.log.Info(logs.ServerReconnecting, logs.TagField(logs.TagApp))
a.log.Info(logs.ServerReconnecting)
var failedServers []ServerInfo
for _, serverInfo := range a.unbindServers {

@@ -1273,23 +1217,23 @@ func (a *App) tryReconnect(ctx context.Context, sr *http.Server) bool {
srv, err := newServer(ctx, serverInfo)
if err != nil {
a.log.Warn(logs.ServerReconnectFailed, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.ServerReconnectFailed, zap.Error(err))
failedServers = append(failedServers, serverInfo)
a.metrics.MarkUnhealthy(serverInfo.Address)
continue
}
go func() {
a.log.Info(logs.StartingServer, zap.String("address", srv.Address()), logs.TagField(logs.TagApp))
a.log.Info(logs.StartingServer, zap.String("address", srv.Address()))
a.metrics.MarkHealthy(serverInfo.Address)
if err = sr.Serve(srv.Listener()); err != nil && !errors.Is(err, http.ErrServerClosed) {
a.log.Warn(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.ListenAndServe, zap.Error(err))
a.metrics.MarkUnhealthy(serverInfo.Address)
}
}()
a.servers = append(a.servers, srv)
a.log.Info(logs.ServerReconnectedSuccessfully, append(fields, logs.TagField(logs.TagApp))...)
a.log.Info(logs.ServerReconnectedSuccessfully, fields...)
}
a.unbindServers = failedServers
@@ -21,13 +21,20 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"git.frostfs.info/TrueCloudLab/zapjournald"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/ssgreg/journald"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
const wildcardPlaceholder = "<wildcard>"
const (
destinationStdout = "stdout"
destinationJournald = "journald"
wildcardPlaceholder = "<wildcard>"
)
const (
defaultRebalanceInterval = 60 * time.Second

@@ -82,8 +89,7 @@ var (
defaultDefaultNamespaces = []string{"", "root"}
)
// Settings.
const (
const ( // Settings.
// Logger.
cfgLoggerLevel = "logger.level"
cfgLoggerDestination = "logger.destination"

@@ -93,11 +99,6 @@ const (
cfgLoggerSamplingThereafter = "logger.sampling.thereafter"
cfgLoggerSamplingInterval = "logger.sampling.interval"
cfgLoggerTags = "logger.tags"
cfgLoggerTagsPrefixTmpl = cfgLoggerTags + ".%d."
cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "name"
cfgLoggerTagsLevelTmpl = cfgLoggerTagsPrefixTmpl + "level"
// HttpLogging.
cfgHTTPLoggingEnabled = "http_logging.enabled"
cfgHTTPLoggingMaxBody = "http_logging.max_body"

@@ -122,7 +123,6 @@ const (
// Pool config.
cfgConnectTimeout = "connect_timeout"
cfgStreamTimeout = "stream_timeout"
cfgTreeStreamTimeout = "tree_stream_timeout"
cfgHealthcheckTimeout = "healthcheck_timeout"
cfgRebalanceInterval = "rebalance_interval"
cfgPoolErrorThreshold = "pool_error_threshold"

@@ -363,8 +363,8 @@ func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
return reconnect
}
func fetchStreamTimeout(cfg *viper.Viper, cfgEntry string) time.Duration {
streamTimeout := cfg.GetDuration(cfgEntry)
func fetchStreamTimeout(cfg *viper.Viper) time.Duration {
streamTimeout := cfg.GetDuration(cfgStreamTimeout)
if streamTimeout <= 0 {
streamTimeout = defaultStreamTimeout
}
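Both variants of `fetchStreamTimeout` reduce to the same shape: read a duration by key and fall back to a default when it is missing or non-positive; the two-argument form simply lets `stream_timeout` and `tree_stream_timeout` be fetched through one function. A standalone sketch of that parameterized variant — the key names and the 10s default come from this diff, the rest is illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/viper"
)

const defaultStreamTimeout = 10 * time.Second

// fetchTimeout mirrors the parameterized fetchStreamTimeout above: it reads
// a duration by config key and falls back to the default for zero or
// negative values.
func fetchTimeout(cfg *viper.Viper, key string) time.Duration {
	d := cfg.GetDuration(key)
	if d <= 0 {
		d = defaultStreamTimeout
	}
	return d
}

func main() {
	v := viper.New()
	v.Set("stream_timeout", "15s")
	fmt.Println(fetchTimeout(v, "stream_timeout"))      // 15s
	fmt.Println(fetchTimeout(v, "tree_stream_timeout")) // unset: 10s
}
```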
@@ -469,18 +469,14 @@ func fetchDefaultPolicy(l *zap.Logger, cfg *viper.Viper) netmap.PlacementPolicy
policyStr := cfg.GetString(cfgPolicyDefault)
if err := policy.DecodeString(policyStr); err != nil {
l.Warn(logs.FailedToParseDefaultLocationConstraint,
zap.String("policy", policyStr), zap.String("default", defaultPlacementPolicy),
zap.Error(err), logs.TagField(logs.TagApp))
zap.String("policy", policyStr), zap.String("default", defaultPlacementPolicy), zap.Error(err))
} else {
return policy
}
}
if err := policy.DecodeString(defaultPlacementPolicy); err != nil {
l.Fatal(logs.FailedToParseDefaultDefaultLocationConstraint,
zap.String("policy", defaultPlacementPolicy),
logs.TagField(logs.TagApp),
)
l.Fatal(logs.FailedToParseDefaultDefaultLocationConstraint, zap.String("policy", defaultPlacementPolicy))
}
return policy

@@ -493,9 +489,7 @@ func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultV
l.Error(logs.InvalidLifetimeUsingDefaultValue,
zap.String("parameter", cfgEntry),
zap.Duration("value in config", lifetime),
zap.Duration("default", defaultValue),
logs.TagField(logs.TagApp),
)
zap.Duration("default", defaultValue))
} else {
return lifetime
}

@@ -511,9 +505,7 @@ func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue
l.Error(logs.InvalidCacheSizeUsingDefaultValue,
zap.String("parameter", cfgEntry),
zap.Int("value in config", size),
zap.Int("default", defaultValue),
logs.TagField(logs.TagApp),
)
zap.Int("default", defaultValue))
} else {
return size
}

@@ -535,8 +527,7 @@ func fetchRemovingCheckInterval(v *viper.Viper, l *zap.Logger) time.Duration {
l.Error(logs.InvalidAccessBoxCacheRemovingCheckInterval,
zap.String("parameter", cfgAccessBoxCacheRemovingCheckInterval),
zap.Duration("value in config", duration),
zap.Duration("default", defaultAccessBoxCacheRemovingCheckInterval),
logs.TagField(logs.TagApp))
zap.Duration("default", defaultAccessBoxCacheRemovingCheckInterval))
return defaultAccessBoxCacheRemovingCheckInterval
}

@@ -550,9 +541,7 @@ func fetchDefaultMaxAge(cfg *viper.Viper, l *zap.Logger) int {
if defaultMaxAge <= 0 && defaultMaxAge != -1 {
l.Fatal(logs.InvalidDefaultMaxAge,
zap.String("parameter", cfgDefaultMaxAge),
zap.String("value in config", strconv.Itoa(defaultMaxAge)),
logs.TagField(logs.TagApp),
)
zap.String("value in config", strconv.Itoa(defaultMaxAge)))
}
}

@@ -563,19 +552,14 @@ func fetchRegionMappingPolicies(l *zap.Logger, cfg *viper.Viper) map[string]netm
filepath := cfg.GetString(cfgPolicyRegionMapFile)
regionPolicyMap, err := readRegionMap(filepath)
if err != nil {
l.Warn(logs.FailedToReadRegionMapFilePolicies,
zap.String("file", filepath),
zap.Error(err),
logs.TagField(logs.TagApp))
l.Warn(logs.FailedToReadRegionMapFilePolicies, zap.String("file", filepath), zap.Error(err))
return make(map[string]netmap.PlacementPolicy)
}
regionMap := make(map[string]netmap.PlacementPolicy, len(regionPolicyMap))
for region, policy := range regionPolicyMap {
if region == api.DefaultLocationConstraint {
l.Warn(logs.DefaultLocationConstraintCantBeOverriden,
zap.String("policy", policy),
logs.TagField(logs.TagApp))
l.Warn(logs.DefaultLocationConstraintCantBeOverriden, zap.String("policy", policy))
continue
}

@@ -590,10 +574,7 @@ func fetchRegionMappingPolicies(l *zap.Logger, cfg *viper.Viper) map[string]netm
continue
}
l.Warn(logs.FailedToParseLocationConstraint,
zap.String("region", region),
zap.String("policy", policy),
logs.TagField(logs.TagApp))
l.Warn(logs.FailedToParseLocationConstraint, zap.String("region", region), zap.String("policy", policy))
}
return regionMap

@@ -625,11 +606,7 @@ func fetchDefaultCopiesNumbers(l *zap.Logger, v *viper.Viper) []uint32 {
parsedValue, err := strconv.ParseUint(unparsed[i], 10, 32)
if err != nil {
l.Warn(logs.FailedToParseDefaultCopiesNumbers,
zap.Strings("copies numbers", unparsed),
zap.Uint32s("default", defaultCopiesNumbers),
zap.Error(err),
logs.TagField(logs.TagApp),
)
zap.Strings("copies numbers", unparsed), zap.Uint32s("default", defaultCopiesNumbers), zap.Error(err))
return defaultCopiesNumbers
}
result[i] = uint32(parsedValue)

@@ -685,17 +662,15 @@ func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper) map[string][]uint32 {
for j := range vector {
parsedValue, err := strconv.ParseUint(vector[j], 10, 32)
if err != nil {
l.Warn(logs.FailedToParseCopiesNumbers,
zap.String("location", constraint),
zap.Strings("copies numbers", vector), zap.Error(err),
logs.TagField(logs.TagApp))
l.Warn(logs.FailedToParseCopiesNumbers, zap.String("location", constraint),
zap.Strings("copies numbers", vector), zap.Error(err))
continue
}
vector32[j] = uint32(parsedValue)
}
copiesNums[constraint] = vector32
l.Info(logs.ConstraintAdded, zap.String("location", constraint), zap.Strings("copies numbers", vector), logs.TagField(logs.TagApp))
l.Info(logs.ConstraintAdded, zap.String("location", constraint), zap.Strings("copies numbers", vector))
}
return copiesNums
}

@@ -704,9 +679,7 @@ func fetchDefaultNamespaces(l *zap.Logger, v *viper.Viper) []string {
defaultNamespaces := v.GetStringSlice(cfgKludgeDefaultNamespaces)
if len(defaultNamespaces) == 0 {
defaultNamespaces = defaultDefaultNamespaces
l.Warn(logs.DefaultNamespacesCannotBeEmpty,
zap.Strings("namespaces", defaultNamespaces),
logs.TagField(logs.TagApp))
l.Warn(logs.DefaultNamespacesCannotBeEmpty, zap.Strings("namespaces", defaultNamespaces))
}
for i := range defaultNamespaces { // to be set namespaces in env variable as `S3_GW_KLUDGE_DEFAULT_NAMESPACES="" 'root'`

@@ -730,7 +703,7 @@ func fetchNamespacesConfig(l *zap.Logger, v *viper.Viper) (NamespacesConfig, []s
nsConfig, err := readNamespacesConfig(v.GetString(cfgNamespacesConfig))
if err != nil {
l.Warn(logs.FailedToParseNamespacesConfig, zap.Error(err), logs.TagField(logs.TagApp))
l.Warn(logs.FailedToParseNamespacesConfig, zap.Error(err))
}
defaultNamespacesNames := fetchDefaultNamespaces(l, v)

@@ -744,13 +717,11 @@ func fetchNamespacesConfig(l *zap.Logger, v *viper.Viper) (NamespacesConfig, []s
}
if len(overrideDefaults) > 0 {
l.Warn(logs.DefaultNamespaceConfigValuesBeOverwritten, logs.TagField(logs.TagApp))
l.Warn(logs.DefaultNamespaceConfigValuesBeOverwritten)
defaultNSValue.LocationConstraints = overrideDefaults[0].LocationConstraints
defaultNSValue.CopiesNumbers = overrideDefaults[0].CopiesNumbers
if len(overrideDefaults) > 1 {
l.Warn(logs.MultipleDefaultOverridesFound,
zap.String("name", overrideDefaults[0].Name),
logs.TagField(logs.TagApp))
l.Warn(logs.MultipleDefaultOverridesFound, zap.String("name", overrideDefaults[0].Name))
}
}

@@ -793,7 +764,7 @@ func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
priority := v.GetInt(key + "priority")
if address == "" {
l.Warn(logs.SkipEmptyAddress, logs.TagField(logs.TagApp))
l.Warn(logs.SkipEmptyAddress)
break
}
if weight <= 0 { // unspecified or wrong

@@ -808,9 +779,7 @@ func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
l.Info(logs.AddedStoragePeer,
zap.Int("priority", priority),
zap.String("address", address),
zap.Float64("weight", weight),
logs.TagField(logs.TagApp),
)
zap.Float64("weight", weight))
}
return nodes

@@ -834,7 +803,7 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
}
if _, ok := seen[serverInfo.Address]; ok {
log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address), logs.TagField(logs.TagApp))
log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address))
continue
}
seen[serverInfo.Address] = struct{}{}

@@ -863,13 +832,13 @@ func fetchVHSNamespaces(v *viper.Viper, log *zap.Logger) map[string]bool {
nsMap := v.GetStringMap(cfgVHSNamespaces)
for ns, val := range nsMap {
if _, ok := vhsNamespacesEnabled[ns]; ok {
log.Warn(logs.WarnDuplicateNamespaceVHS, zap.String("namespace", ns), logs.TagField(logs.TagApp))
log.Warn(logs.WarnDuplicateNamespaceVHS, zap.String("namespace", ns))
continue
}
enabledFlag, ok := val.(bool)
if !ok {
log.Warn(logs.WarnValueVHSEnabledFlagWrongType, zap.String("namespace", ns), logs.TagField(logs.TagApp))
log.Warn(logs.WarnValueVHSEnabledFlagWrongType, zap.String("namespace", ns))
continue
}

@@ -953,41 +922,6 @@ func fetchTombstoneWorkerPoolSize(v *viper.Viper) int {
return tombstoneWorkerPoolSize
}
func fetchLogTagsConfig(v *viper.Viper) (map[string]zapcore.Level, error) {
res := make(map[string]zapcore.Level)
defaultLevel := v.GetString(cfgLoggerLevel)
var defaultLvl zapcore.Level
if err := defaultLvl.Set(defaultLevel); err != nil {
return nil, fmt.Errorf("failed to parse log level, unknown level: '%s'", defaultLevel)
}
for i := 0; ; i++ {
name := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i))
if name == "" {
break
}
lvl := defaultLvl
level := v.GetString(fmt.Sprintf(cfgLoggerTagsLevelTmpl, i))
if level != "" {
if err := lvl.Set(level); err != nil {
return nil, fmt.Errorf("failed to parse log tags config, unknown level: '%s'", level)
}
}
res[name] = lvl
}
if len(res) == 0 && !v.IsSet(cfgLoggerTags) {
for _, tag := range defaultTags {
res[tag] = defaultLvl
}
}
return res, nil
}
func newViper(flags *pflag.FlagSet) (*viper.Viper, error) {
v := viper.New()
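`fetchLogTagsConfig` above walks indexed keys (`logger.tags.0.name`, `logger.tags.0.level`, ...), with a tag's level defaulting to `logger.level`. A config fragment it would accept looks roughly like this (tag names and levels here are illustrative):

```yaml
logger:
  level: info
  tags:
    - name: app
      level: debug   # overrides logger.level for this tag
    - name: datapath # no level given: inherits logger.level (info)
```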
@@ -1298,19 +1232,129 @@ type LoggerAppSettings interface {
DroppedLogsInc()
}
func pickLogger(v *viper.Viper, settings LoggerAppSettings) *Logger {
lvl, err := getLogLevel(v)
if err != nil {
panic(err)
}
dest := v.GetString(cfgLoggerDestination)
switch dest {
case destinationStdout:
return newStdoutLogger(v, lvl, settings)
case destinationJournald:
return newJournaldLogger(v, lvl, settings)
default:
panic(fmt.Sprintf("wrong destination for logger: %s", dest))
}
}
// newStdoutLogger constructs a Logger instance for the current application.
// Panics on failure.
//
// Logger contains a logger is built from zap's production logging configuration with:
//   - parameterized level (debug by default)
//   - console encoding
//   - ISO8601 time encoding
//   - sampling intervals
//
// and atomic log level to dynamically change it.
//
// Logger records a stack trace for all messages at or above fatal level.
//
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
func newStdoutLogger(v *viper.Viper, lvl zapcore.Level, settings LoggerAppSettings) *Logger {
stdout := zapcore.AddSync(os.Stderr)
level := zap.NewAtomicLevelAt(lvl)
consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, level)
consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, settings)
return &Logger{
logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
lvl: level,
}
}
func newJournaldLogger(v *viper.Viper, lvl zapcore.Level, settings LoggerAppSettings) *Logger {
level := zap.NewAtomicLevelAt(lvl)
encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)
core := zapjournald.NewCore(level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
coreWithContext := core.With([]zapcore.Field{
zapjournald.SyslogFacility(zapjournald.LogDaemon),
zapjournald.SyslogIdentifier(),
zapjournald.SyslogPid(),
})
coreWithContext = applyZapCoreMiddlewares(coreWithContext, v, settings)
l := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
return &Logger{
logger: l,
lvl: level,
}
}
func newLogEncoder() zapcore.Encoder {
c := zap.NewProductionEncoderConfig()
c.EncodeTime = zapcore.ISO8601TimeEncoder
return zapcore.NewConsoleEncoder(c)
}
func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, settings LoggerAppSettings) zapcore.Core {
if v.GetBool(cfgLoggerSamplingEnabled) {
core = zapcore.NewSamplerWithOptions(core,
v.GetDuration(cfgLoggerSamplingInterval),
v.GetInt(cfgLoggerSamplingInitial),
v.GetInt(cfgLoggerSamplingThereafter),
zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
if dec&zapcore.LogDropped > 0 {
settings.DroppedLogsInc()
}
}))
}
return core
}
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
var lvl zapcore.Level
lvlStr := v.GetString(cfgLoggerLevel)
err := lvl.UnmarshalText([]byte(lvlStr))
if err != nil {
return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
"value should be one of %v", lvlStr, err, [...]zapcore.Level{
zapcore.DebugLevel,
zapcore.InfoLevel,
zapcore.WarnLevel,
zapcore.ErrorLevel,
zapcore.DPanicLevel,
zapcore.PanicLevel,
zapcore.FatalLevel,
})
}
return lvl, nil
}
func validateDomains(domains []string, log *zap.Logger) []string {
validDomains := make([]string, 0, len(domains))
LOOP:
for _, domain := range domains {
if strings.Contains(domain, ":") {
log.Warn(logs.WarnDomainContainsPort, zap.String("domain", domain), logs.TagField(logs.TagApp))
log.Warn(logs.WarnDomainContainsPort, zap.String("domain", domain))
continue
}
domainParts := strings.Split(domain, ".")
for _, part := range domainParts {
if strings.ContainsAny(part, "<>") && part != wildcardPlaceholder {
log.Warn(logs.WarnDomainContainsInvalidPlaceholder, zap.String("domain", domain), logs.TagField(logs.TagApp))
log.Warn(logs.WarnDomainContainsInvalidPlaceholder, zap.String("domain", domain))
continue LOOP
}
}
@@ -1,210 +0,0 @@
package main

import (
"fmt"

"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/zapjournald"
"github.com/spf13/viper"
"github.com/ssgreg/journald"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)

const (
destinationStdout string = "stdout"
destinationJournald string = "journald"
)

var defaultTags = []string{logs.TagApp, logs.TagDatapath, logs.TagExternalStorage, logs.TagExternalStorageTree, logs.TagExternalBlockchain}

type Logger struct {
logger *zap.Logger
lvl zap.AtomicLevel
}

func pickLogger(v *viper.Viper, loggerSettings LoggerAppSettings, tagSettings TagFilterSettings) *Logger {
dest := v.GetString(cfgLoggerDestination)

switch dest {
case destinationStdout:
return newStdoutLogger(v, loggerSettings, tagSettings)
case destinationJournald:
return newJournaldLogger(v, loggerSettings, tagSettings)
default:
panic(fmt.Sprintf("wrong destination for logger: %s", dest))
}
}

// newStdoutLogger constructs a Logger instance for the current application.
// Panics on failure.
//
// Logger contains a logger is built from zap's production logging configuration with:
//   - parameterized level (debug by default)
//   - console encoding
//   - ISO8601 time encoding
//   - sampling intervals
//
// and atomic log level to dynamically change it.
//
// Logger records a stack trace for all messages at or above fatal level.
//
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
func newStdoutLogger(v *viper.Viper, loggerSettings LoggerAppSettings, tagSettings TagFilterSettings) *Logger {
c := newZapLogConfig(v)

out, errSink, err := openZapSinks(c)
if err != nil {
panic(fmt.Sprintf("open zap sinks: %v", err.Error()))
}

core := zapcore.NewCore(zapcore.NewConsoleEncoder(c.EncoderConfig), out, c.Level)
core = applyZapCoreMiddlewares(core, v, loggerSettings, tagSettings)
l := zap.New(core, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.ErrorOutput(errSink))

return &Logger{logger: l, lvl: c.Level}
}

func newJournaldLogger(v *viper.Viper, logSettings LoggerAppSettings, tagSettings TagFilterSettings) *Logger {
c := newZapLogConfig(v)

// We can use NewJSONEncoder instead if, say, frontend
// would like to access journald logs and parse them easily.
encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)

journalCore := zapjournald.NewCore(c.Level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
core := journalCore.With([]zapcore.Field{
zapjournald.SyslogFacility(zapjournald.LogDaemon),
zapjournald.SyslogIdentifier(),
zapjournald.SyslogPid(),
})
core = applyZapCoreMiddlewares(core, v, logSettings, tagSettings)
l := zap.New(core, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
return &Logger{logger: l, lvl: c.Level}
}

func openZapSinks(cfg zap.Config) (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
sink, closeOut, err := zap.Open(cfg.OutputPaths...)
if err != nil {
return nil, nil, err
}
errSink, _, err := zap.Open(cfg.ErrorOutputPaths...)
if err != nil {
closeOut()
return nil, nil, err
}
return sink, errSink, nil
}

var _ zapcore.Core = (*zapCoreTagFilterWrapper)(nil)

type zapCoreTagFilterWrapper struct {
core zapcore.Core
settings TagFilterSettings
extra []zap.Field
}

type TagFilterSettings interface {
LevelEnabled(tag string, lvl zapcore.Level) bool
}

func (c *zapCoreTagFilterWrapper) Enabled(level zapcore.Level) bool {
return c.core.Enabled(level)
}

func (c *zapCoreTagFilterWrapper) With(fields []zapcore.Field) zapcore.Core {
return &zapCoreTagFilterWrapper{
core: c.core.With(fields),
settings: c.settings,
extra: append(c.extra, fields...),
}
}

func (c *zapCoreTagFilterWrapper) Check(entry zapcore.Entry, checked *zapcore.CheckedEntry) *zapcore.CheckedEntry {
if c.core.Enabled(entry.Level) {
return checked.AddCore(entry, c)
}
return checked
}

func (c *zapCoreTagFilterWrapper) Write(entry zapcore.Entry, fields []zapcore.Field) error {
if c.shouldSkip(entry, fields) || c.shouldSkip(entry, c.extra) {
return nil
}

return c.core.Write(entry, fields)
}

func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field) bool {
for _, field := range fields {
if field.Key == logs.TagFieldName && field.Type == zapcore.StringType {
if !c.settings.LevelEnabled(field.String, entry.Level) {
return true
}
break
}
}

return false
}

func (c *zapCoreTagFilterWrapper) Sync() error {
return c.core.Sync()
}

func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, appSettings LoggerAppSettings, tagSettings TagFilterSettings) zapcore.Core {
core = &zapCoreTagFilterWrapper{
core: core,
settings: tagSettings,
}

if v.GetBool(cfgLoggerSamplingEnabled) {
core = zapcore.NewSamplerWithOptions(core,
v.GetDuration(cfgLoggerSamplingInterval),
v.GetInt(cfgLoggerSamplingInitial),
v.GetInt(cfgLoggerSamplingThereafter),
zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
if dec&zapcore.LogDropped > 0 {
appSettings.DroppedLogsInc()
}
}))
}

return core
}

func newZapLogConfig(v *viper.Viper) zap.Config {
lvl, err := getLogLevel(v)
if err != nil {
panic(err)
}

c := zap.Config{
Level: zap.NewAtomicLevelAt(lvl),
EncoderConfig: zap.NewProductionEncoderConfig(),
OutputPaths: []string{"stderr"},
ErrorOutputPaths: []string{"stderr"},
}
c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

return c
}

func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
var lvl zapcore.Level
lvlStr := v.GetString(cfgLoggerLevel)
err := lvl.UnmarshalText([]byte(lvlStr))
if err != nil {
return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
"value should be one of %v", lvlStr, err, [...]zapcore.Level{
zapcore.DebugLevel,
zapcore.InfoLevel,
zapcore.WarnLevel,
zapcore.ErrorLevel,
zapcore.DPanicLevel,
zapcore.PanicLevel,
zapcore.FatalLevel,
})
}
return lvl, nil
}
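The deleted file above wires tag filtering in as a `zapcore.Core` wrapper around the real core. A condensed sketch of the same idea — it skips the `With`/extra-fields bookkeeping the full implementation does, and the `enabled` callback stands in for `tagsConfig.LevelEnabled`:

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// filterCore drops entries whose "tag" field is not enabled for the level.
type filterCore struct {
	zapcore.Core
	enabled func(tag string, lvl zapcore.Level) bool
}

func (c filterCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
	if c.Core.Enabled(e.Level) {
		return ce.AddCore(e, c) // route writes through our Write below
	}
	return ce
}

func (c filterCore) Write(e zapcore.Entry, fields []zapcore.Field) error {
	for _, f := range fields {
		if f.Key == "tag" && f.Type == zapcore.StringType && !c.enabled(f.String, e.Level) {
			return nil // tag disabled at this level: drop the record
		}
	}
	return c.Core.Write(e, fields)
}

func main() {
	base := zapcore.NewCore(
		zapcore.NewConsoleEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stderr),
		zap.DebugLevel,
	)
	onlyApp := func(tag string, _ zapcore.Level) bool { return tag == "app" }
	log := zap.New(filterCore{Core: base, enabled: onlyApp})

	log.Info("kept", zap.String("tag", "app"))
	log.Info("dropped", zap.String("tag", "datapath"))
}
```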
@@ -19,24 +19,24 @@ type Service struct {
// Start runs http service with the exposed endpoint on the configured port.
func (ms *Service) Start() {
if ms.enabled {
ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp))
ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr))
err := ms.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort, logs.TagField(logs.TagApp))
ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort)
}
} else {
ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled, logs.TagField(logs.TagApp))
ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled)
}
}

// ShutDown stops the service.
func (ms *Service) ShutDown(ctx context.Context) {
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp))
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr))
err := ms.Shutdown(ctx)
if err != nil {
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err), logs.TagField(logs.TagApp))
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err))
if err = ms.Close(); err != nil {
ms.log.Panic(logs.CantShutDownService, zap.Error(err), logs.TagField(logs.TagApp))
ms.log.Panic(logs.CantShutDownService, zap.Error(err))
}
}
}
@@ -55,11 +55,6 @@ S3_GW_LOGGER_SAMPLING_ENABLED=false
S3_GW_LOGGER_SAMPLING_INITIAL=100
S3_GW_LOGGER_SAMPLING_THEREAFTER=100
S3_GW_LOGGER_SAMPLING_INTERVAL=1s
S3_GW_LOGGER_TAGS_0_NAME=app
S3_GW_LOGGER_TAGS_0_LEVEL=info
S3_GW_LOGGER_TAGS_1_NAME=datapath
S3_GW_LOGGER_TAGS_1_LEVEL=fatal

# HTTP logger
S3_GW_HTTP_LOGGING_ENABLED=false

@@ -85,10 +80,8 @@ S3_GW_PROMETHEUS_ADDRESS=localhost:8086
# Timeout to connect to a node
S3_GW_CONNECT_TIMEOUT=10s
# Timeout for individual operations in object pool streaming RPC.
# Timeout for individual operations in streaming RPC.
S3_GW_STREAM_TIMEOUT=10s
# Timeout for individual operations in tree pool streaming RPC.
S3_GW_TREE_STREAM_TIMEOUT=10s
# Timeout to check node health during rebalance.
S3_GW_HEALTHCHECK_TIMEOUT=15s
# Interval to check node health
@@ -60,12 +60,6 @@ logger:
initial: 100
thereafter: 100
interval: 1s
tags:
- name: "app"
level: "debug"
- name: "datapath"
- name: "external_storage"
- name: "external_storage_tree"

# log http request data (URI, headers, query, etc)
http_logging:

@@ -106,10 +100,8 @@ tracing:
# Timeout to connect to a node
connect_timeout: 10s
# Timeout for individual operations in object pool streaming RPC.
# Timeout for individual operations in streaming RPC.
stream_timeout: 10s
# Timeout for individual operations in tree pool streaming RPC.
tree_stream_timeout: 10s
# Timeout to check node health during rebalance
healthcheck_timeout: 15s
# Interval to check node health
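The two sides of this hunk differ only in whether the tree pool gets its own knob: with `tree_stream_timeout` present, the object and tree pools can be tuned independently, e.g. (values illustrative):

```yaml
connect_timeout: 10s
stream_timeout: 10s       # object pool streaming RPC
tree_stream_timeout: 20s  # tree pool streaming RPC, tuned separately
healthcheck_timeout: 15s
```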
@@ -202,7 +202,7 @@ func (c *cred) checkIfCredentialsAreRemoved(ctx context.Context, cnrID cid.ID, a
func (c *cred) putBoxToCache(accessKeyID string, val *cache.AccessBoxCacheValue) {
if err := c.cache.Put(accessKeyID, val); err != nil {
c.log.Warn(logs.CouldntPutAccessBoxIntoCache, zap.String("accessKeyID", accessKeyID), logs.TagField(logs.TagDatapath))
c.log.Warn(logs.CouldntPutAccessBoxIntoCache, zap.String("accessKeyID", accessKeyID))
}
}

@@ -241,7 +241,7 @@ func (c *cred) getAccessBox(ctx context.Context, cnrID cid.ID, accessKeyID strin
func (c *cred) Put(ctx context.Context, prm CredentialsParam) (oid.Address, error) {
if prm.AccessKeyID != "" {
c.log.Info(logs.CheckCustomAccessKeyIDUniqueness, zap.String("access_key_id", prm.AccessKeyID), logs.TagField(logs.TagApp))
c.log.Info(logs.CheckCustomAccessKeyIDUniqueness, zap.String("access_key_id", prm.AccessKeyID))
credsPrm := PrmGetCredsObject{
Container: prm.Container,
AccessKeyID: prm.AccessKeyID,
@ -215,7 +215,6 @@ resolve_order:
|
|||
|
||||
connect_timeout: 10s
|
||||
stream_timeout: 10s
|
||||
tree_stream_timeout: 10s
|
||||
healthcheck_timeout: 15s
|
||||
rebalance_interval: 60s
|
||||
pool_error_threshold: 100
|
||||
|
@ -238,8 +237,7 @@ source_ip_header: "Source-Ip"
|
|||
| `rpc_endpoint` | `string` | no | | The address of the RPC host to which the gateway connects to resolve bucket names and interact with frostfs contracts (required to use the `nns` resolver and `frostfsid` contract). |
|
||||
| `resolve_order` | `[]string` | yes | `[dns]` | Order of bucket name resolvers to use. Available resolvers: `dns`, `nns`. |
|
||||
| `connect_timeout` | `duration` | no | `10s` | Timeout to connect to a node. |
|
||||
| `stream_timeout` | `duration` | no | `10s` | Timeout for individual operations in object pool streaming RPC. |
|
||||
| `tree_stream_timeout` | `duration` | no | `10s` | Timeout for individual operations in tree pool streaming RPC. |
|
||||
| `stream_timeout` | `duration` | no | `10s` | Timeout for individual operations in streaming RPC. |
|
||||
| `healthcheck_timeout` | `duration` | no | `15s` | Timeout to check node health during rebalance. |
|
||||
| `rebalance_interval` | `duration` | no | `60s` | Interval to check node health. |
|
||||
| `pool_error_threshold` | `uint32` | no | `100` | The number of errors on connection after which node is considered as unhealthy. |
|
||||
|

@ -381,13 +379,6 @@ logger:

    initial: 100
    thereafter: 100
    interval: 1s
  tags:
    - name: app
      level: info
    - name: datapath
    - name: external_blockchain
    - name: external_storage_tree
    - name: external_storage
```

| Parameter | Type | SIGHUP reload | Default value | Description |

@ -399,32 +390,6 @@ logger:

| `sampling.thereafter` | `int`      | no | '100' | Sampling count of entries after an `interval`. |
| `sampling.interval`   | `duration` | no | '1s'  | Sampling interval for messages with similar content. |
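For example, with `sampling.initial: 100`, `sampling.thereafter: 100` and `sampling.interval: 1s`, the first 100 occurrences of an identical entry within each one-second window are logged, after which only every 100th occurrence is kept until the window resets (the usual behaviour of zap's sampler, on which these options appear to be based).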
## Tags

There are additional log entries that can hurt performance; they can be enabled selectively using the `logger.tags`
parameter.
If the `tags` section isn't set, the default tags (see [Tag values](#tag-values)) will be enabled.
If the `tags` section is set but empty, no tags will be used.

```yaml
tags:
  - name: "app"
    level: info
```

| Parameter | Type     | SIGHUP reload | Default value             | Description                                                                                                |
|-----------|----------|---------------|---------------------------|------------------------------------------------------------------------------------------------------------|
| `name`    | `string` | yes           |                           | Tag name. Possible values are listed below in the `Tag values` section.                                   |
| `level`   | `string` | yes           | Value from `logger.level` | Logging level for the specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. |

### Tag values

* `app` - common application logs (enabled by default).
* `datapath` - main logic of application (enabled by default).
* `external_blockchain` - external interaction with neo-go blockchain (enabled by default).
* `external_storage` - external interaction with storage node (enabled by default).
* `external_storage_tree` - external interaction with tree service in storage node (enabled by default).
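As a fuller illustration of the parameters above (a sketch, not an excerpt from the shipped docs), raising verbosity only for tree-service interaction while keeping other tags at the global level could look like:

```yaml
logger:
  level: info
  tags:
    - name: "external_storage_tree"
      level: "debug"
    - name: "app"
    - name: "datapath"
```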

### `http_logging` section

@ -61,7 +61,7 @@ func (f *FrostFSID) GetUserGroupIDsAndClaims(userHash util.Uint160) ([]string, m

	subj, err := f.getSubject(userHash)
	if err != nil {
		if strings.Contains(err.Error(), "not found") {
			f.log.Debug(logs.UserGroupsListIsEmpty, zap.Error(err), logs.TagField(logs.TagExternalBlockchain))
			f.log.Debug(logs.UserGroupsListIsEmpty, zap.Error(err))
			return nil, nil, nil
		}
		return nil, nil, err

@ -86,7 +86,7 @@ func (f *FrostFSID) getSubject(addr util.Uint160) (*client.SubjectExtended, erro
	}

	if err = f.cache.PutSubject(addr, subj); err != nil {
		f.log.Warn(logs.CouldntCacheSubject, zap.Error(err), logs.TagField(logs.TagDatapath))
		f.log.Warn(logs.CouldntCacheSubject, zap.Error(err))
	}

	return subj, nil

@ -121,7 +121,7 @@ func (f *FrostFSID) getUserKey(namespace, name string) (*keys.PublicKey, error)
	}

	if err = f.cache.PutUserKey(namespace, name, userKey); err != nil {
		f.log.Warn(logs.CouldntCacheUserKey, zap.Error(err), logs.TagField(logs.TagDatapath))
		f.log.Warn(logs.CouldntCacheUserKey, zap.Error(err))
	}

	return userKey, nil

@ -69,7 +69,7 @@ func (c *MorphRuleChainStorage) ListMorphRuleChains(name chain.Name, target engi
	}

	if err = c.cache.Put(key, list); err != nil {
		c.log.Warn(logs.CouldntCacheListPolicyChains, logs.TagField(logs.TagApp))
		c.log.Warn(logs.CouldntCacheListPolicyChains)
	}

	return list, nil

@ -1,230 +1,189 @@
package logs

import "go.uber.org/zap"

const (
	TagFieldName = "tag"

	TagApp = "app"
	TagDatapath = "datapath"
	TagExternalStorage = "external_storage"
	TagExternalStorageTree = "external_storage_tree"
	TagExternalBlockchain = "external_blockchain"
)

func TagField(tag string) zap.Field {
	return zap.String(TagFieldName, tag)
}

// App.
const (
	ApplicationStarted = "application started"
	ApplicationFinished = "application finished"
	StartingServer = "starting server"
	StoppingServer = "stopping server"
	ServiceIsRunning = "service is running"
	ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port"
	ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled"
	ShuttingDownService = "shutting down service"
	CantGracefullyShutDownService = "can't gracefully shut down service, force stop"
	FailedToShutdownTracing = "failed to shutdown tracing"
	UsingCredentials = "using credentials"
	FailedToAddServer = "failed to add server"
	AddServer = "add server"
	CantShutDownService = "can't shut down service"
	FailedToCreateResolver = "failed to create resolver"
	CouldntGenerateRandomKey = "couldn't generate random key"
	FailedToCreateConnectionPool = "failed to create connection pool"
	FailedToDialConnectionPool = "failed to dial connection pool"
	FailedToCreateTreePool = "failed to create tree pool"
	FailedToDialTreePool = "failed to dial tree pool"
	ListenAndServe = "listen and serve"
	NoHealthyServers = "no healthy servers"
	CouldNotInitializeAPIHandler = "could not initialize API handler"
	InitFrostfsIDFailed = "init frostfsid failed"
	InitPolicyContractFailed = "init policy contract failed"
	ServerReconnecting = "reconnecting server..."
	ServerReconnectedSuccessfully = "server reconnected successfully"
	ServerReconnectFailed = "failed to reconnect server"
	CouldNotFetchCORSContainerInfo = "couldn't fetch CORS container info"
	CouldNotFetchAccessBoxContainerInfo = "couldn't fetch AccessBox container info"
	MultinetDialSuccess = "multinet dial successful"
	MultinetDialFail = "multinet dial failed"
	FailedToCreateWorkerPool = "failed to create worker pool"
	CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key"
	AddedStoragePeer = "added storage peer"
	SIGHUPConfigReloadStarted = "SIGHUP config reload started"
	MetricsAreDisabled = "metrics are disabled"
	ConstraintAdded = "constraint added"
	ContainerResolverWillBeDisabled = "container resolver will be disabled because of resolvers 'resolver_order' is empty"
	FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed"
	FailedToReloadConfig = "failed to reload config"
	TracingConfigUpdated = "tracing config updated"
	FailedToReloadResolvers = "failed to reload resolvers"
	FailedToReloadServerParameters = "failed to reload server parameters"
	SIGHUPConfigReloadCompleted = "SIGHUP config reload completed"
	LogLevelWontBeUpdated = "log level won't be updated"
	ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver 'nns' won't be used since 'rpc_endpoint' isn't provided"
	InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)"
	InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value"
	FailedToParseDefaultLocationConstraint = "failed to parse 'default' location constraint, default one will be used"
	FailedToReadRegionMapFilePolicies = "failed to read region map file, policies will be empty"
	DefaultLocationConstraintCantBeOverriden = "'default' location constraint can't be overriden by custom policy, use 'placement_policy.default'"
	FailedToParseLocationConstraint = "failed to parse location constraint, it cannot be used"
	FailedToParseDefaultCopiesNumbers = "failed to parse 'default' copies numbers, default one will be used"
	FailedToParseCopiesNumbers = "failed to parse copies numbers, skip"
	FailedToInitializeTracing = "failed to initialize tracing"
	DefaultNamespacesCannotBeEmpty = "default namespaces cannot be empty, defaults will be used"
	FailedToParseNamespacesConfig = "failed to unmarshal namespaces config"
	DefaultNamespaceConfigValuesBeOverwritten = "default namespace config value be overwritten by values from 'namespaces.config'"
	MultipleDefaultOverridesFound = "multiple default overrides found, only one will be used"
	SkipEmptyAddress = "skip, empty address"
	FailedToParseDefaultDefaultLocationConstraint = "failed to parse default 'default' location constraint"
	LogHTTPDisabledInThisBuild = "http logging disabled in this build"
	InvalidDefaultMaxAge = "invalid defaultMaxAge"
	RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
	RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
	InvalidAccessBoxCacheRemovingCheckInterval = "invalid accessbox check removing interval, using default value"
	WarnDuplicateAddress = "duplicate address"
	FailedToLoadMultinetConfig = "failed to load multinet config"
	MultinetConfigWontBeUpdated = "multinet config won't be updated"
	WarnDomainContainsPort = "the domain contains a port, domain skipped"
	TagsLogConfigWontBeUpdated = "tags log config won't be updated"
	CouldNotFetchLifecycleContainerInfo = "couldn't fetch lifecycle container info"
	WarnDuplicateNamespaceVHS = "duplicate namespace with enabled VHS, config value skipped"
	WarnValueVHSEnabledFlagWrongType = "the value of the VHS enable flag for the namespace is of the wrong type, config value skipped"
	WarnDomainContainsInvalidPlaceholder = "the domain contains an invalid placeholder, domain skipped"
)

// Datapath.
const (
	NotSupported = "not supported"
	RequestUnmatched = "request unmatched"
	RequestStart = "request start"
	RequestFailed = "request failed"
	RequestEnd = "request end"
	AnonRequestSkipFrostfsIDValidation = "anon request, skip FrostfsID validation"
	CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed = "couldn't receive access box for gate key, random key will be used"
	FrostfsIDValidationFailed = "FrostfsID validation failed"
	FailedToPassAuthentication = "failed to pass authentication"
	RequestUnmatched = "request unmatched" // Error in ../../api/router.go
	CheckContainer = "check container" // Info in ../../authmate/authmate.go
	CreateContainer = "create container" // Info in ../../authmate/authmate.go
	StoreBearerTokenIntoFrostFS = "store bearer token into FrostFS" // Info in ../../authmate/authmate.go
	UpdateAccessCredObjectIntoFrostFS = "update access cred object into FrostFS" // Info in ../../authmate/authmate.go
	MetricsAreDisabled = "metrics are disabled" // Warn in ../../metrics/app.go
	FoundMoreThanOneUnversionedNode = "found more than one unversioned node" // Debug in ../../pkg/service/tree/tree.go
	ServiceIsRunning = "service is running" // Info in ../../cmd/s3-gw/service.go
	ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port" // Warn in ../../cmd/s3-gw/service.go
	ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" // Info in ../../cmd/s3-gw/service.go
	ShuttingDownService = "shutting down service" // Info in ../../cmd/s3-gw/service.go
	CantGracefullyShutDownService = "can't gracefully shut down service, force stop" // Error in ../../cmd/s3-gw/service.go
	ContainerResolverWillBeDisabled = "container resolver will be disabled because of resolvers 'resolver_order' is empty" // Info in ../../cmd/s3-gw/app.go
	FailedToInitializeTracing = "failed to initialize tracing" // Warn in ../../cmd/s3-gw/app.go
	TracingConfigUpdated = "tracing config updated" // Info in ../../cmd/s3-gw/app.go
	FailedToShutdownTracing = "failed to shutdown tracing" // Warn in ../../cmd/s3-gw/app.go
	UsingCredentials = "using credentials" // Info in ../../cmd/s3-gw/app.go
	ApplicationStarted = "application started" // Info in ../../cmd/s3-gw/app.go
	ApplicationFinished = "application finished" // Info in ../../cmd/s3-gw/app.go
	StartingServer = "starting server" // Info in ../../cmd/s3-gw/app.go
	StoppingServer = "stopping server" // Info in ../../cmd/s3-gw/app.go
	SIGHUPConfigReloadStarted = "SIGHUP config reload started" // Info in ../../cmd/s3-gw/app.go
	FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed" // Warn in ../../cmd/s3-gw/app.go
	FailedToReloadConfig = "failed to reload config" // Warn in ../../cmd/s3-gw/app.go
	FailedToReloadResolvers = "failed to reload resolvers" // Warn in ../../cmd/s3-gw/app.go
	FailedToReloadServerParameters = "failed to reload server parameters" // Warn in ../../cmd/s3-gw/app.go
	SIGHUPConfigReloadCompleted = "SIGHUP config reload completed" // Info in ../../cmd/s3-gw/app.go
	LogLevelWontBeUpdated = "log level won't be updated" // Warn in ../../cmd/s3-gw/app.go
	FailedToAddServer = "failed to add server" // Warn in ../../cmd/s3-gw/app.go
	AddServer = "add server" // Info in ../../cmd/s3-gw/app.go
	ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver 'nns' won't be used since 'rpc_endpoint' isn't provided" // Warn in ../../cmd/s3-gw/app.go
	InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)" // Error in ../../cmd/s3-gw/app_settings.go
	InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value" // Error in ../../cmd/s3-gw/app_settings.go
	FailedToParseDefaultLocationConstraint = "failed to parse 'default' location constraint, default one will be used" // Warn in cmd/s3-gw/app_settings.go
	FailedToReadRegionMapFilePolicies = "failed to read region map file, policies will be empty" // Warn in cmd/s3-gw/app_settings.go
	DefaultLocationConstraintCantBeOverriden = "'default' location constraint can't be overriden by custom policy, use 'placement_policy.default'" // Warn in cmd/s3-gw/app_settings.go
	FailedToParseLocationConstraint = "failed to parse location constraint, it cannot be used" // Warn in cmd/s3-gw/app_settings.go
	FailedToParseDefaultCopiesNumbers = "failed to parse 'default' copies numbers, default one will be used" // Warn in cmd/s3-gw/app_settings.go
	FailedToParseCopiesNumbers = "failed to parse copies numbers, skip" // Warn in cmd/s3-gw/app_settings.go
	DefaultNamespacesCannotBeEmpty = "default namespaces cannot be empty, defaults will be used" // Warn in cmd/s3-gw/app_settings.go
	FailedToParseNamespacesConfig = "failed to unmarshal namespaces config" // Warn in cmd/s3-gw/app_settings.go
	DefaultNamespaceConfigValuesBeOverwritten = "default namespace config value be overwritten by values from 'namespaces.config'" // Warn in cmd/s3-gw/app_settings.go
	MultipleDefaultOverridesFound = "multiple default overrides found, only one will be used" // Warn in cmd/s3-gw/app_settings.go
	FailedToParseDefaultDefaultLocationConstraint = "failed to parse default 'default' location constraint" // Fatal in cmd/s3-gw/app_settings.go
	ConstraintAdded = "constraint added" // Info in ../../cmd/s3-gw/app_settings.go
	SkipEmptyAddress = "skip, empty address" // Warn in ../../cmd/s3-gw/app_settings.go
	AddedStoragePeer = "added storage peer" // Info in ../../cmd/s3-gw/app_settings.go
	PrepareConnectionPool = "prepare connection pool" // Debug in ../../cmd/s3-authmate/modules/utils.go
	PrepareFrostfsIDClient = "prepare frostfsid client" // Debug in ../../cmd/s3-authmate/modules/utils.go
	PreparePolicyClient = "prepare policy client" // Debug in ../../cmd/s3-authmate/modules/utils.go
	CreateSubjectInFrostFSID = "create subject in frostfsid" // Debug in ../../cmd/s3-authmate/modules/utils.go
	SubjectAlreadyExistsInFrostFSID = "subject already exists in frostfsid" // Debug in ../../cmd/s3-authmate/modules/utils.go
	SetSubjectNameInFrostFSID = "set subject name in frostfsid" // Debug in ../../cmd/s3-authmate/modules/utils.go
	AddPolicyChainRules = "add policy chain rules" // Debug in ../../cmd/s3-authmate/modules/utils.go
	InvalidCacheEntryType = "invalid cache entry type" // Warn in ../../api/cache/*
	InvalidCacheKeyType = "invalid cache key type" // Warn in ../../api/cache/objectslist.go
	ObjectIsCopied = "object is copied" // Info in ../../api/handler/copy.go
	RequestFailed = "request failed" // Error in ../../api/handler/util.go
	GetBucketInfo = "get bucket info" // Warn in ../../api/handler/cors.go
	GetBucketCors = "get bucket cors" // Warn in ../../api/handler/cors.go
	CouldntDeleteObject = "couldn't delete object" // Error in ../../api/layer/layer.go
	BucketIsCreated = "bucket is created" // Info in ../../api/handler/put.go
	CouldNotParseContainerObjectLockEnabledAttribute = "could not parse container object lock enabled attribute" // Error in ../../api/layer/container.go
	CouldNotListUserContainers = "could not list user containers" // Error in ../../api/layer/container.go
	CouldNotFetchContainerInfo = "could not fetch container info" // Error in ../../api/layer/container.go
	MismatchedObjEncryptionInfo = "mismatched obj encryptionInfo" // Warn in ../../api/layer/multipart_upload.go
	UploadPart = "upload part" // Debug in ../../api/layer/multipart_upload.go
	CouldntDeleteOldPartObject = "couldn't delete old part object" // Error in ../../api/layer/multipart_upload.go
	CouldNotPutCompletedObject = "could not put a completed object (multipart upload)" // Error in ../../api/layer/multipart_upload.go
	CouldNotDeleteUploadPart = "could not delete upload part" // Warn in ../../api/layer/multipart_upload.go
	CouldntDeletePart = "couldn't delete part" // Warn in ../../api/layer/multipart_upload.go
	PartDetails = "part details" // Debug in ../../api/layer/multipart_upload.go
	GetObject = "get object" // Debug in ../../api/layer/layer.go
	ResolveBucket = "resolve bucket" // Info in ../../api/layer/layer.go
	CouldntDeleteCorsObject = "couldn't delete cors object" // Error in ../../api/layer/cors.go
	PutObject = "put object" // Debug in ../../api/layer/object.go
	FailedToDeleteObject = "failed to delete object" // Debug in ../../api/layer/object.go
	FailedToDiscardPutPayloadProbablyGoroutineLeaks = "failed to discard put payload, probably goroutine leaks" // Warn in ../../api/layer/object.go
	FailedToSubmitTaskToPool = "failed to submit task to pool" // Warn in ../../api/layer/object.go
	CouldNotFetchObjectMeta = "could not fetch object meta" // Warn in ../../api/layer/object.go
	GetTreeNode = "get tree node" // Debug in ../../api/layer/tagging.go
	GetTreeNodeToDelete = "get tree node to delete" // Debug in ../../api/layer/tagging.go
	CouldntPutBucketInfoIntoCache = "couldn't put bucket info into cache" // Warn in ../../api/layer/cache.go
	CouldntAddObjectToCache = "couldn't add object to cache" // Warn in ../../api/layer/cache.go
	CouldntCacheAccessControlOperation = "couldn't cache access control operation" // Warn in ../../api/layer/cache.go
	CouldntPutObjAddressToNameCache = "couldn't put obj address to name cache" // Warn in ../../api/layer/cache.go
	CouldntCacheListOfObjects = "couldn't cache list of objects" // Warn in ../../api/layer/cache.go
	CouldntCacheListSession = "couldn't cache list session" // Warn in ../../api/layer/cache.go
	CouldntCacheTags = "couldn't cache tags" // Error in ../../api/layer/cache.go
	CouldntCacheLockInfo = "couldn't cache lock info" // Error in ../../api/layer/cache.go
	CouldntCacheBucketSettings = "couldn't cache bucket settings" // Warn in ../../api/layer/cache.go
	CouldntCacheCors = "couldn't cache cors" // Warn in ../../api/layer/cache.go
	CouldntCacheListPolicyChains = "couldn't cache list policy chains" // Warn in ../../api/layer/cache.go
	RequestEnd = "request end" // Info in ../../api/middleware/response.go
	CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed = "couldn't receive access box for gate key, random key will be used" // Debug in ../../api/middleware/auth.go
	FailedToPassAuthentication = "failed to pass authentication" // Error in ../../api/middleware/auth.go
	FailedToResolveCID = "failed to resolve CID" // Debug in ../../api/middleware/metrics.go
	RequestStart = "request start" // Info in ../../api/middleware/reqinfo.go
	LogHTTP = "http log" // Info in ../../api/middleware/log_http.go
	FailedToCloseHTTPBody = "failed to close http body" // Error in ../../api/middleware/log_http.go
	FailedToInitializeHTTPLogger = "failed to initialize http logger" // Error in ../../api/middleware/log_http.go
	FailedToReloadHTTPFileLogger = "failed to reload http file logger" // Error in ../../api/middleware/log_http.go
	FailedToReadHTTPBody = "failed to read http body" // Error in ../../api/middleware/log_http.go
	FailedToProcessHTTPBody = "failed to process http body" // Error in ../../api/middleware/log_http.go
	LogHTTPDisabledInThisBuild = "http logging disabled in this build" // Warn in ../../api/middleware/log_http_stub.go
	FailedToUnescapeObjectName = "failed to unescape object name" // Warn in ../../api/middleware/reqinfo.go
	InvalidDefaultMaxAge = "invalid defaultMaxAge" // Fatal in ../../cmd/s3-gw/app_settings.go
	CantShutDownService = "can't shut down service" // Panic in ../../cmd/s3-gw/service.go
	CouldntGenerateRandomKey = "couldn't generate random key" // Fatal in ../../cmd/s3-gw/app.go
	FailedToCreateResolver = "failed to create resolver" // Fatal in ../../cmd/s3-gw/app.go
	CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" // Fatal in ../../cmd/s3-gw/app.go
	FailedToCreateConnectionPool = "failed to create connection pool" // Fatal in ../../cmd/s3-gw/app.go
	FailedToDialConnectionPool = "failed to dial connection pool" // Fatal in ../../cmd/s3-gw/app.go
	FailedToCreateTreePool = "failed to create tree pool" // Fatal in ../../cmd/s3-gw/app.go
	FailedToDialTreePool = "failed to dial tree pool" // Fatal in ../../cmd/s3-gw/app.go
	ListenAndServe = "listen and serve" // Fatal in ../../cmd/s3-gw/app.go
	NoHealthyServers = "no healthy servers" // Fatal in ../../cmd/s3-gw/app.go
	CouldNotInitializeAPIHandler = "could not initialize API handler" // Fatal in ../../cmd/s3-gw/app.go
	RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" // Warn in ../../cmd/s3-gw/app.go
	RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" // Info in ../../cmd/s3-gw/app.go
	AnonRequestSkipFrostfsIDValidation = "anon request, skip FrostfsID validation" // Debug in ../../api/middleware/auth.go
	FrostfsIDValidationFailed = "FrostfsID validation failed" // Error in ../../api/middleware/auth.go
	InitFrostfsIDContractFailed = "init frostfsid contract failed" // Fatal in ../../cmd/s3-gw/app.go
	InitPolicyContractFailed = "init policy contract failed" // Fatal in ../../cmd/s3-gw/app.go
	PolicyValidationFailed = "policy validation failed"
	ServerReconnecting = "reconnecting server..."
	ServerReconnectedSuccessfully = "server reconnected successfully"
	ServerReconnectFailed = "failed to reconnect server"
	ParseTreeNode = "parse tree node"
	FailedToGetRealObjectSize = "failed to get real object size"
	CouldntDeleteObjectFromStorageContinueDeleting = "couldn't delete object from storage, continue deleting from tree"
	CouldntPutAccessBoxIntoCache = "couldn't put accessbox into cache"
	InvalidAccessBoxCacheRemovingCheckInterval = "invalid accessbox check removing interval, using default value"
	CouldNotCloseRequestBody = "could not close request body"
	BucketOwnerKeyIsMissing = "bucket owner key is missing"
	SettingsNodeInvalidOwnerKey = "settings node: invalid owner key"
	SuccessfulAuth = "successful auth"
	PolicyRequest = "policy request"
	FailedToGenerateRequestID = "failed to generate request id"
	InvalidBucketObjectLockEnabledHeader = "invalid X-Amz-Bucket-Object-Lock-Enabled header"
	InvalidTreeKV = "invalid tree service meta KV"
	FailedToWriteResponse = "failed to write response"
	WarnDuplicateAddress = "duplicate address"
	PolicyCouldntBeConvertedToNativeRules = "policy couldn't be converted to native rules, only s3 rules be applied"
	FailedToParseHTTPTime = "failed to parse http time, header is ignored"
	WarnInvalidTypeTLSTerminationHeader = "invalid type of value of tls termination header"
	FailedToUnescapeObjectName = "failed to unescape object name"
	MismatchedObjEncryptionInfo = "mismatched obj encryptionInfo"
	CouldNotParseContainerObjectLockEnabledAttribute = "could not parse container object lock enabled attribute"
	FailedToParsePartInfo = "failed to parse part info"
	SettingsNodeInvalidOwnerKey = "settings node: invalid owner key"
	CouldntCacheSubject = "couldn't cache subject info"
	UserGroupsListIsEmpty = "user groups list is empty, subject not found"
	CouldntCacheUserKey = "couldn't cache user key"
	ObjectTaggingNodeHasMultipleIDs = "object tagging node has multiple ids"
	BucketTaggingNodeHasMultipleIDs = "bucket tagging node has multiple ids"
	BucketSettingsNodeHasMultipleIDs = "bucket settings node has multiple ids"
	BucketCORSNodeHasMultipleIDs = "bucket cors node has multiple ids"
	SystemNodeHasMultipleIDs = "system node has multiple ids"
	FailedToRemoveOldSystemNode = "failed to remove old system node"
	FailedToParseAddressInTreeNode = "failed to parse object addr in tree node"
	UnexpectedMultiNodeIDsInSubTreeMultiParts = "unexpected multi node ids in sub tree multi parts"
	FoundSeveralSystemNodes = "found several system nodes"
	BucketLifecycleNodeHasMultipleIDs = "bucket lifecycle node has multiple ids"
	UploadPart = "upload part"
	FailedToSubmitTaskToPool = "failed to submit task to pool"
	FailedToGetRealObjectSize = "failed to get real object size"
	PartDetails = "part details"
	BucketSettingsNotFoundUseDefaults = "bucket settings not found, use defaults"
	ParseTreeNode = "parse tree node"
	GetObject = "get object"
	GetTreeNodeToDelete = "get tree node to delete"
	InvalidBucketObjectLockEnabledHeader = "invalid X-Amz-Bucket-Object-Lock-Enabled header"
	InvalidCacheEntryType = "invalid cache entry type"
	InvalidCacheKeyType = "invalid cache key type"
	CouldntPutBucketInfoIntoCache = "couldn't put bucket info into cache"
	CouldntAddObjectToCache = "couldn't add object to cache"
	CouldntCacheAccessControlOperation = "couldn't cache access control operation"
	CouldntPutObjAddressToNameCache = "couldn't put obj address to name cache"
	CouldntPutAccessBoxIntoCache = "couldn't put accessbox into cache"
	CouldntCacheListOfObjects = "couldn't cache list of objects"
	CouldntCacheListSession = "couldn't cache list session"
	CouldntCacheTags = "couldn't cache tags"
	CouldntCacheLockInfo = "couldn't cache lock info"
	CouldntCacheNetworkInfo = "couldn't cache network info"
	CouldntCacheSubject = "couldn't cache subject info"
	CouldntCacheNetmap = "couldn't cache netmap"
	CouldntCacheUserKey = "couldn't cache user key"
	CouldntCacheBucketSettings = "couldn't cache bucket settings"
	CouldntCacheCors = "couldn't cache cors"
	CouldntCacheListPolicyChains = "couldn't cache list policy chains"
	FailedToParsePartInfo = "failed to parse part info"
	CouldNotFetchCORSContainerInfo = "couldn't fetch CORS container info"
	CouldNotFetchAccessBoxContainerInfo = "couldn't fetch AccessBox container info"
	CloseCredsObjectPayload = "close creds object payload"
	CouldntDeleteLifecycleObject = "couldn't delete lifecycle configuration object"
	CouldntCacheLifecycleConfiguration = "couldn't cache lifecycle configuration"
	GetBucketCors = "get bucket cors"
	GetBucketInfo = "get bucket info"
	ResolveBucket = "resolve bucket"
	FailedToResolveCID = "failed to resolve CID"
	FailedToDiscardPutPayloadProbablyGoroutineLeaks = "failed to discard put payload, probably goroutine leaks"
)

// External storage.
const (
	ObjectIsCopied = "object is copied"
	CouldntDeleteObject = "couldn't delete object"
	CouldNotListUserContainers = "could not list user containers"
	CouldNotFetchContainerInfo = "could not fetch container info"
	CouldntDeleteObjectFromStorageContinueDeleting = "couldn't delete object from storage, continue deleting from tree"
	FailedToPutTombstoneObject = "failed to put tombstone object"
	FailedToListAllObjectRelations = "failed to list all object relations"
	FailedToPutTombstones = "failed to put tombstones"
	CouldntDeleteCorsObject = "couldn't delete cors object"
	BucketIsCreated = "bucket is created"
	CouldntDeleteOldPartObject = "couldn't delete old part object"
	CouldNotPutCompletedObject = "could not put a completed object (multipart upload)"
	CouldNotDeleteUploadPart = "could not delete upload part"
	PutObject = "put object"
	CouldNotFetchObjectMeta = "could not fetch object meta"
	FailedToDeleteObject = "failed to delete object"
	CouldntDeleteLifecycleObject = "couldn't delete lifecycle configuration object"
)

// External blockchain.
const (
	UserGroupsListIsEmpty = "user groups list is empty, subject not found"
)

// External storage tree.
const (
	FoundMoreThanOneUnversionedNode = "found more than one unversioned node"
	GetTreeNode = "get tree node"
	InvalidTreeKV = "invalid tree service meta KV"
	FailedToRemoveOldSystemNode = "failed to remove old system node"
	GetBucketLifecycle = "get bucket lifecycle"
	FailedToRemoveOldPartNode = "failed to remove old part node"
	SystemNodeHasMultipleIDs = "system node has multiple ids"
	ObjectTaggingNodeHasMultipleIDs = "object tagging node has multiple ids"
	BucketTaggingNodeHasMultipleIDs = "bucket tagging node has multiple ids"
	BucketSettingsNodeHasMultipleIDs = "bucket settings node has multiple ids"
	BucketCORSNodeHasMultipleIDs = "bucket cors node has multiple ids"
	GetBucketCorsFromTree = "get bucket cors from tree"
)

// Authmate.
const (
	CreateContainer = "create container"
	CheckContainer = "check container"
	CheckCustomAccessKeyIDUniqueness = "check custom access key id uniqueness"
	StoreBearerTokenIntoFrostFS = "store bearer token into FrostFS"
	UpdateAccessCredObjectIntoFrostFS = "update access cred object into FrostFS"
	SubjectAlreadyExistsInFrostFSID = "subject already exists in frostfsid"
	SetSubjectNameInFrostFSID = "set subject name in frostfsid"
	AddPolicyChainRules = "add policy chain rules"
	PrepareConnectionPool = "prepare connection pool"
	PrepareFrostfsIDClient = "prepare frostfsid client"
	PreparePolicyClient = "prepare policy client"
	CreateSubjectInFrostFSID = "create subject in frostfsid"
	CloseCredsObjectPayload = "close creds object payload"
)

// Log HTTP.
const (
	LogHTTP = "http log"
	FailedToCloseHTTPBody = "failed to close http body"
	FailedToInitializeHTTPLogger = "failed to initialize http logger"
	FailedToReadHTTPBody = "failed to read http body"
	FailedToProcessHTTPBody = "failed to process http body"
	CouldNotFetchLifecycleContainerInfo = "couldn't fetch lifecycle container info"
	BucketLifecycleNodeHasMultipleIDs = "bucket lifecycle node has multiple ids"
	GetBucketLifecycle = "get bucket lifecycle"
	WarnDuplicateNamespaceVHS = "duplicate namespace with enabled VHS, config value skipped"
	WarnValueVHSEnabledFlagWrongType = "the value of the VHS enable flag for the namespace is of the wrong type, config value skipped"
	WarnDomainContainsInvalidPlaceholder = "the domain contains an invalid placeholder, domain skipped"
	FailedToRemoveOldPartNode = "failed to remove old part node"
	CouldntCacheNetworkInfo = "couldn't cache network info"
	NotSupported = "not supported"
	CheckCustomAccessKeyIDUniqueness = "check custom access key id uniqueness"
	FailedToLoadMultinetConfig = "failed to load multinet config"
	MultinetConfigWontBeUpdated = "multinet config won't be updated"
	MultinetDialSuccess = "multinet dial successful"
	MultinetDialFail = "multinet dial failed"
	FailedToParseHTTPTime = "failed to parse http time, header is ignored"
	FailedToPutTombstoneObject = "failed to put tombstone object"
	FailedToCreateWorkerPool = "failed to create worker pool"
	FailedToListAllObjectRelations = "failed to list all object relations"
	WarnInvalidTypeTLSTerminationHeader = "invalid type of value of tls termination header"
	FailedToPutTombstones = "failed to put tombstones"
	WarnDomainContainsPort = "the domain contains a port, domain skipped"
	CouldntCacheNetmap = "couldn't cache netmap"
	BucketSettingsNotFoundUseDefaults = "bucket settings not found, use defaults"
)
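To make the tagging convention above concrete, here is a minimal, hypothetical usage sketch of the `TagField` helper. The constants are the ones defined in the diff above; the import path for the `logs` package is an assumption, not confirmed by this diff:

```go
package main

import (
	"go.uber.org/zap"

	// Assumed import path for the logs package shown in this diff.
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
)

func main() {
	log, _ := zap.NewDevelopment()
	defer log.Sync()

	// Each entry carries a "tag" field; per-tag levels configured
	// under logger.tags can then be applied when filtering output.
	log.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp))
	log.Debug(logs.GetTreeNode, logs.TagField(logs.TagExternalStorageTree))
}
```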

@ -17,16 +17,9 @@ func (l LogEventHandler) DialPerformed(sourceIP net.Addr, _, address string, err

		sourceIPString = sourceIP.Network() + "://" + sourceIP.String()
	}
	if err == nil {
		l.logger.Debug(logs.MultinetDialSuccess,
			zap.String("source", sourceIPString),
			zap.String("destination", address),
			logs.TagField(logs.TagApp))
		l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString), zap.String("destination", address))
	} else {
		l.logger.Debug(logs.MultinetDialFail,
			zap.String("source", sourceIPString),
			zap.String("destination", address),
			zap.Error(err),
			logs.TagField(logs.TagApp))
		l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString), zap.String("destination", address), zap.Error(err))
	}
}

@ -27,7 +27,7 @@ type AppMetricsConfig struct {

func NewAppMetrics(cfg AppMetricsConfig) *AppMetrics {
	if !cfg.Enabled {
		cfg.Logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp))
		cfg.Logger.Warn(logs.MetricsAreDisabled)
	}

	registry := cfg.Registerer

@ -44,7 +44,7 @@ func NewAppMetrics(cfg AppMetricsConfig) *AppMetrics {

func (m *AppMetrics) SetEnabled(enabled bool) {
	if !enabled {
		m.logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp))
		m.logger.Warn(logs.MetricsAreDisabled)
	}

	m.mu.Lock()
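For context, a hypothetical sketch of driving this constructor. The `AppMetricsConfig` field names `Enabled`, `Logger` and `Registerer` come from the hunk above; the `metrics` import path and the `prometheus.Registerer` type of `Registerer` are assumptions:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/zap"

	// Assumed import path for the metrics package shown in this diff.
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
)

func main() {
	log, _ := zap.NewDevelopment()

	// With Enabled: false the constructor logs logs.MetricsAreDisabled at Warn level.
	m := metrics.NewAppMetrics(metrics.AppMetricsConfig{
		Logger:     log,
		Enabled:    false,
		Registerer: prometheus.DefaultRegisterer,
	})

	// SetEnabled(false) would emit the same warning; true switches metrics back on.
	m.SetEnabled(true)
}
```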

@ -257,7 +257,7 @@ func newNodeVersionFromTreeNode(log *zap.Logger, filePath string, treeNode *tree

	if createdStr, ok := treeNode.Get(createdKV); ok {
		if utcMilli, err := strconv.ParseInt(createdStr, 10, 64); err != nil {
			log.Warn(logs.InvalidTreeKV, zap.String(createdKV, createdStr), zap.Error(err), logs.TagField(logs.TagExternalStorageTree))
			log.Warn(logs.InvalidTreeKV, zap.String(createdKV, createdStr), zap.Error(err))
		} else {
			created := time.UnixMilli(utcMilli)
			version.Created = &created

@ -267,7 +267,7 @@ func newNodeVersionFromTreeNode(log *zap.Logger, filePath string, treeNode *tree

	if ownerStr, ok := treeNode.Get(ownerKV); ok {
		var owner user.ID
		if err := owner.DecodeString(ownerStr); err != nil {
			log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, ownerStr), zap.Error(err), logs.TagField(logs.TagExternalStorageTree))
			log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, ownerStr), zap.Error(err))
		} else {
			version.Owner = &owner
		}

@ -275,7 +275,7 @@ func newNodeVersionFromTreeNode(log *zap.Logger, filePath string, treeNode *tree

	if creationEpoch, ok := treeNode.Get(creationEpochKV); ok {
		if epoch, err := strconv.ParseUint(creationEpoch, 10, 64); err != nil {
			log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, creationEpoch), zap.Error(err), logs.TagField(logs.TagExternalStorageTree))
			log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, creationEpoch), zap.Error(err))
		} else {
			version.CreationEpoch = epoch
		}

@ -342,13 +342,13 @@ func newMultipartInfoFromTreeNode(log *zap.Logger, filePath string, treeNode *tr

	if ownerID, ok := treeNode.Get(ownerKV); ok {
		if err := multipartInfo.Owner.DecodeString(ownerID); err != nil {
			log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, ownerID), zap.Error(err), logs.TagField(logs.TagExternalStorageTree))
			log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, ownerID), zap.Error(err))
		}
	}

	if created, ok := treeNode.Get(createdKV); ok {
		if utcMilli, err := strconv.ParseInt(created, 10, 64); err != nil {
			log.Warn(logs.InvalidTreeKV, zap.String(createdKV, created), zap.Error(err), logs.TagField(logs.TagExternalStorageTree))
			log.Warn(logs.InvalidTreeKV, zap.String(createdKV, created), zap.Error(err))
		} else {
			multipartInfo.Created = time.UnixMilli(utcMilli)
		}

@ -356,7 +356,7 @@ func newMultipartInfoFromTreeNode(log *zap.Logger, filePath string, treeNode *tr

	if finished, ok := treeNode.Get(finishedKV); ok {
		if flag, err := strconv.ParseBool(finished); err != nil {
			log.Warn(logs.InvalidTreeKV, zap.String(finishedKV, finished), zap.Error(err), logs.TagField(logs.TagExternalStorageTree))
			log.Warn(logs.InvalidTreeKV, zap.String(finishedKV, finished), zap.Error(err))
		} else {
			multipartInfo.Finished = flag
		}

@ -364,7 +364,7 @@ func newMultipartInfoFromTreeNode(log *zap.Logger, filePath string, treeNode *tr

	if creationEpoch, ok := treeNode.Get(creationEpochKV); ok {
		if epoch, err := strconv.ParseUint(creationEpoch, 10, 64); err != nil {
			log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, creationEpoch), zap.Error(err), logs.TagField(logs.TagExternalStorageTree))
			log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, creationEpoch), zap.Error(err))
		} else {
			multipartInfo.CreationEpoch = epoch
		}

@ -395,23 +395,23 @@ func newMultipartInfo(log *zap.Logger, node NodeResponse) (*data.MultipartInfo,

			multipartInfo.Key = string(kv.GetValue())
		case createdKV:
			if utcMilli, err := strconv.ParseInt(string(kv.GetValue()), 10, 64); err != nil {
				log.Warn(logs.InvalidTreeKV, zap.String(createdKV, string(kv.GetValue())), zap.Error(err), logs.TagField(logs.TagExternalStorageTree))
				log.Warn(logs.InvalidTreeKV, zap.String(createdKV, string(kv.GetValue())), zap.Error(err))
			} else {
				multipartInfo.Created = time.UnixMilli(utcMilli)
			}
		case ownerKV:
			if err := multipartInfo.Owner.DecodeString(string(kv.GetValue())); err != nil {
				log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, string(kv.GetValue())), zap.Error(err), logs.TagField(logs.TagExternalStorageTree))
				log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, string(kv.GetValue())), zap.Error(err))
			}
		case finishedKV:
			if isFinished, err := strconv.ParseBool(string(kv.GetValue())); err != nil {
				log.Warn(logs.InvalidTreeKV, zap.String(finishedKV, string(kv.GetValue())), zap.Error(err), logs.TagField(logs.TagExternalStorageTree))
				log.Warn(logs.InvalidTreeKV, zap.String(finishedKV, string(kv.GetValue())), zap.Error(err))
			} else {
				multipartInfo.Finished = isFinished
			}
		case creationEpochKV:
			if epoch, err := strconv.ParseUint(string(kv.GetValue()), 10, 64); err != nil {
				log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, string(kv.GetValue())), zap.Error(err), logs.TagField(logs.TagExternalStorageTree))
				log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, string(kv.GetValue())), zap.Error(err))
			} else {
				multipartInfo.CreationEpoch = epoch
			}

@ -515,7 +515,7 @@ func (c *Tree) GetSettingsNode(ctx context.Context, bktInfo *data.BucketInfo) (*

	if ownerKeyHex, ok := node.Get(ownerKeyKV); ok {
		if settings.OwnerKey, err = keys.NewPublicKeyFromString(ownerKeyHex); err != nil {
			c.reqLogger(ctx).Error(logs.SettingsNodeInvalidOwnerKey, zap.Error(err), logs.TagField(logs.TagDatapath))
			c.reqLogger(ctx).Error(logs.SettingsNodeInvalidOwnerKey, zap.Error(err))
		}
	}

@ -539,7 +539,7 @@ func (c *Tree) PutSettingsNode(ctx context.Context, bktInfo *data.BucketInfo, se

	latest := multiNode.Latest()
	ind := latest.GetLatestNodeIndex()
	if latest.IsSplit() {
		c.reqLogger(ctx).Error(logs.BucketSettingsNodeHasMultipleIDs, zap.Uint64s("ids", latest.ID), logs.TagField(logs.TagExternalStorageTree))
		c.reqLogger(ctx).Error(logs.BucketSettingsNodeHasMultipleIDs, zap.Uint64s("ids", latest.ID))
	}

	if err = c.service.MoveNode(ctx, bktInfo, systemTree, latest.ID[ind], 0, meta); err != nil {

@ -582,7 +582,7 @@ func (c *Tree) PutBucketCORS(ctx context.Context, bktInfo *data.BucketInfo, addr

	latest := multiNode.Latest()
	ind := latest.GetLatestNodeIndex()
	if latest.IsSplit() {
		c.reqLogger(ctx).Error(logs.BucketCORSNodeHasMultipleIDs, logs.TagField(logs.TagExternalStorageTree))
		c.reqLogger(ctx).Error(logs.BucketCORSNodeHasMultipleIDs)
	}

	if err = c.service.MoveNode(ctx, bktInfo, systemTree, latest.ID[ind], 0, meta); err != nil {

@ -640,14 +640,14 @@ func (c *Tree) cleanOldNodes(ctx context.Context, nodes []*treeNode, bktInfo *da

	for _, node := range nodes {
		ind := node.GetLatestNodeIndex()
		if node.IsSplit() {
			c.reqLogger(ctx).Error(logs.SystemNodeHasMultipleIDs, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64s("ids", node.ID), logs.TagField(logs.TagExternalStorageTree))
			c.reqLogger(ctx).Error(logs.SystemNodeHasMultipleIDs, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64s("ids", node.ID))
		}
		if err := c.service.RemoveNode(ctx, bktInfo, systemTree, node.ID[ind]); err != nil {
			c.reqLogger(ctx).Warn(logs.FailedToRemoveOldSystemNode, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64("id", node.ID[ind]), logs.TagField(logs.TagExternalStorageTree))
			c.reqLogger(ctx).Warn(logs.FailedToRemoveOldSystemNode, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64("id", node.ID[ind]))
		} else {
			addr, err := getTreeNodeAddress(node)
			if err != nil {
				c.log.Warn(logs.FailedToParseAddressInTreeNode, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64("id", node.ID[ind]), logs.TagField(logs.TagDatapath))
				c.log.Warn(logs.FailedToParseAddressInTreeNode, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64("id", node.ID[ind]))
				continue
			}
			res = append(res, addr)

@ -702,7 +702,7 @@ func (c *Tree) PutObjectTagging(ctx context.Context, bktInfo *data.BucketInfo, o

	ind := tagNode.GetLatestNodeIndex()
	if tagNode.IsSplit() {
		c.reqLogger(ctx).Error(logs.ObjectTaggingNodeHasMultipleIDs, logs.TagField(logs.TagExternalStorageTree))
		c.reqLogger(ctx).Error(logs.ObjectTaggingNodeHasMultipleIDs)
	}

	return c.service.MoveNode(ctx, bktInfo, versionTree, tagNode.ID[ind], objVersion.ID, treeTagSet)

@ -751,7 +751,7 @@ func (c *Tree) PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo, t

	latest := multiNode.Latest()
	ind := latest.GetLatestNodeIndex()
	if latest.IsSplit() {
		c.reqLogger(ctx).Error(logs.BucketTaggingNodeHasMultipleIDs, zap.Uint64s("ids", latest.ID), logs.TagField(logs.TagExternalStorageTree))
		c.reqLogger(ctx).Error(logs.BucketTaggingNodeHasMultipleIDs, zap.Uint64s("ids", latest.ID))
	}

	if err = c.service.MoveNode(ctx, bktInfo, systemTree, latest.ID[ind], 0, treeTagSet); err != nil {

@ -1049,7 +1049,7 @@ func (s *VersionsByPrefixStreamImpl) parseNodeResponse(node NodeResponse) (res *

	trNode, fileName, err := parseTreeNode(node)
	if err != nil {
		if !errors.Is(err, errNodeDoesntContainFileName) {
			s.log.Debug(logs.ParseTreeNode, zap.Error(err), logs.TagField(logs.TagDatapath))
			s.log.Debug(logs.ParseTreeNode, zap.Error(err))
		}
		return nil, true, nil
	}

@ -1287,9 +1287,7 @@ func (c *Tree) getUnversioned(ctx context.Context, bktInfo *data.BucketInfo, tre

	if len(nodes) > 1 {
		c.reqLogger(ctx).Debug(logs.FoundMoreThanOneUnversionedNode,
			zap.String("treeID", treeID),
			zap.String("filepath", filepath),
			logs.TagField(logs.TagExternalStorageTree))
			zap.String("treeID", treeID), zap.String("filepath", filepath))
	}

	sort.Slice(nodes, func(i, j int) bool {

@ -1462,8 +1460,7 @@ func (c *Tree) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartN

				zap.String("upload id", info.UploadID),
				zap.Uint64("multipart node id ", multipartNodeID),
				zap.Uint64s("id", part.GetNodeID()),
				zap.Error(err),
				logs.TagField(logs.TagDatapath))
				zap.Error(err))
			continue
		}
		if partInfo.Number == info.Number {

@ -1491,8 +1488,7 @@

			c.reqLogger(ctx).Warn(logs.FailedToRemoveOldPartNode,
				zap.String("key", info.Key),
				zap.String("upload id", info.UploadID),
				zap.Uint64("id", nodeID),
				logs.TagField(logs.TagExternalStorageTree))
				zap.Uint64("id", nodeID))
		}
	}

@ -1518,8 +1514,7 @@ func (c *Tree) GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipart

			// multipart parts nodeID shouldn't have multiple values
			c.reqLogger(ctx).Warn(logs.UnexpectedMultiNodeIDsInSubTreeMultiParts,
				zap.Uint64("multipart node id ", multipartNodeID),
				zap.Uint64s("node ids", part.GetNodeID()),
				logs.TagField(logs.TagDatapath))
				zap.Uint64s("node ids", part.GetNodeID()))
			continue
		}
		if part.GetNodeID()[0] == multipartNodeID {

@ -1530,8 +1525,7 @@

			c.reqLogger(ctx).Warn(logs.FailedToParsePartInfo,
				zap.Uint64("multipart node id ", multipartNodeID),
				zap.Uint64s("node ids", part.GetNodeID()),
				zap.Error(err),
				logs.TagField(logs.TagDatapath))
				zap.Error(err))
			continue
		}
		result = append(result, partInfo)

@ -1562,7 +1556,7 @@ func (c *Tree) PutBucketLifecycleConfiguration(ctx context.Context, bktInfo *dat

	latest := multiNode.Latest()
	ind := latest.GetLatestNodeIndex()
	if latest.IsSplit() {
		c.reqLogger(ctx).Error(logs.BucketLifecycleNodeHasMultipleIDs, logs.TagField(logs.TagDatapath))
		c.reqLogger(ctx).Error(logs.BucketLifecycleNodeHasMultipleIDs)
	}

	if err = c.service.MoveNode(ctx, bktInfo, systemTree, latest.ID[ind], 0, meta); err != nil {

@ -1837,7 +1831,7 @@ func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name

		return nil, tree.ErrNodeNotFound
	}
	if len(nodes) != 1 {
		c.reqLogger(ctx).Warn(logs.FoundSeveralSystemNodes, zap.String("name", name), logs.TagField(logs.TagDatapath))
		c.reqLogger(ctx).Warn(logs.FoundSeveralSystemNodes, zap.String("name", name))
	}

	return newMultiNode(nodes)