From e7f620f137748c9fb4f9716b28b51ba64de5cd01 Mon Sep 17 00:00:00 2001 From: Nikita Zinkevich Date: Thu, 6 Feb 2025 13:18:16 +0300 Subject: [PATCH] [#606] Support log tagging Signed-off-by: Nikita Zinkevich --- api/handler/acl.go | 6 +- api/handler/copy.go | 2 +- api/handler/cors.go | 4 +- api/handler/get.go | 4 +- api/handler/patch.go | 2 +- api/handler/put.go | 6 +- api/handler/util.go | 5 +- api/layer/cache.go | 45 ++- api/layer/container.go | 5 +- api/layer/cors.go | 3 +- api/layer/layer.go | 22 +- api/layer/lifecycle.go | 4 +- api/layer/listing.go | 6 +- api/layer/multipart_upload.go | 32 +- api/layer/object.go | 16 +- api/layer/system_object.go | 2 +- api/layer/tagging.go | 3 +- api/layer/tombstone.go | 6 +- api/middleware/address_style.go | 2 +- api/middleware/auth.go | 14 +- api/middleware/log_http_stub.go | 2 +- api/middleware/metrics.go | 2 +- api/middleware/policy.go | 8 +- api/middleware/reqinfo.go | 5 +- api/middleware/response.go | 2 +- api/router.go | 6 +- cmd/s3-gw/app.go | 165 +++++--- cmd/s3-gw/app_settings.go | 241 +++++------ cmd/s3-gw/logger.go | 210 ++++++++++ cmd/s3-gw/service.go | 12 +- config/config.env | 5 + config/config.yaml | 6 + creds/tokens/credentials.go | 2 +- docs/configuration.md | 33 ++ internal/frostfs/frostfsid/frostfsid.go | 2 +- .../policy/morph_rule_chain_storage.go | 2 +- internal/logs/logs.go | 380 ++++++++++-------- internal/net/event_handler.go | 11 +- metrics/app.go | 4 +- pkg/service/tree/tree.go | 60 +-- 40 files changed, 850 insertions(+), 497 deletions(-) create mode 100644 cmd/s3-gw/logger.go diff --git a/api/handler/acl.go b/api/handler/acl.go index ff2c1b6c..cd744e08 100644 --- a/api/handler/acl.go +++ b/api/handler/acl.go @@ -99,7 +99,7 @@ func (h *handler) encodePrivateCannedACL(ctx context.Context, bktInfo *data.Buck ownerEncodedID := ownerDisplayName if settings.OwnerKey == nil { - h.reqLogger(ctx).Warn(logs.BucketOwnerKeyIsMissing, zap.String("owner", bktInfo.Owner.String())) + h.reqLogger(ctx).Warn(logs.BucketOwnerKeyIsMissing, zap.String("owner", bktInfo.Owner.String()), logs.TagField(logs.TagDatapath)) } else { ownerDisplayName = settings.OwnerKey.Address() ownerEncodedID = hex.EncodeToString(settings.OwnerKey.Bytes()) @@ -150,7 +150,7 @@ func (h *handler) putBucketACLAPEHandler(w http.ResponseWriter, r *http.Request, defer func() { if errBody := r.Body.Close(); errBody != nil { - h.reqLogger(ctx).Warn(logs.CouldNotCloseRequestBody, zap.Error(errBody)) + h.reqLogger(ctx).Warn(logs.CouldNotCloseRequestBody, zap.Error(errBody), logs.TagField(logs.TagDatapath)) } }() @@ -382,7 +382,7 @@ func (h *handler) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) h.logAndSendError(ctx, w, "could not convert s3 policy to native chain policy", reqInfo, err) return } else { - h.reqLogger(ctx).Warn(logs.PolicyCouldntBeConvertedToNativeRules) + h.reqLogger(ctx).Warn(logs.PolicyCouldntBeConvertedToNativeRules, logs.TagField(logs.TagDatapath)) } chainsToSave := []*chain.Chain{s3Chain} diff --git a/api/handler/copy.go b/api/handler/copy.go index f4c8c783..30f13704 100644 --- a/api/handler/copy.go +++ b/api/handler/copy.go @@ -244,7 +244,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { } } - h.reqLogger(ctx).Info(logs.ObjectIsCopied, zap.Stringer("object_id", dstObjInfo.ID)) + h.reqLogger(ctx).Info(logs.ObjectIsCopied, zap.Stringer("object_id", dstObjInfo.ID), logs.TagField(logs.TagExternalStorage)) if dstEncryptionParams.Enabled() { addSSECHeaders(w.Header(), r.Header) diff --git a/api/handler/cors.go 
b/api/handler/cors.go index dcdaa521..fbd272cf 100644 --- a/api/handler/cors.go +++ b/api/handler/cors.go @@ -108,13 +108,13 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) { } bktInfo, err := h.getBucketInfo(ctx, reqInfo.BucketName) if err != nil { - h.reqLogger(ctx).Warn(logs.GetBucketInfo, zap.Error(err)) + h.reqLogger(ctx).Warn(logs.GetBucketInfo, zap.Error(err), logs.TagField(logs.TagExternalBlockchain)) return } cors, err := h.obj.GetBucketCORS(ctx, bktInfo, h.cfg.NewXMLDecoder) if err != nil { - h.reqLogger(ctx).Warn(logs.GetBucketCors, zap.Error(err)) + h.reqLogger(ctx).Warn(logs.GetBucketCors, zap.Error(err), logs.TagField(logs.TagExternalStorageTree)) return } diff --git a/api/handler/get.go b/api/handler/get.go index 3403ac7d..04cebe11 100644 --- a/api/handler/get.go +++ b/api/handler/get.go @@ -296,12 +296,12 @@ func parseConditionalHeaders(headers http.Header, log *zap.Logger) *conditionalA if httpTime, err := parseHTTPTime(headers.Get(api.IfModifiedSince)); err == nil { args.IfModifiedSince = httpTime } else { - log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfModifiedSince, headers.Get(api.IfModifiedSince)), zap.Error(err)) + log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfModifiedSince, headers.Get(api.IfModifiedSince)), zap.Error(err), logs.TagField(logs.TagDatapath)) } if httpTime, err := parseHTTPTime(headers.Get(api.IfUnmodifiedSince)); err == nil { args.IfUnmodifiedSince = httpTime } else { - log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfUnmodifiedSince, headers.Get(api.IfUnmodifiedSince)), zap.Error(err)) + log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfUnmodifiedSince, headers.Get(api.IfUnmodifiedSince)), zap.Error(err), logs.TagField(logs.TagDatapath)) } return args diff --git a/api/handler/patch.go b/api/handler/patch.go index 60efe3d2..deba2cbf 100644 --- a/api/handler/patch.go +++ b/api/handler/patch.go @@ -142,7 +142,7 @@ func parsePatchConditionalHeaders(headers http.Header, log *zap.Logger) *conditi if httpTime, err := parseHTTPTime(headers.Get(api.IfUnmodifiedSince)); err == nil { args.IfUnmodifiedSince = httpTime } else { - log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfUnmodifiedSince, headers.Get(api.IfUnmodifiedSince)), zap.Error(err)) + log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfUnmodifiedSince, headers.Get(api.IfUnmodifiedSince)), zap.Error(err), logs.TagField(logs.TagDatapath)) } return args diff --git a/api/handler/put.go b/api/handler/put.go index 8b9bf7b5..3e5e287f 100644 --- a/api/handler/put.go +++ b/api/handler/put.go @@ -466,7 +466,7 @@ func (h *handler) isTLSCheckRequired(r *http.Request) bool { tlsTermination, err := strconv.ParseBool(tlsTerminationStr) if err != nil { - h.reqLogger(r.Context()).Warn(logs.WarnInvalidTypeTLSTerminationHeader, zap.String("header", tlsTerminationStr), zap.Error(err)) + h.reqLogger(r.Context()).Warn(logs.WarnInvalidTypeTLSTerminationHeader, zap.String("header", tlsTerminationStr), zap.Error(err), logs.TagField(logs.TagDatapath)) return true } @@ -828,7 +828,7 @@ func (h *handler) createBucketHandlerPolicy(w http.ResponseWriter, r *http.Reque h.logAndSendError(ctx, w, "could not create bucket", reqInfo, err) return } - h.reqLogger(ctx).Info(logs.BucketIsCreated, zap.Stringer("container_id", bktInfo.CID)) + h.reqLogger(ctx).Info(logs.BucketIsCreated, zap.Stringer("container_id", bktInfo.CID), logs.TagField(logs.TagExternalStorage)) chains := bucketCannedACLToAPERules(cannedACL, reqInfo, bktInfo.CID) if err = 
h.ape.SaveACLChains(bktInfo.CID.EncodeToString(), chains); err != nil { @@ -1041,7 +1041,7 @@ func isLockEnabled(log *zap.Logger, header http.Header) bool { lockEnabled, err := strconv.ParseBool(lockEnabledStr) if err != nil { - log.Warn(logs.InvalidBucketObjectLockEnabledHeader, zap.String("header", lockEnabledStr), zap.Error(err)) + log.Warn(logs.InvalidBucketObjectLockEnabledHeader, zap.String("header", lockEnabledStr), zap.Error(err), logs.TagField(logs.TagDatapath)) } return lockEnabled diff --git a/api/handler/util.go b/api/handler/util.go index bc2d279c..7d12af0b 100644 --- a/api/handler/util.go +++ b/api/handler/util.go @@ -39,9 +39,10 @@ func (h *handler) logAndSendError(ctx context.Context, w http.ResponseWriter, lo zap.String("object", reqInfo.ObjectName), zap.String("description", logText), zap.String("user", reqInfo.User), - zap.Error(err)} + zap.Error(err), + } fields = append(fields, additional...) - h.reqLogger(ctx).Error(logs.RequestFailed, fields...) + h.reqLogger(ctx).Error(logs.RequestFailed, append(fields, logs.TagField(logs.TagDatapath))...) } func handleDeleteMarker(w http.ResponseWriter, err error) error { diff --git a/api/layer/cache.go b/api/layer/cache.go index 1ef92a68..be6f6618 100644 --- a/api/layer/cache.go +++ b/api/layer/cache.go @@ -54,7 +54,7 @@ func DefaultCachesConfigs(logger *zap.Logger) *CachesConfig { func NewCache(cfg *CachesConfig) *Cache { return &Cache{ - logger: cfg.Logger, + logger: cfg.Logger.With(logs.TagField(logs.TagDatapath)), listsCache: cache.NewObjectsListCache(cfg.ObjectsList), sessionListCache: cache.NewListSessionCache(cfg.SessionList), objCache: cache.New(cfg.Objects), @@ -76,7 +76,8 @@ func (c *Cache) PutBucket(bktInfo *data.BucketInfo) { zap.String("zone", bktInfo.Zone), zap.String("bucket name", bktInfo.Name), zap.Stringer("bucket cid", bktInfo.CID), - zap.Error(err)) + zap.Error(err), + logs.TagField(logs.TagDatapath)) } } @@ -118,11 +119,12 @@ func (c *Cache) PutObject(owner user.ID, extObjInfo *data.ExtendedObjectInfo) { if err := c.objCache.PutObject(extObjInfo); err != nil { c.logger.Warn(logs.CouldntAddObjectToCache, zap.Error(err), zap.String("object_name", extObjInfo.ObjectInfo.Name), zap.String("bucket_name", extObjInfo.ObjectInfo.Bucket), - zap.String("cid", extObjInfo.ObjectInfo.CID.EncodeToString()), zap.String("oid", extObjInfo.ObjectInfo.ID.EncodeToString())) + zap.String("cid", extObjInfo.ObjectInfo.CID.EncodeToString()), zap.String("oid", extObjInfo.ObjectInfo.ID.EncodeToString()), + logs.TagField(logs.TagDatapath)) } if err := c.accessCache.Put(owner, extObjInfo.ObjectInfo.Address().EncodeToString()); err != nil { - c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err)) + c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath)) } } @@ -132,7 +134,8 @@ func (c *Cache) PutObjectWithName(owner user.ID, extObjInfo *data.ExtendedObject if err := c.namesCache.Put(extObjInfo.ObjectInfo.NiceName(), extObjInfo.ObjectInfo.Address()); err != nil { c.logger.Warn(logs.CouldntPutObjAddressToNameCache, zap.String("obj nice name", extObjInfo.ObjectInfo.NiceName()), - zap.Error(err)) + zap.Error(err), + logs.TagField(logs.TagDatapath)) } } @@ -146,11 +149,11 @@ func (c *Cache) GetList(owner user.ID, key cache.ObjectsListKey) []*data.NodeVer func (c *Cache) PutList(owner user.ID, key cache.ObjectsListKey, list []*data.NodeVersion) { if err := c.listsCache.PutVersions(key, list); err != nil { - c.logger.Warn(logs.CouldntCacheListOfObjects, zap.Error(err)) + 
c.logger.Warn(logs.CouldntCacheListOfObjects, zap.Error(err), logs.TagField(logs.TagDatapath)) } if err := c.accessCache.Put(owner, key.String()); err != nil { - c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err)) + c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath)) } } @@ -164,11 +167,11 @@ func (c *Cache) GetListSession(owner user.ID, key cache.ListSessionKey) *data.Li func (c *Cache) PutListSession(owner user.ID, key cache.ListSessionKey, session *data.ListSession) { if err := c.sessionListCache.PutListSession(key, session); err != nil { - c.logger.Warn(logs.CouldntCacheListSession, zap.Error(err)) + c.logger.Warn(logs.CouldntCacheListSession, zap.Error(err), logs.TagField(logs.TagDatapath)) } if err := c.accessCache.Put(owner, key.String()); err != nil { - c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err)) + c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath)) } } @@ -187,11 +190,11 @@ func (c *Cache) GetTagging(owner user.ID, key string) map[string]string { func (c *Cache) PutTagging(owner user.ID, key string, tags map[string]string) { if err := c.systemCache.PutTagging(key, tags); err != nil { - c.logger.Error(logs.CouldntCacheTags, zap.Error(err)) + c.logger.Error(logs.CouldntCacheTags, zap.Error(err), logs.TagField(logs.TagDatapath)) } if err := c.accessCache.Put(owner, key); err != nil { - c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err)) + c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath)) } } @@ -209,11 +212,11 @@ func (c *Cache) GetLockInfo(owner user.ID, key string) *data.LockInfo { func (c *Cache) PutLockInfo(owner user.ID, key string, lockInfo *data.LockInfo) { if err := c.systemCache.PutLockInfo(key, lockInfo); err != nil { - c.logger.Error(logs.CouldntCacheLockInfo, zap.Error(err)) + c.logger.Error(logs.CouldntCacheLockInfo, zap.Error(err), logs.TagField(logs.TagDatapath)) } if err := c.accessCache.Put(owner, key); err != nil { - c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err)) + c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath)) } } @@ -230,11 +233,11 @@ func (c *Cache) GetSettings(owner user.ID, bktInfo *data.BucketInfo) *data.Bucke func (c *Cache) PutSettings(owner user.ID, bktInfo *data.BucketInfo, settings *data.BucketSettings) { key := bktInfo.Name + bktInfo.SettingsObjectName() if err := c.systemCache.PutSettings(key, settings); err != nil { - c.logger.Warn(logs.CouldntCacheBucketSettings, zap.String("bucket", bktInfo.Name), zap.Error(err)) + c.logger.Warn(logs.CouldntCacheBucketSettings, zap.String("bucket", bktInfo.Name), zap.Error(err), logs.TagField(logs.TagDatapath)) } if err := c.accessCache.Put(owner, key); err != nil { - c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err)) + c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath)) } } @@ -252,11 +255,11 @@ func (c *Cache) PutCORS(owner user.ID, bkt *data.BucketInfo, cors *data.CORSConf key := bkt.CORSObjectName() if err := c.systemCache.PutCORS(key, cors); err != nil { - c.logger.Warn(logs.CouldntCacheCors, zap.String("bucket", bkt.Name), zap.Error(err)) + c.logger.Warn(logs.CouldntCacheCors, zap.String("bucket", bkt.Name), zap.Error(err), logs.TagField(logs.TagDatapath)) } if err := c.accessCache.Put(owner, key); err != nil { - 
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err)) + c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath)) } } @@ -278,11 +281,11 @@ func (c *Cache) PutLifecycleConfiguration(owner user.ID, bkt *data.BucketInfo, c key := bkt.LifecycleConfigurationObjectName() if err := c.systemCache.PutLifecycleConfiguration(key, cfg); err != nil { - c.logger.Warn(logs.CouldntCacheLifecycleConfiguration, zap.String("bucket", bkt.Name), zap.Error(err)) + c.logger.Warn(logs.CouldntCacheLifecycleConfiguration, zap.String("bucket", bkt.Name), zap.Error(err), logs.TagField(logs.TagDatapath)) } if err := c.accessCache.Put(owner, key); err != nil { - c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err)) + c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err), logs.TagField(logs.TagDatapath)) } } @@ -296,7 +299,7 @@ func (c *Cache) GetNetworkInfo() *netmap.NetworkInfo { func (c *Cache) PutNetworkInfo(info netmap.NetworkInfo) { if err := c.networkCache.PutNetworkInfo(info); err != nil { - c.logger.Warn(logs.CouldntCacheNetworkInfo, zap.Error(err)) + c.logger.Warn(logs.CouldntCacheNetworkInfo, zap.Error(err), logs.TagField(logs.TagDatapath)) } } @@ -306,7 +309,7 @@ func (c *Cache) GetNetmap() *netmap.NetMap { func (c *Cache) PutNetmap(nm netmap.NetMap) { if err := c.networkCache.PutNetmap(nm); err != nil { - c.logger.Warn(logs.CouldntCacheNetmap, zap.Error(err)) + c.logger.Warn(logs.CouldntCacheNetmap, zap.Error(err), logs.TagField(logs.TagDatapath)) } } diff --git a/api/layer/container.go b/api/layer/container.go index cc9346eb..4e97ae18 100644 --- a/api/layer/container.go +++ b/api/layer/container.go @@ -64,6 +64,7 @@ func (n *Layer) containerInfo(ctx context.Context, prm frostfs.PrmContainer) (*d log.Error(logs.CouldNotParseContainerObjectLockEnabledAttribute, zap.String("lock_enabled", attrLockEnabled), zap.Error(err), + logs.TagField(logs.TagDatapath), ) } } @@ -88,7 +89,7 @@ func (n *Layer) containerList(ctx context.Context, listParams ListBucketsParams) res, err := n.frostFS.UserContainers(ctx, prm) if err != nil { - n.reqLogger(ctx).Error(logs.CouldNotListUserContainers, zap.Error(err)) + n.reqLogger(ctx).Error(logs.CouldNotListUserContainers, zap.Error(err), logs.TagField(logs.TagExternalStorage)) return nil, err } @@ -100,7 +101,7 @@ func (n *Layer) containerList(ctx context.Context, listParams ListBucketsParams) } info, err := n.containerInfo(ctx, getPrm) if err != nil { - n.reqLogger(ctx).Error(logs.CouldNotFetchContainerInfo, zap.Error(err)) + n.reqLogger(ctx).Error(logs.CouldNotFetchContainerInfo, zap.Error(err), logs.TagField(logs.TagExternalStorage)) continue } diff --git a/api/layer/cors.go b/api/layer/cors.go index 925f8ed2..f35f5538 100644 --- a/api/layer/cors.go +++ b/api/layer/cors.go @@ -92,7 +92,8 @@ func (n *Layer) deleteCORSObject(ctx context.Context, bktInfo *data.BucketInfo, if err := n.objectDeleteWithAuth(ctx, corsBkt, addr.Object(), prmAuth); err != nil { n.reqLogger(ctx).Error(logs.CouldntDeleteCorsObject, zap.Error(err), zap.String("cnrID", corsBkt.CID.EncodeToString()), - zap.String("objID", addr.Object().EncodeToString())) + zap.String("objID", addr.Object().EncodeToString()), + logs.TagField(logs.TagExternalStorage)) } } diff --git a/api/layer/layer.go b/api/layer/layer.go index 7b2d8ec3..13ccf6a1 100644 --- a/api/layer/layer.go +++ b/api/layer/layer.go @@ -543,7 +543,9 @@ func (n *Layer) GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams) 
n.reqLogger(ctx).Debug(logs.GetObject, zap.Stringer("cid", p.BktInfo.CID), - zap.Stringer("oid", objInfo.ObjectInfo.ID)) + zap.Stringer("oid", objInfo.ObjectInfo.ID), + logs.TagField(logs.TagDatapath), + ) return objInfo, nil } @@ -596,8 +598,8 @@ func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings if !client.IsErrObjectAlreadyRemoved(obj.Error) && !client.IsErrObjectNotFound(obj.Error) { return obj } - n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting, - zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID), zap.Error(obj.Error)) + n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting, zap.Stringer("cid", bkt.CID), + zap.String("oid", obj.VersionID), zap.Error(obj.Error), logs.TagField(logs.TagExternalStorage)) } if obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeVersion.ID); obj.Error != nil { @@ -635,8 +637,8 @@ func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings if !client.IsErrObjectAlreadyRemoved(obj.Error) && !client.IsErrObjectNotFound(obj.Error) { return obj } - n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting, - zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID), zap.Error(obj.Error)) + n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting, zap.Stringer("cid", bkt.CID), + zap.String("oid", obj.VersionID), zap.Error(obj.Error), logs.TagField(logs.TagExternalStorage)) } } @@ -751,7 +753,7 @@ func (n *Layer) getNodeVersionsToDelete(ctx context.Context, bkt *data.BucketInf return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apierr.GetAPIError(apierr.ErrNoSuchVersion)) } - n.reqLogger(ctx).Debug(logs.GetTreeNodeToDelete, zap.Stringer("cid", bkt.CID), zap.Strings("oids", oids)) + n.reqLogger(ctx).Debug(logs.GetTreeNodeToDelete, zap.Stringer("cid", bkt.CID), zap.Strings("oids", oids), logs.TagField(logs.TagDatapath)) return versionsToDelete, nil } @@ -821,7 +823,7 @@ func (n *Layer) DeleteObjects(ctx context.Context, p *DeleteObjectParams) []*Ver for i, obj := range p.Objects { p.Objects[i] = n.deleteObject(ctx, p.BktInfo, p.Settings, obj, p.NetworkInfo) if p.IsMultiple && p.Objects[i].Error != nil { - n.reqLogger(ctx).Error(logs.CouldntDeleteObject, zap.String("object", obj.String()), zap.Error(p.Objects[i].Error)) + n.reqLogger(ctx).Error(logs.CouldntDeleteObject, zap.String("object", obj.String()), zap.Error(p.Objects[i].Error), logs.TagField(logs.TagExternalStorage)) } } @@ -851,7 +853,7 @@ func (n *Layer) ResolveBucket(ctx context.Context, zone, name string) (cid.ID, e return cid.ID{}, err } - n.reqLogger(ctx).Info(logs.ResolveBucket, zap.Stringer("cid", cnrID)) + n.reqLogger(ctx).Info(logs.ResolveBucket, zap.Stringer("cid", cnrID), logs.TagField(logs.TagExternalBlockchain)) } return cnrID, nil @@ -876,12 +878,12 @@ func (n *Layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error { corsObj, err := n.treeService.GetBucketCORS(ctx, p.BktInfo) if err != nil { - n.reqLogger(ctx).Error(logs.GetBucketCors, zap.Error(err)) + n.reqLogger(ctx).Error(logs.GetBucketCors, zap.Error(err), logs.TagField(logs.TagExternalStorageTree)) } lifecycleObj, treeErr := n.treeService.GetBucketLifecycleConfiguration(ctx, p.BktInfo) if treeErr != nil { - n.reqLogger(ctx).Error(logs.GetBucketLifecycle, zap.Error(treeErr)) + n.reqLogger(ctx).Error(logs.GetBucketLifecycle, zap.Error(treeErr), logs.TagField(logs.TagExternalStorageTree)) } err = n.frostFS.DeleteContainer(ctx, p.BktInfo.CID, 
p.SessionToken) diff --git a/api/layer/lifecycle.go b/api/layer/lifecycle.go index f16aded3..a20eff93 100644 --- a/api/layer/lifecycle.go +++ b/api/layer/lifecycle.go @@ -79,7 +79,9 @@ func (n *Layer) deleteLifecycleObject(ctx context.Context, bktInfo *data.BucketI if err := n.objectDeleteWithAuth(ctx, lifecycleBkt, addr.Object(), prmAuth); err != nil { n.reqLogger(ctx).Error(logs.CouldntDeleteLifecycleObject, zap.Error(err), zap.String("cid", lifecycleBkt.CID.EncodeToString()), - zap.String("oid", addr.Object().EncodeToString())) + zap.String("oid", addr.Object().EncodeToString()), + logs.TagField(logs.TagExternalStorage), + ) } } diff --git a/api/layer/listing.go b/api/layer/listing.go index 790243b8..fb884942 100644 --- a/api/layer/listing.go +++ b/api/layer/listing.go @@ -541,7 +541,7 @@ func (n *Layer) initWorkerPool(ctx context.Context, size int, p commonVersionsLi realSize, err := GetObjectSize(oi) if err != nil { - reqLog.Debug(logs.FailedToGetRealObjectSize, zap.Error(err)) + reqLog.Debug(logs.FailedToGetRealObjectSize, zap.Error(err), logs.TagField(logs.TagDatapath)) realSize = oi.Size } @@ -554,7 +554,7 @@ func (n *Layer) initWorkerPool(ctx context.Context, size int, p commonVersionsLi }) if err != nil { wg.Done() - reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err)) + reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath)) } }(node) } @@ -645,7 +645,7 @@ func (n *Layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo meta, err := n.objectHead(ctx, bktInfo, node.OID) if err != nil { - n.reqLogger(ctx).Warn(logs.CouldNotFetchObjectMeta, zap.Error(err)) + n.reqLogger(ctx).Warn(logs.CouldNotFetchObjectMeta, zap.Error(err), logs.TagField(logs.TagExternalStorage)) return nil } diff --git a/api/layer/multipart_upload.go b/api/layer/multipart_upload.go index bccb0aae..1996add4 100644 --- a/api/layer/multipart_upload.go +++ b/api/layer/multipart_upload.go @@ -213,7 +213,7 @@ func (n *Layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, er func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInfo, p *UploadPartParams) (*data.ObjectInfo, error) { encInfo := FormEncryptionInfo(multipartInfo.Meta) if err := p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil { - n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err)) + n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err), logs.TagField(logs.TagDatapath)) return nil, apierr.GetAPIError(apierr.ErrInvalidEncryptionParameters) } @@ -266,7 +266,10 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner) err = n.frostFS.DeleteObject(ctx, prm) if err != nil { - n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID)) + n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, + zap.Stringer("cid", bktInfo.CID), + zap.Stringer("oid", createdObj.ID), + logs.TagField(logs.TagExternalStorage)) } return nil, apierr.GetAPIError(apierr.ErrInvalidDigest) } @@ -283,7 +286,10 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf if !bytes.Equal(contentHashBytes, createdObj.HashSum) { err = n.objectDelete(ctx, bktInfo, createdObj.ID) if err != nil { - n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID)) + n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, + zap.Stringer("cid", 
bktInfo.CID), + zap.Stringer("oid", createdObj.ID), + logs.TagField(logs.TagExternalStorage)) } return nil, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch) } @@ -291,7 +297,7 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf n.reqLogger(ctx).Debug(logs.UploadPart, zap.String("multipart upload", p.Info.UploadID), zap.Int("part number", p.PartNumber), - zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID)) + zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID), logs.TagField(logs.TagDatapath)) partInfo := &data.PartInfo{ Key: p.Info.Key, @@ -314,7 +320,8 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil { n.reqLogger(ctx).Error(logs.CouldntDeleteOldPartObject, zap.Error(err), zap.String("cid", bktInfo.CID.EncodeToString()), - zap.String("oid", oldPartID.EncodeToString())) + zap.String("oid", oldPartID.EncodeToString()), + logs.TagField(logs.TagExternalStorage)) } } } @@ -481,7 +488,8 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar n.reqLogger(ctx).Error(logs.CouldNotPutCompletedObject, zap.String("uploadID", p.Info.UploadID), zap.String("uploadKey", p.Info.Key), - zap.Error(err)) + zap.Error(err), + logs.TagField(logs.TagExternalStorage)) return nil, nil, apierr.GetAPIError(apierr.ErrInternalError) } @@ -493,7 +501,8 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil { n.reqLogger(ctx).Warn(logs.CouldNotDeleteUploadPart, zap.Stringer("cid", p.Info.Bkt.CID), zap.Stringer("oid", &partInfo.OID), - zap.Error(err)) + zap.Error(err), + logs.TagField(logs.TagExternalStorage)) } addr.SetObject(partInfo.OID) n.cache.DeleteObject(addr) @@ -587,7 +596,7 @@ func (n *Layer) deleteUploadedParts(ctx context.Context, bkt *data.BucketInfo, p oids, err := relations.ListAllRelations(ctx, n.frostFS.Relations(), bkt.CID, info.OID, tokens) if err != nil { n.reqLogger(ctx).Warn(logs.FailedToListAllObjectRelations, zap.String("cid", bkt.CID.EncodeToString()), - zap.String("oid", info.OID.EncodeToString()), zap.Error(err)) + zap.String("oid", info.OID.EncodeToString()), zap.Error(err), logs.TagField(logs.TagExternalStorage)) continue } members = append(members, append(oids, info.OID)...) @@ -596,7 +605,7 @@ func (n *Layer) deleteUploadedParts(ctx context.Context, bkt *data.BucketInfo, p err := n.putTombstones(ctx, bkt, networkInfo, members) if err != nil { - n.reqLogger(ctx).Warn(logs.FailedToPutTombstones, zap.Error(err)) + n.reqLogger(ctx).Warn(logs.FailedToPutTombstones, zap.Error(err), logs.TagField(logs.TagExternalStorage)) } } @@ -609,7 +618,7 @@ func (n *Layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn encInfo := FormEncryptionInfo(multipartInfo.Meta) if err = p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil { - n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err)) + n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err), logs.TagField(logs.TagDatapath)) return nil, apierr.GetAPIError(apierr.ErrInvalidEncryptionParameters) } @@ -701,7 +710,8 @@ func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data. 
zap.Stringer("cid", p.Bkt.CID), zap.String("upload id", p.UploadID), zap.Ints("part numbers", partsNumbers), - zap.Strings("oids", oids)) + zap.Strings("oids", oids), + logs.TagField(logs.TagDatapath)) return multipartInfo, res, nil } diff --git a/api/layer/object.go b/api/layer/object.go index ed9f1339..d5ab2c20 100644 --- a/api/layer/object.go +++ b/api/layer/object.go @@ -295,7 +295,11 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend if !bytes.Equal(headerMd5Hash, createdObj.MD5Sum) { err = n.objectDelete(ctx, p.BktInfo, createdObj.ID) if err != nil { - n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID)) + n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, + zap.Stringer("cid", p.BktInfo.CID), + zap.Stringer("oid", createdObj.ID), + logs.TagField(logs.TagExternalStorage), + ) } return nil, apierr.GetAPIError(apierr.ErrBadDigest) } @@ -309,13 +313,17 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend if !bytes.Equal(contentHashBytes, createdObj.HashSum) { err = n.objectDelete(ctx, p.BktInfo, createdObj.ID) if err != nil { - n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID)) + n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, + zap.Stringer("cid", p.BktInfo.CID), + zap.Stringer("oid", createdObj.ID), + logs.TagField(logs.TagExternalStorage)) } return nil, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch) } } - n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID)) + n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), + zap.Stringer("oid", createdObj.ID), logs.TagField(logs.TagExternalStorage)) now := TimeNow(ctx) newVersion := &data.NodeVersion{ BaseNodeVersion: data.BaseNodeVersion{ @@ -540,7 +548,7 @@ func (n *Layer) objectPutAndHash(ctx context.Context, prm frostfs.PrmObjectCreat func (n *Layer) payloadDiscard(ctx context.Context, payload io.Reader) { if payload != nil { if _, errDiscard := io.Copy(io.Discard, payload); errDiscard != nil { - n.reqLogger(ctx).Warn(logs.FailedToDiscardPutPayloadProbablyGoroutineLeaks, zap.Error(errDiscard)) + n.reqLogger(ctx).Warn(logs.FailedToDiscardPutPayloadProbablyGoroutineLeaks, zap.Error(errDiscard), logs.TagField(logs.TagExternalStorage)) } } } diff --git a/api/layer/system_object.go b/api/layer/system_object.go index 4f7f40ec..da8eb943 100644 --- a/api/layer/system_object.go +++ b/api/layer/system_object.go @@ -217,7 +217,7 @@ func (n *Layer) GetBucketSettings(ctx context.Context, bktInfo *data.BucketInfo) return nil, err } settings = &data.BucketSettings{Versioning: data.VersioningUnversioned} - n.reqLogger(ctx).Debug(logs.BucketSettingsNotFoundUseDefaults) + n.reqLogger(ctx).Debug(logs.BucketSettingsNotFoundUseDefaults, logs.TagField(logs.TagDatapath)) } n.cache.PutSettings(owner, bktInfo, settings) diff --git a/api/layer/tagging.go b/api/layer/tagging.go index a80e72b4..d2183f76 100644 --- a/api/layer/tagging.go +++ b/api/layer/tagging.go @@ -168,7 +168,8 @@ func (n *Layer) getNodeVersion(ctx context.Context, objVersion *data.ObjectVersi if err == nil && version != nil && !version.IsDeleteMarker { n.reqLogger(ctx).Debug(logs.GetTreeNode, - zap.Stringer("cid", objVersion.BktInfo.CID), zap.Stringer("oid", version.OID)) + zap.Stringer("cid", objVersion.BktInfo.CID), + zap.Stringer("oid", version.OID), logs.TagField(logs.TagExternalStorageTree)) } return version, 
err diff --git a/api/layer/tombstone.go b/api/layer/tombstone.go index 3b4e661c..5aac279d 100644 --- a/api/layer/tombstone.go +++ b/api/layer/tombstone.go @@ -65,13 +65,13 @@ func (n *Layer) submitPutTombstone(ctx context.Context, bkt *data.BucketInfo, me defer wg.Done() if err := n.putTombstoneObject(ctx, tomb, bkt); err != nil { - n.reqLogger(ctx).Warn(logs.FailedToPutTombstoneObject, zap.String("cid", bkt.CID.EncodeToString()), zap.Error(err)) + n.reqLogger(ctx).Warn(logs.FailedToPutTombstoneObject, zap.String("cid", bkt.CID.EncodeToString()), zap.Error(err), logs.TagField(logs.TagExternalStorage)) errCh <- fmt.Errorf("put tombstone object: %w", err) } }) if err != nil { wg.Done() - n.reqLogger(ctx).Warn(logs.FailedToSubmitTaskToPool, zap.Error(err)) + n.reqLogger(ctx).Warn(logs.FailedToSubmitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath)) errCh <- fmt.Errorf("submit task to pool: %w", err) } } @@ -106,7 +106,7 @@ func (n *Layer) getMembers(ctx context.Context, cnrID cid.ID, objID oid.ID, toke } n.reqLogger(ctx).Warn(logs.FailedToListAllObjectRelations, zap.String("cid", cnrID.EncodeToString()), - zap.String("oid", objID.EncodeToString()), zap.Error(err)) + zap.String("oid", objID.EncodeToString()), zap.Error(err), logs.TagField(logs.TagExternalStorage)) return nil, nil } return append(oids, objID), nil diff --git a/api/middleware/address_style.go b/api/middleware/address_style.go index 6e82ae68..bee60713 100644 --- a/api/middleware/address_style.go +++ b/api/middleware/address_style.go @@ -110,7 +110,7 @@ func preparePathStyleAddress(reqInfo *ReqInfo, r *http.Request, reqLogger *zap.L // https://github.com/go-chi/chi/issues/641 // https://github.com/go-chi/chi/issues/642 if obj, err := url.PathUnescape(reqInfo.ObjectName); err != nil { - reqLogger.Warn(logs.FailedToUnescapeObjectName, zap.Error(err)) + reqLogger.Warn(logs.FailedToUnescapeObjectName, zap.Error(err), logs.TagField(logs.TagDatapath)) } else { reqInfo.ObjectName = obj } diff --git a/api/middleware/auth.go b/api/middleware/auth.go index 2d9a91f5..921b3872 100644 --- a/api/middleware/auth.go +++ b/api/middleware/auth.go @@ -53,15 +53,15 @@ func Auth(center Center, log *zap.Logger) Func { box, err := center.Authenticate(r) if err != nil { if errors.Is(err, ErrNoAuthorizationHeader) { - reqLogOrDefault(ctx, log).Debug(logs.CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed, zap.Error(err)) + reqLogOrDefault(ctx, log).Debug(logs.CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed, zap.Error(err), logs.TagField(logs.TagDatapath)) } else { - reqLogOrDefault(ctx, log).Error(logs.FailedToPassAuthentication, zap.Error(err)) + reqLogOrDefault(ctx, log).Error(logs.FailedToPassAuthentication, zap.Error(err), logs.TagField(logs.TagDatapath)) err = apierr.TransformToS3Error(err) if err.(apierr.Error).ErrCode == apierr.ErrInternalError { err = apierr.GetAPIError(apierr.ErrAccessDenied) } if _, wrErr := WriteErrorResponse(w, GetReqInfo(r.Context()), err); wrErr != nil { - reqLogOrDefault(ctx, log).Error(logs.FailedToWriteResponse, zap.Error(wrErr)) + reqLogOrDefault(ctx, log).Error(logs.FailedToWriteResponse, zap.Error(wrErr), logs.TagField(logs.TagDatapath)) } return } @@ -71,7 +71,7 @@ func Auth(center Center, log *zap.Logger) Func { if box.AccessBox.Gate.BearerToken != nil { reqInfo.User = bearer.ResolveIssuer(*box.AccessBox.Gate.BearerToken).String() } - reqLogOrDefault(ctx, log).Debug(logs.SuccessfulAuth, zap.String("accessKeyID", box.AuthHeaders.AccessKeyID)) + reqLogOrDefault(ctx, log).Debug(logs.SuccessfulAuth, 
zap.String("accessKeyID", box.AuthHeaders.AccessKeyID), logs.TagField(logs.TagDatapath)) } h.ServeHTTP(w, r.WithContext(ctx)) @@ -89,15 +89,15 @@ func FrostfsIDValidation(frostfsID FrostFSIDValidator, log *zap.Logger) Func { ctx := r.Context() bd, err := GetBoxData(ctx) if err != nil || bd.Gate.BearerToken == nil { - reqLogOrDefault(ctx, log).Debug(logs.AnonRequestSkipFrostfsIDValidation) + reqLogOrDefault(ctx, log).Debug(logs.AnonRequestSkipFrostfsIDValidation, logs.TagField(logs.TagDatapath)) h.ServeHTTP(w, r) return } if err = validateBearerToken(frostfsID, bd.Gate.BearerToken); err != nil { - reqLogOrDefault(ctx, log).Error(logs.FrostfsIDValidationFailed, zap.Error(err)) + reqLogOrDefault(ctx, log).Error(logs.FrostfsIDValidationFailed, zap.Error(err), logs.TagField(logs.TagDatapath)) if _, wrErr := WriteErrorResponse(w, GetReqInfo(r.Context()), err); wrErr != nil { - reqLogOrDefault(ctx, log).Error(logs.FailedToWriteResponse, zap.Error(wrErr)) + reqLogOrDefault(ctx, log).Error(logs.FailedToWriteResponse, zap.Error(wrErr), logs.TagField(logs.TagDatapath)) } return } diff --git a/api/middleware/log_http_stub.go b/api/middleware/log_http_stub.go index 4c53840f..14634502 100644 --- a/api/middleware/log_http_stub.go +++ b/api/middleware/log_http_stub.go @@ -23,7 +23,7 @@ type ( ) func LogHTTP(l *zap.Logger, _ LogHTTPSettings) Func { - l.Warn(logs.LogHTTPDisabledInThisBuild) + l.Warn(logs.LogHTTPDisabledInThisBuild, logs.TagField(logs.TagApp)) return func(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { h.ServeHTTP(w, r) diff --git a/api/middleware/metrics.go b/api/middleware/metrics.go index fca113a3..9f4b49d0 100644 --- a/api/middleware/metrics.go +++ b/api/middleware/metrics.go @@ -139,7 +139,7 @@ func resolveCID(log *zap.Logger, resolveContainerID ContainerIDResolveFunc) cidR containerID, err := resolveContainerID(ctx, reqInfo.BucketName) if err != nil { - reqLogOrDefault(ctx, log).Debug(logs.FailedToResolveCID, zap.Error(err)) + reqLogOrDefault(ctx, log).Debug(logs.FailedToResolveCID, zap.Error(err), logs.TagField(logs.TagExternalBlockchain)) return "" } diff --git a/api/middleware/policy.go b/api/middleware/policy.go index 07340833..5d12c504 100644 --- a/api/middleware/policy.go +++ b/api/middleware/policy.go @@ -90,10 +90,10 @@ func PolicyCheck(cfg PolicyConfig) Func { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() if err := policyCheck(r, cfg); err != nil { - reqLogOrDefault(ctx, cfg.Log).Error(logs.PolicyValidationFailed, zap.Error(err)) + reqLogOrDefault(ctx, cfg.Log).Error(logs.PolicyValidationFailed, zap.Error(err), logs.TagField(logs.TagDatapath)) err = apierr.TransformToS3Error(err) if _, wrErr := WriteErrorResponse(w, GetReqInfo(ctx), err); wrErr != nil { - reqLogOrDefault(ctx, cfg.Log).Error(logs.FailedToWriteResponse, zap.Error(wrErr)) + reqLogOrDefault(ctx, cfg.Log).Error(logs.FailedToWriteResponse, zap.Error(wrErr), logs.TagField(logs.TagDatapath)) } return } @@ -200,7 +200,9 @@ func getPolicyRequest(r *http.Request, cfg PolicyConfig, reqType ReqType, bktNam reqLogOrDefault(r.Context(), cfg.Log).Debug(logs.PolicyRequest, zap.String("action", op), zap.String("resource", res), zap.Any("request properties", requestProps), - zap.Any("resource properties", resourceProps)) + zap.Any("resource properties", resourceProps), + logs.TagField(logs.TagDatapath), + ) return testutil.NewRequest(op, testutil.NewResource(res, resourceProps), requestProps), pk, groups, nil } diff --git 
a/api/middleware/reqinfo.go b/api/middleware/reqinfo.go index 9d3cdef5..fe011807 100644 --- a/api/middleware/reqinfo.go +++ b/api/middleware/reqinfo.go @@ -166,7 +166,7 @@ func Request(log *zap.Logger, settings RequestSettings) Func { // generate random UUIDv4 id, err := uuid.NewRandom() if err != nil { - log.Error(logs.FailedToGenerateRequestID, zap.Error(err)) + log.Error(logs.FailedToGenerateRequestID, zap.Error(err), logs.TagField(logs.TagDatapath)) } // set request id into response header @@ -198,7 +198,8 @@ func Request(log *zap.Logger, settings RequestSettings) Func { r = r.WithContext(SetReqLogger(ctx, reqLogger)) reqLogger.Info(logs.RequestStart, zap.String("host", r.Host), - zap.String("remote_host", reqInfo.RemoteHost), zap.String("namespace", reqInfo.Namespace)) + zap.String("remote_host", reqInfo.RemoteHost), zap.String("namespace", reqInfo.Namespace), + logs.TagField(logs.TagDatapath)) // continue execution h.ServeHTTP(lw, r) diff --git a/api/middleware/response.go b/api/middleware/response.go index e86151ff..eb2ac725 100644 --- a/api/middleware/response.go +++ b/api/middleware/response.go @@ -311,7 +311,7 @@ func LogSuccessResponse(l *zap.Logger) Func { } ctx := r.Context() - reqLogger := reqLogOrDefault(ctx, l) + reqLogger := reqLogOrDefault(ctx, l).With(logs.TagField(logs.TagDatapath)) reqInfo := GetReqInfo(ctx) fields := make([]zap.Field, 0, 6) diff --git a/api/router.go b/api/router.go index 49f348fd..832f9405 100644 --- a/api/router.go +++ b/api/router.go @@ -245,13 +245,14 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) { zap.String("method", reqInfo.API), zap.String("http method", r.Method), zap.String("url", r.RequestURI), + logs.TagField(logs.TagDatapath), } if wrErr != nil { fields = append(fields, zap.NamedError("write_response_error", wrErr)) } - log.Error(logs.RequestUnmatched, fields...) + log.Error(logs.RequestUnmatched, append(fields, logs.TagField(logs.TagDatapath))...) } } @@ -266,13 +267,14 @@ func notSupportedHandler() http.HandlerFunc { fields := []zap.Field{ zap.String("http method", r.Method), zap.String("url", r.RequestURI), + logs.TagField(logs.TagDatapath), } if wrErr != nil { fields = append(fields, zap.NamedError("write_response_error", wrErr)) } - log.Error(logs.NotSupported, fields...) + log.Error(logs.NotSupported, append(fields, logs.TagField(logs.TagDatapath))...) 
} } } diff --git a/cmd/s3-gw/app.go b/cmd/s3-gw/app.go index 72b49baf..3cc4d192 100644 --- a/cmd/s3-gw/app.go +++ b/cmd/s3-gw/app.go @@ -53,6 +53,7 @@ import ( "github.com/panjf2000/ants/v2" "github.com/spf13/viper" "go.uber.org/zap" + "go.uber.org/zap/zapcore" "golang.org/x/exp/slices" "golang.org/x/text/encoding/ianaindex" "google.golang.org/grpc" @@ -91,6 +92,10 @@ type ( wrkDone chan struct{} } + tagsConfig struct { + tagLogs sync.Map + } + loggerSettings struct { mu sync.RWMutex appMetrics *metrics.AppMetrics @@ -99,6 +104,7 @@ type ( appSettings struct { logLevel zap.AtomicLevel httpLogging s3middleware.LogHTTPConfig + tagsConfig *tagsConfig maxClient maxClientsConfig defaultMaxAge int reconnectInterval time.Duration @@ -138,13 +144,54 @@ type ( deadline time.Duration count int } - - Logger struct { - logger *zap.Logger - lvl zap.AtomicLevel - } ) +func (t *tagsConfig) LevelEnabled(tag string, tgtLevel zapcore.Level) bool { + lvl, ok := t.tagLogs.Load(tag) + if !ok { + return false + } + + return lvl.(zapcore.Level).Enabled(tgtLevel) +} + +func (t *tagsConfig) update(cfg *viper.Viper) error { + tags, err := fetchLogTagsConfig(cfg) + if err != nil { + return err + } + + t.tagLogs.Range(func(key, value any) bool { + k := key.(string) + v := value.(zapcore.Level) + + if lvl, ok := tags[k]; ok { + if lvl != v { + t.tagLogs.Store(key, lvl) + } + } else { + t.tagLogs.Delete(key) + delete(tags, k) + } + return true + }) + + for k, v := range tags { + t.tagLogs.Store(k, v) + } + + return nil +} + +func newTagsConfig(v *viper.Viper) *tagsConfig { + var t tagsConfig + if err := t.update(v); err != nil { + // panic here is analogue of the similar panic during common log level initialization. + panic(err.Error()) + } + return &t +} + func (s *loggerSettings) DroppedLogsInc() { s.mu.RLock() defer s.mu.RUnlock() @@ -163,8 +210,10 @@ func (s *loggerSettings) setMetrics(appMetrics *metrics.AppMetrics) { func newApp(ctx context.Context, cfg *appCfg) *App { logSettings := &loggerSettings{} - log := pickLogger(cfg.config(), logSettings) + tagConfig := newTagsConfig(cfg.config()) + log := pickLogger(cfg.config(), logSettings, tagConfig) settings := newAppSettings(log, cfg.config()) + settings.tagsConfig = tagConfig appCache := layer.NewCache(getCacheOptions(cfg.config(), log.logger)) app := &App{ @@ -205,7 +254,7 @@ func (a *App) initAuthCenter(ctx context.Context) { if a.config().IsSet(cfgContainersAccessBox) { cnrID, err := a.resolveContainerID(ctx, cfgContainersAccessBox) if err != nil { - a.log.Fatal(logs.CouldNotFetchAccessBoxContainerInfo, zap.Error(err)) + a.log.Fatal(logs.CouldNotFetchAccessBoxContainerInfo, zap.Error(err), logs.TagField(logs.TagApp)) } a.settings.accessbox = &cnrID } @@ -224,7 +273,7 @@ func (a *App) initLayer(ctx context.Context) { // prepare random key for anonymous requests randomKey, err := keys.NewPrivateKey() if err != nil { - a.log.Fatal(logs.CouldntGenerateRandomKey, zap.Error(err)) + a.log.Fatal(logs.CouldntGenerateRandomKey, zap.Error(err), logs.TagField(logs.TagApp)) } var gateOwner user.ID @@ -234,7 +283,7 @@ func (a *App) initLayer(ctx context.Context) { if a.config().IsSet(cfgContainersCORS) { corsCnrInfo, err = a.fetchContainerInfo(ctx, cfgContainersCORS) if err != nil { - a.log.Fatal(logs.CouldNotFetchCORSContainerInfo, zap.Error(err)) + a.log.Fatal(logs.CouldNotFetchCORSContainerInfo, zap.Error(err), logs.TagField(logs.TagApp)) } } @@ -242,7 +291,7 @@ func (a *App) initLayer(ctx context.Context) { if a.config().IsSet(cfgContainersLifecycle) { lifecycleCnrInfo, 
err = a.fetchContainerInfo(ctx, cfgContainersLifecycle) if err != nil { - a.log.Fatal(logs.CouldNotFetchLifecycleContainerInfo, zap.Error(err)) + a.log.Fatal(logs.CouldNotFetchLifecycleContainerInfo, zap.Error(err), logs.TagField(logs.TagApp)) } } @@ -268,7 +317,7 @@ func (a *App) initLayer(ctx context.Context) { func (a *App) initWorkerPool() *ants.Pool { workerPool, err := ants.NewPool(a.settings.workerPoolSize) if err != nil { - a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err)) + a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err), logs.TagField(logs.TagApp)) } return workerPool } @@ -277,6 +326,7 @@ func newAppSettings(log *Logger, v *viper.Viper) *appSettings { settings := &appSettings{ logLevel: log.lvl, httpLogging: s3middleware.LogHTTPConfig{}, + tagsConfig: newTagsConfig(v), maxClient: newMaxClients(v), defaultMaxAge: fetchDefaultMaxAge(v, log.logger), reconnectInterval: fetchReconnectInterval(v), @@ -623,7 +673,7 @@ func (a *App) initFrostfsID(ctx context.Context) { }, }) if err != nil { - a.log.Fatal(logs.InitFrostfsIDContractFailed, zap.Error(err)) + a.log.Fatal(logs.InitFrostfsIDFailed, zap.Error(err), logs.TagField(logs.TagApp)) } a.frostfsid, err = frostfsid.NewFrostFSID(frostfsid.Config{ @@ -632,7 +682,7 @@ func (a *App) initFrostfsID(ctx context.Context) { Logger: a.log, }) if err != nil { - a.log.Fatal(logs.InitFrostfsIDContractFailed, zap.Error(err)) + a.log.Fatal(logs.InitFrostfsIDFailed, zap.Error(err), logs.TagField(logs.TagApp)) } } @@ -648,7 +698,7 @@ func (a *App) initPolicyStorage(ctx context.Context) { }, }) if err != nil { - a.log.Fatal(logs.InitPolicyContractFailed, zap.Error(err)) + a.log.Fatal(logs.InitPolicyContractFailed, zap.Error(err), logs.TagField(logs.TagApp)) } a.policyStorage = policy.NewStorage(policy.StorageConfig{ @@ -662,7 +712,7 @@ func (a *App) initResolver() { var err error a.bucketResolver, err = resolver.NewBucketResolver(a.getResolverOrder(), a.getResolverConfig()) if err != nil { - a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err)) + a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err), logs.TagField(logs.TagApp)) } } @@ -677,11 +727,11 @@ func (a *App) getResolverOrder() []string { order := a.config().GetStringSlice(cfgResolveOrder) if a.config().GetString(cfgRPCEndpoint) == "" { order = remove(order, resolver.NNSResolver) - a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided) + a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided, logs.TagField(logs.TagApp)) } if len(order) == 0 { - a.log.Info(logs.ContainerResolverWillBeDisabled) + a.log.Info(logs.ContainerResolverWillBeDisabled, logs.TagField(logs.TagApp)) } return order @@ -704,13 +754,13 @@ func (a *App) initTracing(ctx context.Context) { if trustedCa := a.config().GetString(cfgTracingTrustedCa); trustedCa != "" { caBytes, err := os.ReadFile(trustedCa) if err != nil { - a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err)) + a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp)) return } certPool := x509.NewCertPool() ok := certPool.AppendCertsFromPEM(caBytes) if !ok { - a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert")) + a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"), logs.TagField(logs.TagApp)) return } cfg.ServerCaCertPool = certPool @@ -718,17 +768,17 @@ func (a *App) initTracing(ctx context.Context) { attributes, err := fetchTracingAttributes(a.config()) if err != nil { - 
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err)) + a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp)) return } cfg.Attributes = attributes updated, err := tracing.Setup(ctx, cfg) if err != nil { - a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err)) + a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp)) } if updated { - a.log.Info(logs.TracingConfigUpdated) + a.log.Info(logs.TracingConfigUpdated, logs.TagField(logs.TagApp)) } } @@ -738,7 +788,7 @@ func (a *App) shutdownTracing() { defer cancel() if err := tracing.Shutdown(shdnCtx); err != nil { - a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err)) + a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err), logs.TagField(logs.TagApp)) } } @@ -755,7 +805,7 @@ func newMaxClients(cfg *viper.Viper) maxClientsConfig { func getDialerSource(logger *zap.Logger, cfg *viper.Viper) *internalnet.DialerSource { source, err := internalnet.NewDialerSource(fetchMultinetConfig(cfg, logger)) if err != nil { - logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err)) + logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err), logs.TagField(logs.TagApp)) } return source } @@ -767,12 +817,12 @@ func (a *App) initPools(ctx context.Context) { password := wallet.GetPassword(a.config(), cfgWalletPassphrase) key, err := wallet.GetKeyFromPath(a.config().GetString(cfgWalletPath), a.config().GetString(cfgWalletAddress), password) if err != nil { - a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err)) + a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err), logs.TagField(logs.TagApp)) } prm.SetKey(&key.PrivateKey) prmTree.SetKey(key) - a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes()))) + a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())), logs.TagField(logs.TagApp)) for _, peer := range fetchPeers(a.log, a.config()) { prm.AddNode(peer) @@ -799,8 +849,8 @@ func (a *App) initPools(ctx context.Context) { prm.SetGracefulCloseOnSwitchTimeout(fetchSetGracefulCloseOnSwitchTimeout(a.config())) - prm.SetLogger(a.log) - prmTree.SetLogger(a.log) + prm.SetLogger(a.log.With(logs.TagField(logs.TagDatapath))) + prmTree.SetLogger(a.log.With(logs.TagField(logs.TagDatapath))) prmTree.SetMaxRequestAttempts(a.config().GetInt(cfgTreePoolMaxAttempts)) @@ -814,11 +864,11 @@ func (a *App) initPools(ctx context.Context) { p, err := pool.NewPool(prm) if err != nil { - a.log.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err)) + a.log.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err), logs.TagField(logs.TagApp)) } if err = p.Dial(ctx); err != nil { - a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err)) + a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err), logs.TagField(logs.TagApp)) } if a.config().GetBool(cfgTreePoolNetmapSupport) { @@ -827,10 +877,10 @@ func (a *App) initPools(ctx context.Context) { treePool, err := treepool.NewPool(prmTree) if err != nil { - a.log.Fatal(logs.FailedToCreateTreePool, zap.Error(err)) + a.log.Fatal(logs.FailedToCreateTreePool, zap.Error(err), logs.TagField(logs.TagApp)) } if err = treePool.Dial(ctx); err != nil { - a.log.Fatal(logs.FailedToDialTreePool, zap.Error(err)) + a.log.Fatal(logs.FailedToDialTreePool, zap.Error(err), logs.TagField(logs.TagApp)) } a.treePool = treePool @@ -856,6 +906,7 @@ func (a *App) Wait() { a.log.Info(logs.ApplicationStarted, zap.String("name", "frostfs-s3-gw"), zap.String("version", 
version.Version), + logs.TagField(logs.TagApp), ) a.metrics.State().SetVersion(version.Version) @@ -863,7 +914,7 @@ func (a *App) Wait() { <-a.webDone // wait for web-server to be stopped - a.log.Info(logs.ApplicationFinished) + a.log.Info(logs.ApplicationFinished, logs.TagField(logs.TagApp)) } func (a *App) setHealthStatus() { @@ -909,11 +960,11 @@ func (a *App) Serve(ctx context.Context) { for i := range servs { go func(i int) { - a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address())) + a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()), logs.TagField(logs.TagApp)) if err := srv.Serve(servs[i].Listener()); err != nil && err != http.ErrServerClosed { a.metrics.MarkUnhealthy(servs[i].Address()) - a.log.Fatal(logs.ListenAndServe, zap.Error(err)) + a.log.Fatal(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp)) } }(i) } @@ -938,7 +989,7 @@ LOOP: ctx, cancel := shutdownContext() defer cancel() - a.log.Info(logs.StoppingServer, zap.Error(srv.Shutdown(ctx))) + a.log.Info(logs.StoppingServer, zap.Error(srv.Shutdown(ctx)), logs.TagField(logs.TagApp)) a.metrics.Shutdown() a.stopServices() @@ -952,23 +1003,23 @@ func shutdownContext() (context.Context, context.CancelFunc) { } func (a *App) configReload(ctx context.Context) { - a.log.Info(logs.SIGHUPConfigReloadStarted) + a.log.Info(logs.SIGHUPConfigReloadStarted, logs.TagField(logs.TagApp)) if !a.config().IsSet(cmdConfig) && !a.config().IsSet(cmdConfigDir) { - a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed) + a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed, logs.TagField(logs.TagApp)) return } if err := a.cfg.reload(); err != nil { - a.log.Warn(logs.FailedToReloadConfig, zap.Error(err)) + a.log.Warn(logs.FailedToReloadConfig, zap.Error(err), logs.TagField(logs.TagApp)) return } if err := a.bucketResolver.UpdateResolvers(a.getResolverOrder()); err != nil { - a.log.Warn(logs.FailedToReloadResolvers, zap.Error(err)) + a.log.Warn(logs.FailedToReloadResolvers, zap.Error(err), logs.TagField(logs.TagApp)) } if err := a.updateServers(); err != nil { - a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err)) + a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err), logs.TagField(logs.TagApp)) } a.setRuntimeParameters() @@ -982,18 +1033,22 @@ func (a *App) configReload(ctx context.Context) { a.initTracing(ctx) a.setHealthStatus() - a.log.Info(logs.SIGHUPConfigReloadCompleted) + a.log.Info(logs.SIGHUPConfigReloadCompleted, logs.TagField(logs.TagApp)) } func (a *App) updateSettings() { if lvl, err := getLogLevel(a.config()); err != nil { - a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err)) + a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp)) } else { a.settings.logLevel.SetLevel(lvl) } if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.config(), a.log)); err != nil { - a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err)) + a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp)) + } + + if err := a.settings.tagsConfig.update(a.config()); err != nil { + a.log.Warn(logs.TagsLogConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp)) } a.settings.update(a.config(), a.log) @@ -1024,17 +1079,17 @@ func (a *App) initServers(ctx context.Context) { if err != nil { a.unbindServers = append(a.unbindServers, serverInfo) a.metrics.MarkUnhealthy(serverInfo.Address) - a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err))...) 
+ a.log.With(logs.TagField(logs.TagApp)).Warn(logs.FailedToAddServer, append(fields, zap.Error(err))...) continue } a.metrics.MarkHealthy(serverInfo.Address) a.servers = append(a.servers, srv) - a.log.Info(logs.AddServer, fields...) + a.log.With(logs.TagField(logs.TagApp)).Info(logs.AddServer, fields...) } if len(a.servers) == 0 { - a.log.Fatal(logs.NoHealthyServers) + a.log.Fatal(logs.NoHealthyServers, logs.TagField(logs.TagApp)) } } @@ -1138,7 +1193,7 @@ func (a *App) initHandler() { a.api, err = handler.New(a.log, a.obj, a.settings, a.policyStorage, a.frostfsid) if err != nil { - a.log.Fatal(logs.CouldNotInitializeAPIHandler, zap.Error(err)) + a.log.Fatal(logs.CouldNotInitializeAPIHandler, zap.Error(err), logs.TagField(logs.TagApp)) } } @@ -1170,7 +1225,7 @@ func (a *App) getServers() []Server { func (a *App) setRuntimeParameters() { if len(os.Getenv("GOMEMLIMIT")) != 0 { // default limit < yaml limit < app env limit < GOMEMLIMIT - a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT) + a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT, logs.TagField(logs.TagApp)) return } @@ -1179,7 +1234,9 @@ func (a *App) setRuntimeParameters() { if softMemoryLimit != previous { a.log.Info(logs.RuntimeSoftMemoryLimitUpdated, zap.Int64("new_value", softMemoryLimit), - zap.Int64("old_value", previous)) + zap.Int64("old_value", previous), + logs.TagField(logs.TagApp), + ) } } @@ -1216,23 +1273,23 @@ func (a *App) tryReconnect(ctx context.Context, sr *http.Server) bool { srv, err := newServer(ctx, serverInfo) if err != nil { - a.log.Warn(logs.ServerReconnectFailed, zap.Error(err)) + a.log.Warn(logs.ServerReconnectFailed, zap.Error(err), logs.TagField(logs.TagApp)) failedServers = append(failedServers, serverInfo) a.metrics.MarkUnhealthy(serverInfo.Address) continue } go func() { - a.log.Info(logs.StartingServer, zap.String("address", srv.Address())) + a.log.Info(logs.StartingServer, zap.String("address", srv.Address()), logs.TagField(logs.TagApp)) a.metrics.MarkHealthy(serverInfo.Address) if err = sr.Serve(srv.Listener()); err != nil && !errors.Is(err, http.ErrServerClosed) { - a.log.Warn(logs.ListenAndServe, zap.Error(err)) + a.log.Warn(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp)) a.metrics.MarkUnhealthy(serverInfo.Address) } }() a.servers = append(a.servers, srv) - a.log.Info(logs.ServerReconnectedSuccessfully, fields...) + a.log.With(logs.TagField(logs.TagApp)).Info(logs.ServerReconnectedSuccessfully, fields...) } a.unbindServers = failedServers diff --git a/cmd/s3-gw/app_settings.go b/cmd/s3-gw/app_settings.go index ff888567..884a116a 100644 --- a/cmd/s3-gw/app_settings.go +++ b/cmd/s3-gw/app_settings.go @@ -21,20 +21,13 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool" - "git.frostfs.info/TrueCloudLab/zapjournald" "github.com/spf13/pflag" "github.com/spf13/viper" - "github.com/ssgreg/journald" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) -const ( - destinationStdout = "stdout" - destinationJournald = "journald" - - wildcardPlaceholder = "" -) +const wildcardPlaceholder = "" const ( defaultRebalanceInterval = 60 * time.Second @@ -89,7 +82,8 @@ var ( defaultDefaultNamespaces = []string{"", "root"} ) -const ( // Settings. +// Settings. +const ( // Logger. cfgLoggerLevel = "logger.level" cfgLoggerDestination = "logger.destination" @@ -99,6 +93,11 @@ const ( // Settings. 
cfgLoggerSamplingThereafter = "logger.sampling.thereafter" cfgLoggerSamplingInterval = "logger.sampling.interval" + cfgLoggerTags = "logger.tags" + cfgLoggerTagsPrefixTmpl = cfgLoggerTags + ".%d." + cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "name" + cfgLoggerTagsLevelTmpl = cfgLoggerTagsPrefixTmpl + "level" + // HttpLogging. cfgHTTPLoggingEnabled = "http_logging.enabled" cfgHTTPLoggingMaxBody = "http_logging.max_body" @@ -470,14 +469,18 @@ func fetchDefaultPolicy(l *zap.Logger, cfg *viper.Viper) netmap.PlacementPolicy policyStr := cfg.GetString(cfgPolicyDefault) if err := policy.DecodeString(policyStr); err != nil { l.Warn(logs.FailedToParseDefaultLocationConstraint, - zap.String("policy", policyStr), zap.String("default", defaultPlacementPolicy), zap.Error(err)) + zap.String("policy", policyStr), zap.String("default", defaultPlacementPolicy), + zap.Error(err), logs.TagField(logs.TagApp)) } else { return policy } } if err := policy.DecodeString(defaultPlacementPolicy); err != nil { - l.Fatal(logs.FailedToParseDefaultDefaultLocationConstraint, zap.String("policy", defaultPlacementPolicy)) + l.Fatal(logs.FailedToParseDefaultDefaultLocationConstraint, + zap.String("policy", defaultPlacementPolicy), + logs.TagField(logs.TagApp), + ) } return policy @@ -490,7 +493,9 @@ func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultV l.Error(logs.InvalidLifetimeUsingDefaultValue, zap.String("parameter", cfgEntry), zap.Duration("value in config", lifetime), - zap.Duration("default", defaultValue)) + zap.Duration("default", defaultValue), + logs.TagField(logs.TagApp), + ) } else { return lifetime } @@ -506,7 +511,9 @@ func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue l.Error(logs.InvalidCacheSizeUsingDefaultValue, zap.String("parameter", cfgEntry), zap.Int("value in config", size), - zap.Int("default", defaultValue)) + zap.Int("default", defaultValue), + logs.TagField(logs.TagApp), + ) } else { return size } @@ -528,7 +535,8 @@ func fetchRemovingCheckInterval(v *viper.Viper, l *zap.Logger) time.Duration { l.Error(logs.InvalidAccessBoxCacheRemovingCheckInterval, zap.String("parameter", cfgAccessBoxCacheRemovingCheckInterval), zap.Duration("value in config", duration), - zap.Duration("default", defaultAccessBoxCacheRemovingCheckInterval)) + zap.Duration("default", defaultAccessBoxCacheRemovingCheckInterval), + logs.TagField(logs.TagApp)) return defaultAccessBoxCacheRemovingCheckInterval } @@ -542,7 +550,9 @@ func fetchDefaultMaxAge(cfg *viper.Viper, l *zap.Logger) int { if defaultMaxAge <= 0 && defaultMaxAge != -1 { l.Fatal(logs.InvalidDefaultMaxAge, zap.String("parameter", cfgDefaultMaxAge), - zap.String("value in config", strconv.Itoa(defaultMaxAge))) + zap.String("value in config", strconv.Itoa(defaultMaxAge)), + logs.TagField(logs.TagApp), + ) } } @@ -553,14 +563,19 @@ func fetchRegionMappingPolicies(l *zap.Logger, cfg *viper.Viper) map[string]netm filepath := cfg.GetString(cfgPolicyRegionMapFile) regionPolicyMap, err := readRegionMap(filepath) if err != nil { - l.Warn(logs.FailedToReadRegionMapFilePolicies, zap.String("file", filepath), zap.Error(err)) + l.Warn(logs.FailedToReadRegionMapFilePolicies, + zap.String("file", filepath), + zap.Error(err), + logs.TagField(logs.TagApp)) return make(map[string]netmap.PlacementPolicy) } regionMap := make(map[string]netmap.PlacementPolicy, len(regionPolicyMap)) for region, policy := range regionPolicyMap { if region == api.DefaultLocationConstraint { - 
l.Warn(logs.DefaultLocationConstraintCantBeOverriden, zap.String("policy", policy)) + l.Warn(logs.DefaultLocationConstraintCantBeOverriden, + zap.String("policy", policy), + logs.TagField(logs.TagApp)) continue } @@ -575,7 +590,10 @@ func fetchRegionMappingPolicies(l *zap.Logger, cfg *viper.Viper) map[string]netm continue } - l.Warn(logs.FailedToParseLocationConstraint, zap.String("region", region), zap.String("policy", policy)) + l.Warn(logs.FailedToParseLocationConstraint, + zap.String("region", region), + zap.String("policy", policy), + logs.TagField(logs.TagApp)) } return regionMap @@ -607,7 +625,11 @@ func fetchDefaultCopiesNumbers(l *zap.Logger, v *viper.Viper) []uint32 { parsedValue, err := strconv.ParseUint(unparsed[i], 10, 32) if err != nil { l.Warn(logs.FailedToParseDefaultCopiesNumbers, - zap.Strings("copies numbers", unparsed), zap.Uint32s("default", defaultCopiesNumbers), zap.Error(err)) + zap.Strings("copies numbers", unparsed), + zap.Uint32s("default", defaultCopiesNumbers), + zap.Error(err), + logs.TagField(logs.TagApp), + ) return defaultCopiesNumbers } result[i] = uint32(parsedValue) @@ -663,15 +685,17 @@ func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper) map[string][]uint32 { for j := range vector { parsedValue, err := strconv.ParseUint(vector[j], 10, 32) if err != nil { - l.Warn(logs.FailedToParseCopiesNumbers, zap.String("location", constraint), - zap.Strings("copies numbers", vector), zap.Error(err)) + l.Warn(logs.FailedToParseCopiesNumbers, + zap.String("location", constraint), + zap.Strings("copies numbers", vector), zap.Error(err), + logs.TagField(logs.TagApp)) continue } vector32[j] = uint32(parsedValue) } copiesNums[constraint] = vector32 - l.Info(logs.ConstraintAdded, zap.String("location", constraint), zap.Strings("copies numbers", vector)) + l.Info(logs.ConstraintAdded, zap.String("location", constraint), zap.Strings("copies numbers", vector), logs.TagField(logs.TagApp)) } return copiesNums } @@ -680,7 +704,9 @@ func fetchDefaultNamespaces(l *zap.Logger, v *viper.Viper) []string { defaultNamespaces := v.GetStringSlice(cfgKludgeDefaultNamespaces) if len(defaultNamespaces) == 0 { defaultNamespaces = defaultDefaultNamespaces - l.Warn(logs.DefaultNamespacesCannotBeEmpty, zap.Strings("namespaces", defaultNamespaces)) + l.Warn(logs.DefaultNamespacesCannotBeEmpty, + zap.Strings("namespaces", defaultNamespaces), + logs.TagField(logs.TagApp)) } for i := range defaultNamespaces { // to be set namespaces in env variable as `S3_GW_KLUDGE_DEFAULT_NAMESPACES="" 'root'` @@ -704,7 +730,7 @@ func fetchNamespacesConfig(l *zap.Logger, v *viper.Viper) (NamespacesConfig, []s nsConfig, err := readNamespacesConfig(v.GetString(cfgNamespacesConfig)) if err != nil { - l.Warn(logs.FailedToParseNamespacesConfig, zap.Error(err)) + l.Warn(logs.FailedToParseNamespacesConfig, zap.Error(err), logs.TagField(logs.TagApp)) } defaultNamespacesNames := fetchDefaultNamespaces(l, v) @@ -718,11 +744,13 @@ func fetchNamespacesConfig(l *zap.Logger, v *viper.Viper) (NamespacesConfig, []s } if len(overrideDefaults) > 0 { - l.Warn(logs.DefaultNamespaceConfigValuesBeOverwritten) + l.Warn(logs.DefaultNamespaceConfigValuesBeOverwritten, logs.TagField(logs.TagApp)) defaultNSValue.LocationConstraints = overrideDefaults[0].LocationConstraints defaultNSValue.CopiesNumbers = overrideDefaults[0].CopiesNumbers if len(overrideDefaults) > 1 { - l.Warn(logs.MultipleDefaultOverridesFound, zap.String("name", overrideDefaults[0].Name)) + l.Warn(logs.MultipleDefaultOverridesFound, + zap.String("name", 
overrideDefaults[0].Name), + logs.TagField(logs.TagApp)) } } @@ -765,7 +793,7 @@ func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam { priority := v.GetInt(key + "priority") if address == "" { - l.Warn(logs.SkipEmptyAddress) + l.Warn(logs.SkipEmptyAddress, logs.TagField(logs.TagApp)) break } if weight <= 0 { // unspecified or wrong @@ -780,7 +808,9 @@ func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam { l.Info(logs.AddedStoragePeer, zap.Int("priority", priority), zap.String("address", address), - zap.Float64("weight", weight)) + zap.Float64("weight", weight), + logs.TagField(logs.TagApp), + ) } return nodes @@ -804,7 +834,7 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo { } if _, ok := seen[serverInfo.Address]; ok { - log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address)) + log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address), logs.TagField(logs.TagApp)) continue } seen[serverInfo.Address] = struct{}{} @@ -833,13 +863,13 @@ func fetchVHSNamespaces(v *viper.Viper, log *zap.Logger) map[string]bool { nsMap := v.GetStringMap(cfgVHSNamespaces) for ns, val := range nsMap { if _, ok := vhsNamespacesEnabled[ns]; ok { - log.Warn(logs.WarnDuplicateNamespaceVHS, zap.String("namespace", ns)) + log.Warn(logs.WarnDuplicateNamespaceVHS, zap.String("namespace", ns), logs.TagField(logs.TagApp)) continue } enabledFlag, ok := val.(bool) if !ok { - log.Warn(logs.WarnValueVHSEnabledFlagWrongType, zap.String("namespace", ns)) + log.Warn(logs.WarnValueVHSEnabledFlagWrongType, zap.String("namespace", ns), logs.TagField(logs.TagApp)) continue } @@ -923,6 +953,41 @@ func fetchTombstoneWorkerPoolSize(v *viper.Viper) int { return tombstoneWorkerPoolSize } +func fetchLogTagsConfig(v *viper.Viper) (map[string]zapcore.Level, error) { + res := make(map[string]zapcore.Level) + + defaultLevel := v.GetString(cfgLoggerLevel) + var defaultLvl zapcore.Level + if err := defaultLvl.Set(defaultLevel); err != nil { + return nil, fmt.Errorf("failed to parse log level, unknown level: '%s'", defaultLevel) + } + + for i := 0; ; i++ { + name := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i)) + if name == "" { + break + } + + lvl := defaultLvl + level := v.GetString(fmt.Sprintf(cfgLoggerTagsLevelTmpl, i)) + if level != "" { + if err := lvl.Set(level); err != nil { + return nil, fmt.Errorf("failed to parse log tags config, unknown level: '%s'", level) + } + } + + res[name] = lvl + } + + if len(res) == 0 && !v.IsSet(cfgLoggerTags) { + for _, tag := range defaultTags { + res[tag] = defaultLvl + } + } + + return res, nil +} + func newViper(flags *pflag.FlagSet) (*viper.Viper, error) { v := viper.New() @@ -1233,129 +1298,19 @@ type LoggerAppSettings interface { DroppedLogsInc() } -func pickLogger(v *viper.Viper, settings LoggerAppSettings) *Logger { - lvl, err := getLogLevel(v) - if err != nil { - panic(err) - } - - dest := v.GetString(cfgLoggerDestination) - - switch dest { - case destinationStdout: - return newStdoutLogger(v, lvl, settings) - case destinationJournald: - return newJournaldLogger(v, lvl, settings) - default: - panic(fmt.Sprintf("wrong destination for logger: %s", dest)) - } -} - -// newStdoutLogger constructs a Logger instance for the current application. -// Panics on failure. 
-// -// Logger contains a logger is built from zap's production logging configuration with: -// - parameterized level (debug by default) -// - console encoding -// - ISO8601 time encoding -// - sampling intervals -// -// and atomic log level to dynamically change it. -// -// Logger records a stack trace for all messages at or above fatal level. -// -// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace. -func newStdoutLogger(v *viper.Viper, lvl zapcore.Level, settings LoggerAppSettings) *Logger { - stdout := zapcore.AddSync(os.Stderr) - level := zap.NewAtomicLevelAt(lvl) - - consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, level) - - consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, settings) - - return &Logger{ - logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))), - lvl: level, - } -} - -func newJournaldLogger(v *viper.Viper, lvl zapcore.Level, settings LoggerAppSettings) *Logger { - level := zap.NewAtomicLevelAt(lvl) - - encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields) - - core := zapjournald.NewCore(level, encoder, &journald.Journal{}, zapjournald.SyslogFields) - coreWithContext := core.With([]zapcore.Field{ - zapjournald.SyslogFacility(zapjournald.LogDaemon), - zapjournald.SyslogIdentifier(), - zapjournald.SyslogPid(), - }) - - coreWithContext = applyZapCoreMiddlewares(coreWithContext, v, settings) - - l := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))) - - return &Logger{ - logger: l, - lvl: level, - } -} - -func newLogEncoder() zapcore.Encoder { - c := zap.NewProductionEncoderConfig() - c.EncodeTime = zapcore.ISO8601TimeEncoder - - return zapcore.NewConsoleEncoder(c) -} - -func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, settings LoggerAppSettings) zapcore.Core { - if v.GetBool(cfgLoggerSamplingEnabled) { - core = zapcore.NewSamplerWithOptions(core, - v.GetDuration(cfgLoggerSamplingInterval), - v.GetInt(cfgLoggerSamplingInitial), - v.GetInt(cfgLoggerSamplingThereafter), - zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) { - if dec&zapcore.LogDropped > 0 { - settings.DroppedLogsInc() - } - })) - } - - return core -} - -func getLogLevel(v *viper.Viper) (zapcore.Level, error) { - var lvl zapcore.Level - lvlStr := v.GetString(cfgLoggerLevel) - err := lvl.UnmarshalText([]byte(lvlStr)) - if err != nil { - return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+ - "value should be one of %v", lvlStr, err, [...]zapcore.Level{ - zapcore.DebugLevel, - zapcore.InfoLevel, - zapcore.WarnLevel, - zapcore.ErrorLevel, - zapcore.DPanicLevel, - zapcore.PanicLevel, - zapcore.FatalLevel, - }) - } - return lvl, nil -} - func validateDomains(domains []string, log *zap.Logger) []string { validDomains := make([]string, 0, len(domains)) LOOP: for _, domain := range domains { if strings.Contains(domain, ":") { - log.Warn(logs.WarnDomainContainsPort, zap.String("domain", domain)) + log.Warn(logs.WarnDomainContainsPort, zap.String("domain", domain), logs.TagField(logs.TagApp)) continue } domainParts := strings.Split(domain, ".") for _, part := range domainParts { if strings.ContainsAny(part, "<>") && part != wildcardPlaceholder { - log.Warn(logs.WarnDomainContainsInvalidPlaceholder, zap.String("domain", domain)) + log.Warn(logs.WarnDomainContainsInvalidPlaceholder, zap.String("domain", domain), logs.TagField(logs.TagApp)) continue LOOP } } diff --git a/cmd/s3-gw/logger.go b/cmd/s3-gw/logger.go new file mode 100644 
index 00000000..b58fd2b7
--- /dev/null
+++ b/cmd/s3-gw/logger.go
@@ -0,0 +1,210 @@
+package main
+
+import (
+	"fmt"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/zapjournald"
+	"github.com/spf13/viper"
+	"github.com/ssgreg/journald"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+const (
+	destinationStdout   string = "stdout"
+	destinationJournald string = "journald"
+)
+
+var defaultTags = []string{logs.TagApp, logs.TagDatapath, logs.TagExternalBlockchain}
+
+type Logger struct {
+	logger *zap.Logger
+	lvl    zap.AtomicLevel
+}
+
+func pickLogger(v *viper.Viper, loggerSettings LoggerAppSettings, tagSettings TagFilterSettings) *Logger {
+	dest := v.GetString(cfgLoggerDestination)
+
+	switch dest {
+	case destinationStdout:
+		return newStdoutLogger(v, loggerSettings, tagSettings)
+	case destinationJournald:
+		return newJournaldLogger(v, loggerSettings, tagSettings)
+	default:
+		panic(fmt.Sprintf("wrong destination for logger: %s", dest))
+	}
+}
+
+// newStdoutLogger constructs a Logger instance for the current application.
+// Panics on failure.
+//
+// Logger contains a logger built from zap's production logging configuration with:
+// - parameterized level (debug by default)
+// - console encoding
+// - ISO8601 time encoding
+// - sampling intervals
+//
+// and atomic log level to dynamically change it.
+//
+// Logger records a stack trace for all messages at or above fatal level.
+//
+// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
+func newStdoutLogger(v *viper.Viper, loggerSettings LoggerAppSettings, tagSettings TagFilterSettings) *Logger {
+	c := newZapLogConfig(v)
+
+	out, errSink, err := openZapSinks(c)
+	if err != nil {
+		panic(fmt.Sprintf("open zap sinks: %v", err.Error()))
+	}
+
+	core := zapcore.NewCore(zapcore.NewConsoleEncoder(c.EncoderConfig), out, c.Level)
+	core = applyZapCoreMiddlewares(core, v, loggerSettings, tagSettings)
+	l := zap.New(core, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.ErrorOutput(errSink))
+
+	return &Logger{logger: l, lvl: c.Level}
+}
+
+func newJournaldLogger(v *viper.Viper, logSettings LoggerAppSettings, tagSettings TagFilterSettings) *Logger {
+	c := newZapLogConfig(v)
+
+	// We can use NewJSONEncoder instead if, say, frontend
+	// would like to access journald logs and parse them easily.
+	encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
+
+	journalCore := zapjournald.NewCore(c.Level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
+	core := journalCore.With([]zapcore.Field{
+		zapjournald.SyslogFacility(zapjournald.LogDaemon),
+		zapjournald.SyslogIdentifier(),
+		zapjournald.SyslogPid(),
+	})
+	core = applyZapCoreMiddlewares(core, v, logSettings, tagSettings)
+	l := zap.New(core, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
+	return &Logger{logger: l, lvl: c.Level}
+}
+
+func openZapSinks(cfg zap.Config) (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
+	sink, closeOut, err := zap.Open(cfg.OutputPaths...)
+	if err != nil {
+		return nil, nil, err
+	}
+	errSink, _, err := zap.Open(cfg.ErrorOutputPaths...)
+ if err != nil { + closeOut() + return nil, nil, err + } + return sink, errSink, nil +} + +var _ zapcore.Core = (*zapCoreTagFilterWrapper)(nil) + +type zapCoreTagFilterWrapper struct { + core zapcore.Core + settings TagFilterSettings + extra []zap.Field +} + +type TagFilterSettings interface { + LevelEnabled(tag string, lvl zapcore.Level) bool +} + +func (c *zapCoreTagFilterWrapper) Enabled(level zapcore.Level) bool { + return c.core.Enabled(level) +} + +func (c *zapCoreTagFilterWrapper) With(fields []zapcore.Field) zapcore.Core { + return &zapCoreTagFilterWrapper{ + core: c.core.With(fields), + settings: c.settings, + extra: append(c.extra, fields...), + } +} + +func (c *zapCoreTagFilterWrapper) Check(entry zapcore.Entry, checked *zapcore.CheckedEntry) *zapcore.CheckedEntry { + if c.core.Enabled(entry.Level) { + return checked.AddCore(entry, c) + } + return checked +} + +func (c *zapCoreTagFilterWrapper) Write(entry zapcore.Entry, fields []zapcore.Field) error { + if c.shouldSkip(entry, fields) || c.shouldSkip(entry, c.extra) { + return nil + } + + return c.core.Write(entry, fields) +} + +func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field) bool { + for _, field := range fields { + if field.Key == logs.TagFieldName && field.Type == zapcore.StringType { + if !c.settings.LevelEnabled(field.String, entry.Level) { + return true + } + break + } + } + + return false +} + +func (c *zapCoreTagFilterWrapper) Sync() error { + return c.core.Sync() +} + +func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, appSettings LoggerAppSettings, tagSettings TagFilterSettings) zapcore.Core { + core = &zapCoreTagFilterWrapper{ + core: core, + settings: tagSettings, + } + + if v.GetBool(cfgLoggerSamplingEnabled) { + core = zapcore.NewSamplerWithOptions(core, + v.GetDuration(cfgLoggerSamplingInterval), + v.GetInt(cfgLoggerSamplingInitial), + v.GetInt(cfgLoggerSamplingThereafter), + zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) { + if dec&zapcore.LogDropped > 0 { + appSettings.DroppedLogsInc() + } + })) + } + + return core +} + +func newZapLogConfig(v *viper.Viper) zap.Config { + lvl, err := getLogLevel(v) + if err != nil { + panic(err) + } + + c := zap.Config{ + Level: zap.NewAtomicLevelAt(lvl), + EncoderConfig: zap.NewProductionEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } + c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + + return c +} + +func getLogLevel(v *viper.Viper) (zapcore.Level, error) { + var lvl zapcore.Level + lvlStr := v.GetString(cfgLoggerLevel) + err := lvl.UnmarshalText([]byte(lvlStr)) + if err != nil { + return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+ + "value should be one of %v", lvlStr, err, [...]zapcore.Level{ + zapcore.DebugLevel, + zapcore.InfoLevel, + zapcore.WarnLevel, + zapcore.ErrorLevel, + zapcore.DPanicLevel, + zapcore.PanicLevel, + zapcore.FatalLevel, + }) + } + return lvl, nil +} diff --git a/cmd/s3-gw/service.go b/cmd/s3-gw/service.go index 6b8b9937..79392ceb 100644 --- a/cmd/s3-gw/service.go +++ b/cmd/s3-gw/service.go @@ -19,24 +19,24 @@ type Service struct { // Start runs http service with the exposed endpoint on the configured port. 
func (ms *Service) Start() { if ms.enabled { - ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr)) + ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp)) err := ms.ListenAndServe() if err != nil && err != http.ErrServerClosed { - ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort) + ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort, logs.TagField(logs.TagApp)) } } else { - ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled) + ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled, logs.TagField(logs.TagApp)) } } // ShutDown stops the service. func (ms *Service) ShutDown(ctx context.Context) { - ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr)) + ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp)) err := ms.Shutdown(ctx) if err != nil { - ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err)) + ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err), logs.TagField(logs.TagApp)) if err = ms.Close(); err != nil { - ms.log.Panic(logs.CantShutDownService, zap.Error(err)) + ms.log.Panic(logs.CantShutDownService, zap.Error(err), logs.TagField(logs.TagApp)) } } } diff --git a/config/config.env b/config/config.env index 0c314912..28b1c7b7 100644 --- a/config/config.env +++ b/config/config.env @@ -55,6 +55,11 @@ S3_GW_LOGGER_SAMPLING_ENABLED=false S3_GW_LOGGER_SAMPLING_INITIAL=100 S3_GW_LOGGER_SAMPLING_THEREAFTER=100 S3_GW_LOGGER_SAMPLING_INTERVAL=1s +S3_GW_LOGGER_TAGS_0_NAME=app +S3_GW_LOGGER_TAGS_0_LEVEL=info +S3_GW_LOGGER_TAGS_1_NAME=datapath +S3_GW_LOGGER_TAGS_1_LEVEL=fatal + # HTTP logger S3_GW_HTTP_LOGGING_ENABLED=false diff --git a/config/config.yaml b/config/config.yaml index 9b82b4e1..d9630e09 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -60,6 +60,12 @@ logger: initial: 100 thereafter: 100 interval: 1s + tags: + - name: "app" + level: "debug" + - name: "datapath" + - name: "external_storage" + - name: "external_storage_tree" # log http request data (URI, headers, query, etc) http_logging: diff --git a/creds/tokens/credentials.go b/creds/tokens/credentials.go index a8bd597d..aec89838 100644 --- a/creds/tokens/credentials.go +++ b/creds/tokens/credentials.go @@ -241,7 +241,7 @@ func (c *cred) getAccessBox(ctx context.Context, cnrID cid.ID, accessKeyID strin func (c *cred) Put(ctx context.Context, prm CredentialsParam) (oid.Address, error) { if prm.AccessKeyID != "" { - c.log.Info(logs.CheckCustomAccessKeyIDUniqueness, zap.String("access_key_id", prm.AccessKeyID)) + c.log.Info(logs.CheckCustomAccessKeyIDUniqueness, zap.String("access_key_id", prm.AccessKeyID), logs.TagField(logs.TagApp)) credsPrm := PrmGetCredsObject{ Container: prm.Container, AccessKeyID: prm.AccessKeyID, diff --git a/docs/configuration.md b/docs/configuration.md index 665b49b9..8278b2cd 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -381,6 +381,13 @@ logger: initial: 100 thereafter: 100 interval: 1s + tags: + - name: app + level: info + - name: datapath + - name: external_blockchain + - name: external_storage_tree + - name: external_storage ``` | Parameter | Type | SIGHUP reload | Default value | Description | @@ -392,6 +399,32 @@ logger: | `sampling.thereafter` | `int` | no | '100' | Sampling count of entries after an `interval`. | | `sampling.interval` | `duration` | no | '1s' | Sampling interval of messaging similar entries. 
|
+### Tags
+
+There are additional log entries that can hurt performance; they can be enabled with
+the `logger.tags` parameter.
+If the `tags` section isn't set, the default tags (see [Tag values](#tag-values)) are enabled.
+If the `tags` section is set but empty, no tags are used.
+
+```yaml
+tags:
+  - name: "app"
+    level: info
+```
+
+| Parameter | Type     | SIGHUP reload | Default value             | Description                                                                                                        |
+|-----------|----------|---------------|---------------------------|--------------------------------------------------------------------------------------------------------------------|
+| `name`    | `string` | yes           |                           | Tag name. See the [Tag values](#tag-values) section below for possible values.                                     |
+| `level`   | `string` | yes           | Value from `logger.level` | Logging level for the specific tag. Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
+
+#### Tag values
+
+* `app` - common application logs (enabled by default).
+* `datapath` - main application logic (enabled by default).
+* `external_blockchain` - external interaction with the neo-go blockchain (enabled by default).
+* `external_storage` - external interaction with the storage node.
+* `external_storage_tree` - external interaction with the tree service in the storage node.
+
 ### `http_logging` section
diff --git a/internal/frostfs/frostfsid/frostfsid.go b/internal/frostfs/frostfsid/frostfsid.go
index 26e4f624..8d61b453 100644
--- a/internal/frostfs/frostfsid/frostfsid.go
+++ b/internal/frostfs/frostfsid/frostfsid.go
@@ -61,7 +61,7 @@ func (f *FrostFSID) GetUserGroupIDsAndClaims(userHash util.Uint160) ([]string, m
 	subj, err := f.getSubject(userHash)
 	if err != nil {
 		if strings.Contains(err.Error(), "not found") {
-			f.log.Debug(logs.UserGroupsListIsEmpty, zap.Error(err))
+			f.log.Debug(logs.UserGroupsListIsEmpty, zap.Error(err), logs.TagField(logs.TagExternalBlockchain))
 			return nil, nil, nil
 		}
 		return nil, nil, err
diff --git a/internal/frostfs/policy/morph_rule_chain_storage.go b/internal/frostfs/policy/morph_rule_chain_storage.go
index 1d826171..4edf0bfc 100644
--- a/internal/frostfs/policy/morph_rule_chain_storage.go
+++ b/internal/frostfs/policy/morph_rule_chain_storage.go
@@ -69,7 +69,7 @@ func (c *MorphRuleChainStorage) ListMorphRuleChains(name chain.Name, target engi
 	}
 
 	if err = c.cache.Put(key, list); err != nil {
-		c.log.Warn(logs.CouldntCacheListPolicyChains)
+		c.log.Warn(logs.CouldntCacheListPolicyChains, logs.TagField(logs.TagApp))
 	}
 
 	return list, nil
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 9dbec872..b8874f16 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -1,189 +1,229 @@
 package logs
 
+import "go.uber.org/zap"
+
 const (
+	TagFieldName = "tag"
+
+	TagApp                 = "app"
+	TagDatapath            = "datapath"
+	TagExternalStorage     = "external_storage"
+	TagExternalStorageTree = "external_storage_tree"
+	TagExternalBlockchain  = "external_blockchain"
+)
+
+func TagField(tag string) zap.Field {
+	return zap.String(TagFieldName, tag)
+}
+
+// App.
+const ( + ApplicationStarted = "application started" + ApplicationFinished = "application finished" + StartingServer = "starting server" + StoppingServer = "stopping server" + ServiceIsRunning = "service is running" + ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port" + ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" + ShuttingDownService = "shutting down service" + CantGracefullyShutDownService = "can't gracefully shut down service, force stop" + FailedToShutdownTracing = "failed to shutdown tracing" + UsingCredentials = "using credentials" + FailedToAddServer = "failed to add server" + AddServer = "add server" + CantShutDownService = "can't shut down service" + FailedToCreateResolver = "failed to create resolver" + CouldntGenerateRandomKey = "couldn't generate random key" + FailedToCreateConnectionPool = "failed to create connection pool" + FailedToDialConnectionPool = "failed to dial connection pool" + FailedToCreateTreePool = "failed to create tree pool" + FailedToDialTreePool = "failed to dial tree pool" + ListenAndServe = "listen and serve" + NoHealthyServers = "no healthy servers" + CouldNotInitializeAPIHandler = "could not initialize API handler" + InitFrostfsIDFailed = "init frostfsid failed" + InitPolicyContractFailed = "init policy contract failed" + ServerReconnecting = "reconnecting server..." + ServerReconnectedSuccessfully = "server reconnected successfully" + ServerReconnectFailed = "failed to reconnect server" + CouldNotFetchCORSContainerInfo = "couldn't fetch CORS container info" + CouldNotFetchAccessBoxContainerInfo = "couldn't fetch AccessBox container info" + MultinetDialSuccess = "multinet dial successful" + MultinetDialFail = "multinet dial failed" + FailedToCreateWorkerPool = "failed to create worker pool" + CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" + AddedStoragePeer = "added storage peer" + SIGHUPConfigReloadStarted = "SIGHUP config reload started" + MetricsAreDisabled = "metrics are disabled" + ConstraintAdded = "constraint added" + ContainerResolverWillBeDisabled = "container resolver will be disabled because of resolvers 'resolver_order' is empty" + FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed" + FailedToReloadConfig = "failed to reload config" + TracingConfigUpdated = "tracing config updated" + FailedToReloadResolvers = "failed to reload resolvers" + FailedToReloadServerParameters = "failed to reload server parameters" + SIGHUPConfigReloadCompleted = "SIGHUP config reload completed" + LogLevelWontBeUpdated = "log level won't be updated" + ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver 'nns' won't be used since 'rpc_endpoint' isn't provided" + InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)" + InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value" + FailedToParseDefaultLocationConstraint = "failed to parse 'default' location constraint, default one will be used" + FailedToReadRegionMapFilePolicies = "failed to read region map file, policies will be empty" + DefaultLocationConstraintCantBeOverriden = "'default' location constraint can't be overriden by custom policy, use 'placement_policy.default'" + FailedToParseLocationConstraint = "failed to parse location constraint, it cannot be used" + FailedToParseDefaultCopiesNumbers = "failed to parse 'default' copies numbers, default one will be used" + FailedToParseCopiesNumbers = "failed to parse copies 
numbers, skip" + FailedToInitializeTracing = "failed to initialize tracing" + DefaultNamespacesCannotBeEmpty = "default namespaces cannot be empty, defaults will be used" + FailedToParseNamespacesConfig = "failed to unmarshal namespaces config" + DefaultNamespaceConfigValuesBeOverwritten = "default namespace config value be overwritten by values from 'namespaces.config'" + MultipleDefaultOverridesFound = "multiple default overrides found, only one will be used" + SkipEmptyAddress = "skip, empty address" + FailedToParseDefaultDefaultLocationConstraint = "failed to parse default 'default' location constraint" + LogHTTPDisabledInThisBuild = "http logging disabled in this build" + InvalidDefaultMaxAge = "invalid defaultMaxAge" + RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" + RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" + InvalidAccessBoxCacheRemovingCheckInterval = "invalid accessbox check removing interval, using default value" + WarnDuplicateAddress = "duplicate address" + FailedToLoadMultinetConfig = "failed to load multinet config" + MultinetConfigWontBeUpdated = "multinet config won't be updated" + WarnDomainContainsPort = "the domain contains a port, domain skipped" + TagsLogConfigWontBeUpdated = "tags log config won't be updated" + CouldNotFetchLifecycleContainerInfo = "couldn't fetch lifecycle container info" + WarnDuplicateNamespaceVHS = "duplicate namespace with enabled VHS, config value skipped" + WarnValueVHSEnabledFlagWrongType = "the value of the VHS enable flag for the namespace is of the wrong type, config value skipped" + WarnDomainContainsInvalidPlaceholder = "the domain contains an invalid placeholder, domain skipped" +) + +// Datapath. 
+const ( + NotSupported = "not supported" RequestUnmatched = "request unmatched" - CheckContainer = "check container" - CreateContainer = "create container" - StoreBearerTokenIntoFrostFS = "store bearer token into FrostFS" - UpdateAccessCredObjectIntoFrostFS = "update access cred object into FrostFS" - MetricsAreDisabled = "metrics are disabled" - FoundMoreThanOneUnversionedNode = "found more than one unversioned node" - ServiceIsRunning = "service is running" - ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port" - ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" - ShuttingDownService = "shutting down service" - CantGracefullyShutDownService = "can't gracefully shut down service, force stop" - ContainerResolverWillBeDisabled = "container resolver will be disabled because of resolvers 'resolver_order' is empty" - FailedToInitializeTracing = "failed to initialize tracing" - TracingConfigUpdated = "tracing config updated" - FailedToShutdownTracing = "failed to shutdown tracing" - UsingCredentials = "using credentials" - ApplicationStarted = "application started" - ApplicationFinished = "application finished" - StartingServer = "starting server" - StoppingServer = "stopping server" - SIGHUPConfigReloadStarted = "SIGHUP config reload started" - FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed" - FailedToReloadConfig = "failed to reload config" - FailedToReloadResolvers = "failed to reload resolvers" - FailedToReloadServerParameters = "failed to reload server parameters" - SIGHUPConfigReloadCompleted = "SIGHUP config reload completed" - LogLevelWontBeUpdated = "log level won't be updated" - FailedToAddServer = "failed to add server" - AddServer = "add server" - ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver 'nns' won't be used since 'rpc_endpoint' isn't provided" - InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)" - InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value" - FailedToParseDefaultLocationConstraint = "failed to parse 'default' location constraint, default one will be used" - FailedToReadRegionMapFilePolicies = "failed to read region map file, policies will be empty" - DefaultLocationConstraintCantBeOverriden = "'default' location constraint can't be overriden by custom policy, use 'placement_policy.default'" - FailedToParseLocationConstraint = "failed to parse location constraint, it cannot be used" - FailedToParseDefaultCopiesNumbers = "failed to parse 'default' copies numbers, default one will be used" - FailedToParseCopiesNumbers = "failed to parse copies numbers, skip" - DefaultNamespacesCannotBeEmpty = "default namespaces cannot be empty, defaults will be used" - FailedToParseNamespacesConfig = "failed to unmarshal namespaces config" - DefaultNamespaceConfigValuesBeOverwritten = "default namespace config value be overwritten by values from 'namespaces.config'" - MultipleDefaultOverridesFound = "multiple default overrides found, only one will be used" - FailedToParseDefaultDefaultLocationConstraint = "failed to parse default 'default' location constraint" - ConstraintAdded = "constraint added" - SkipEmptyAddress = "skip, empty address" - AddedStoragePeer = "added storage peer" - PrepareConnectionPool = "prepare connection pool" - PrepareFrostfsIDClient = "prepare frostfsid client" - PreparePolicyClient = "prepare policy client" - CreateSubjectInFrostFSID = "create subject in frostfsid" - 
SubjectAlreadyExistsInFrostFSID = "subject already exists in frostfsid" - SetSubjectNameInFrostFSID = "set subject name in frostfsid" - AddPolicyChainRules = "add policy chain rules" + RequestStart = "request start" + RequestFailed = "request failed" + RequestEnd = "request end" + AnonRequestSkipFrostfsIDValidation = "anon request, skip FrostfsID validation" + CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed = "couldn't receive access box for gate key, random key will be used" + FrostfsIDValidationFailed = "FrostfsID validation failed" + FailedToPassAuthentication = "failed to pass authentication" + PolicyValidationFailed = "policy validation failed" + CouldNotCloseRequestBody = "could not close request body" + BucketOwnerKeyIsMissing = "bucket owner key is missing" + SuccessfulAuth = "successful auth" + PolicyRequest = "policy request" + FailedToGenerateRequestID = "failed to generate request id" + FailedToWriteResponse = "failed to write response" + PolicyCouldntBeConvertedToNativeRules = "policy couldn't be converted to native rules, only s3 rules be applied" + FailedToParseHTTPTime = "failed to parse http time, header is ignored" + WarnInvalidTypeTLSTerminationHeader = "invalid type of value of tls termination header" + FailedToUnescapeObjectName = "failed to unescape object name" + MismatchedObjEncryptionInfo = "mismatched obj encryptionInfo" + CouldNotParseContainerObjectLockEnabledAttribute = "could not parse container object lock enabled attribute" + FailedToParsePartInfo = "failed to parse part info" + SettingsNodeInvalidOwnerKey = "settings node: invalid owner key" + FailedToParseAddressInTreeNode = "failed to parse object addr in tree node" + UnexpectedMultiNodeIDsInSubTreeMultiParts = "unexpected multi node ids in sub tree multi parts" + FoundSeveralSystemNodes = "found several system nodes" + BucketLifecycleNodeHasMultipleIDs = "bucket lifecycle node has multiple ids" + UploadPart = "upload part" + FailedToSubmitTaskToPool = "failed to submit task to pool" + FailedToGetRealObjectSize = "failed to get real object size" + PartDetails = "part details" + BucketSettingsNotFoundUseDefaults = "bucket settings not found, use defaults" + ParseTreeNode = "parse tree node" + GetObject = "get object" + GetTreeNodeToDelete = "get tree node to delete" + InvalidBucketObjectLockEnabledHeader = "invalid X-Amz-Bucket-Object-Lock-Enabled header" InvalidCacheEntryType = "invalid cache entry type" InvalidCacheKeyType = "invalid cache key type" - ObjectIsCopied = "object is copied" - RequestFailed = "request failed" - GetBucketInfo = "get bucket info" - GetBucketCors = "get bucket cors" - CouldntDeleteObject = "couldn't delete object" - BucketIsCreated = "bucket is created" - CouldNotParseContainerObjectLockEnabledAttribute = "could not parse container object lock enabled attribute" - CouldNotListUserContainers = "could not list user containers" - CouldNotFetchContainerInfo = "could not fetch container info" - MismatchedObjEncryptionInfo = "mismatched obj encryptionInfo" - UploadPart = "upload part" - CouldntDeleteOldPartObject = "couldn't delete old part object" - CouldNotPutCompletedObject = "could not put a completed object (multipart upload)" - CouldNotDeleteUploadPart = "could not delete upload part" - CouldntDeletePart = "couldn't delete part" - PartDetails = "part details" - GetObject = "get object" - ResolveBucket = "resolve bucket" - CouldntDeleteCorsObject = "couldn't delete cors object" - PutObject = "put object" - FailedToDeleteObject = "failed to delete object" - 
FailedToDiscardPutPayloadProbablyGoroutineLeaks = "failed to discard put payload, probably goroutine leaks" - FailedToSubmitTaskToPool = "failed to submit task to pool" - CouldNotFetchObjectMeta = "could not fetch object meta" - GetTreeNode = "get tree node" - GetTreeNodeToDelete = "get tree node to delete" CouldntPutBucketInfoIntoCache = "couldn't put bucket info into cache" CouldntAddObjectToCache = "couldn't add object to cache" CouldntCacheAccessControlOperation = "couldn't cache access control operation" CouldntPutObjAddressToNameCache = "couldn't put obj address to name cache" + CouldntPutAccessBoxIntoCache = "couldn't put accessbox into cache" CouldntCacheListOfObjects = "couldn't cache list of objects" CouldntCacheListSession = "couldn't cache list session" CouldntCacheTags = "couldn't cache tags" CouldntCacheLockInfo = "couldn't cache lock info" + CouldntCacheNetworkInfo = "couldn't cache network info" + CouldntCacheSubject = "couldn't cache subject info" + CouldntCacheNetmap = "couldn't cache netmap" + CouldntCacheUserKey = "couldn't cache user key" CouldntCacheBucketSettings = "couldn't cache bucket settings" CouldntCacheCors = "couldn't cache cors" CouldntCacheListPolicyChains = "couldn't cache list policy chains" - RequestEnd = "request end" - CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed = "couldn't receive access box for gate key, random key will be used" - FailedToPassAuthentication = "failed to pass authentication" - FailedToResolveCID = "failed to resolve CID" - RequestStart = "request start" - LogHTTP = "http log" - FailedToCloseHTTPBody = "failed to close http body" - FailedToInitializeHTTPLogger = "failed to initialize http logger" - FailedToReloadHTTPFileLogger = "failed to reload http file logger" - FailedToReadHTTPBody = "failed to read http body" - FailedToProcessHTTPBody = "failed to process http body" - LogHTTPDisabledInThisBuild = "http logging disabled in this build" - FailedToUnescapeObjectName = "failed to unescape object name" - InvalidDefaultMaxAge = "invalid defaultMaxAge" - CantShutDownService = "can't shut down service" - CouldntGenerateRandomKey = "couldn't generate random key" - FailedToCreateResolver = "failed to create resolver" - CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" - FailedToCreateConnectionPool = "failed to create connection pool" - FailedToDialConnectionPool = "failed to dial connection pool" - FailedToCreateTreePool = "failed to create tree pool" - FailedToDialTreePool = "failed to dial tree pool" - ListenAndServe = "listen and serve" - NoHealthyServers = "no healthy servers" - CouldNotInitializeAPIHandler = "could not initialize API handler" - RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" - RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" - AnonRequestSkipFrostfsIDValidation = "anon request, skip FrostfsID validation" - FrostfsIDValidationFailed = "FrostfsID validation failed" - InitFrostfsIDContractFailed = "init frostfsid contract failed" - InitPolicyContractFailed = "init policy contract failed" - PolicyValidationFailed = "policy validation failed" - ServerReconnecting = "reconnecting server..." 
- ServerReconnectedSuccessfully = "server reconnected successfully" - ServerReconnectFailed = "failed to reconnect server" - ParseTreeNode = "parse tree node" - FailedToGetRealObjectSize = "failed to get real object size" - CouldntDeleteObjectFromStorageContinueDeleting = "couldn't delete object from storage, continue deleting from tree" - CouldntPutAccessBoxIntoCache = "couldn't put accessbox into cache" - InvalidAccessBoxCacheRemovingCheckInterval = "invalid accessbox check removing interval, using default value" - CouldNotCloseRequestBody = "could not close request body" - BucketOwnerKeyIsMissing = "bucket owner key is missing" - SettingsNodeInvalidOwnerKey = "settings node: invalid owner key" - SuccessfulAuth = "successful auth" - PolicyRequest = "policy request" - FailedToGenerateRequestID = "failed to generate request id" - InvalidBucketObjectLockEnabledHeader = "invalid X-Amz-Bucket-Object-Lock-Enabled header" - InvalidTreeKV = "invalid tree service meta KV" - FailedToWriteResponse = "failed to write response" - WarnDuplicateAddress = "duplicate address" - PolicyCouldntBeConvertedToNativeRules = "policy couldn't be converted to native rules, only s3 rules be applied" - CouldntCacheSubject = "couldn't cache subject info" - UserGroupsListIsEmpty = "user groups list is empty, subject not found" - CouldntCacheUserKey = "couldn't cache user key" - ObjectTaggingNodeHasMultipleIDs = "object tagging node has multiple ids" - BucketTaggingNodeHasMultipleIDs = "bucket tagging node has multiple ids" - BucketSettingsNodeHasMultipleIDs = "bucket settings node has multiple ids" - BucketCORSNodeHasMultipleIDs = "bucket cors node has multiple ids" - SystemNodeHasMultipleIDs = "system node has multiple ids" - FailedToRemoveOldSystemNode = "failed to remove old system node" - FailedToParseAddressInTreeNode = "failed to parse object addr in tree node" - UnexpectedMultiNodeIDsInSubTreeMultiParts = "unexpected multi node ids in sub tree multi parts" - FoundSeveralSystemNodes = "found several system nodes" - FailedToParsePartInfo = "failed to parse part info" - CouldNotFetchCORSContainerInfo = "couldn't fetch CORS container info" - CouldNotFetchAccessBoxContainerInfo = "couldn't fetch AccessBox container info" - CloseCredsObjectPayload = "close creds object payload" - CouldntDeleteLifecycleObject = "couldn't delete lifecycle configuration object" CouldntCacheLifecycleConfiguration = "couldn't cache lifecycle configuration" - CouldNotFetchLifecycleContainerInfo = "couldn't fetch lifecycle container info" - BucketLifecycleNodeHasMultipleIDs = "bucket lifecycle node has multiple ids" - GetBucketLifecycle = "get bucket lifecycle" - WarnDuplicateNamespaceVHS = "duplicate namespace with enabled VHS, config value skipped" - WarnValueVHSEnabledFlagWrongType = "the value of the VHS enable flag for the namespace is of the wrong type, config value skipped" - WarnDomainContainsInvalidPlaceholder = "the domain contains an invalid placeholder, domain skipped" - FailedToRemoveOldPartNode = "failed to remove old part node" - CouldntCacheNetworkInfo = "couldn't cache network info" - NotSupported = "not supported" - CheckCustomAccessKeyIDUniqueness = "check custom access key id uniqueness" - FailedToLoadMultinetConfig = "failed to load multinet config" - MultinetConfigWontBeUpdated = "multinet config won't be updated" - MultinetDialSuccess = "multinet dial successful" - MultinetDialFail = "multinet dial failed" - FailedToParseHTTPTime = "failed to parse http time, header is ignored" - FailedToPutTombstoneObject = "failed 
to put tombstone object" - FailedToCreateWorkerPool = "failed to create worker pool" - FailedToListAllObjectRelations = "failed to list all object relations" - WarnInvalidTypeTLSTerminationHeader = "invalid type of value of tls termination header" - FailedToPutTombstones = "failed to put tombstones" - WarnDomainContainsPort = "the domain contains a port, domain skipped" - CouldntCacheNetmap = "couldn't cache netmap" - BucketSettingsNotFoundUseDefaults = "bucket settings not found, use defaults" +) + +// External storage. +const ( + ObjectIsCopied = "object is copied" + CouldntDeleteObject = "couldn't delete object" + CouldNotListUserContainers = "could not list user containers" + CouldNotFetchContainerInfo = "could not fetch container info" + CouldntDeleteObjectFromStorageContinueDeleting = "couldn't delete object from storage, continue deleting from tree" + FailedToPutTombstoneObject = "failed to put tombstone object" + FailedToListAllObjectRelations = "failed to list all object relations" + FailedToPutTombstones = "failed to put tombstones" + CouldntDeleteCorsObject = "couldn't delete cors object" + BucketIsCreated = "bucket is created" + CouldntDeleteOldPartObject = "couldn't delete old part object" + CouldNotPutCompletedObject = "could not put a completed object (multipart upload)" + CouldNotDeleteUploadPart = "could not delete upload part" + PutObject = "put object" + CouldNotFetchObjectMeta = "could not fetch object meta" + FailedToDiscardPutPayloadProbablyGoroutineLeaks = "failed to discard put payload, probably goroutine leaks" + FailedToDeleteObject = "failed to delete object" + CouldntDeleteLifecycleObject = "couldn't delete lifecycle configuration object" +) + +// External blockchain. +const ( + UserGroupsListIsEmpty = "user groups list is empty, subject not found" + ResolveBucket = "resolve bucket" + FailedToResolveCID = "failed to resolve CID" + GetBucketInfo = "get bucket info" +) + +// External storage tree. +const ( + FoundMoreThanOneUnversionedNode = "found more than one unversioned node" + GetBucketCors = "get bucket cors" + GetTreeNode = "get tree node" + InvalidTreeKV = "invalid tree service meta KV" + FailedToRemoveOldSystemNode = "failed to remove old system node" + GetBucketLifecycle = "get bucket lifecycle" + FailedToRemoveOldPartNode = "failed to remove old part node" + SystemNodeHasMultipleIDs = "system node has multiple ids" + ObjectTaggingNodeHasMultipleIDs = "object tagging node has multiple ids" + BucketTaggingNodeHasMultipleIDs = "bucket tagging node has multiple ids" + BucketSettingsNodeHasMultipleIDs = "bucket settings node has multiple ids" + BucketCORSNodeHasMultipleIDs = "bucket cors node has multiple ids" +) + +// Authmate. +const ( + CreateContainer = "create container" + CheckContainer = "check container" + CheckCustomAccessKeyIDUniqueness = "check custom access key id uniqueness" + StoreBearerTokenIntoFrostFS = "store bearer token into FrostFS" + UpdateAccessCredObjectIntoFrostFS = "update access cred object into FrostFS" + SubjectAlreadyExistsInFrostFSID = "subject already exists in frostfsid" + SetSubjectNameInFrostFSID = "set subject name in frostfsid" + AddPolicyChainRules = "add policy chain rules" + PrepareConnectionPool = "prepare connection pool" + PrepareFrostfsIDClient = "prepare frostfsid client" + PreparePolicyClient = "prepare policy client" + CreateSubjectInFrostFSID = "create subject in frostfsid" + CloseCredsObjectPayload = "close creds object payload" +) + +// Log HTTP. 
+const ( + LogHTTP = "http log" + FailedToCloseHTTPBody = "failed to close http body" + FailedToInitializeHTTPLogger = "failed to initialize http logger" + FailedToReadHTTPBody = "failed to read http body" + FailedToProcessHTTPBody = "failed to process http body" ) diff --git a/internal/net/event_handler.go b/internal/net/event_handler.go index 866fdd0c..5a8677b2 100644 --- a/internal/net/event_handler.go +++ b/internal/net/event_handler.go @@ -17,9 +17,16 @@ func (l LogEventHandler) DialPerformed(sourceIP net.Addr, _, address string, err sourceIPString = sourceIP.Network() + "://" + sourceIP.String() } if err == nil { - l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString), zap.String("destination", address)) + l.logger.Debug(logs.MultinetDialSuccess, + zap.String("source", sourceIPString), + zap.String("destination", address), + logs.TagField(logs.TagApp)) } else { - l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString), zap.String("destination", address), zap.Error(err)) + l.logger.Debug(logs.MultinetDialFail, + zap.String("source", sourceIPString), + zap.String("destination", address), + zap.Error(err), + logs.TagField(logs.TagApp)) } } diff --git a/metrics/app.go b/metrics/app.go index 1a2ef077..2ac59fd0 100644 --- a/metrics/app.go +++ b/metrics/app.go @@ -27,7 +27,7 @@ type AppMetricsConfig struct { func NewAppMetrics(cfg AppMetricsConfig) *AppMetrics { if !cfg.Enabled { - cfg.Logger.Warn(logs.MetricsAreDisabled) + cfg.Logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp)) } registry := cfg.Registerer @@ -44,7 +44,7 @@ func NewAppMetrics(cfg AppMetricsConfig) *AppMetrics { func (m *AppMetrics) SetEnabled(enabled bool) { if !enabled { - m.logger.Warn(logs.MetricsAreDisabled) + m.logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp)) } m.mu.Lock() diff --git a/pkg/service/tree/tree.go b/pkg/service/tree/tree.go index 862c0a3c..e9e6d216 100644 --- a/pkg/service/tree/tree.go +++ b/pkg/service/tree/tree.go @@ -257,7 +257,7 @@ func newNodeVersionFromTreeNode(log *zap.Logger, filePath string, treeNode *tree if createdStr, ok := treeNode.Get(createdKV); ok { if utcMilli, err := strconv.ParseInt(createdStr, 10, 64); err != nil { - log.Warn(logs.InvalidTreeKV, zap.String(createdKV, createdStr), zap.Error(err)) + log.Warn(logs.InvalidTreeKV, zap.String(createdKV, createdStr), zap.Error(err), logs.TagField(logs.TagExternalStorageTree)) } else { created := time.UnixMilli(utcMilli) version.Created = &created @@ -267,7 +267,7 @@ func newNodeVersionFromTreeNode(log *zap.Logger, filePath string, treeNode *tree if ownerStr, ok := treeNode.Get(ownerKV); ok { var owner user.ID if err := owner.DecodeString(ownerStr); err != nil { - log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, ownerStr), zap.Error(err)) + log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, ownerStr), zap.Error(err), logs.TagField(logs.TagExternalStorageTree)) } else { version.Owner = &owner } @@ -275,7 +275,7 @@ func newNodeVersionFromTreeNode(log *zap.Logger, filePath string, treeNode *tree if creationEpoch, ok := treeNode.Get(creationEpochKV); ok { if epoch, err := strconv.ParseUint(creationEpoch, 10, 64); err != nil { - log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, creationEpoch), zap.Error(err)) + log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, creationEpoch), zap.Error(err), logs.TagField(logs.TagExternalStorageTree)) } else { version.CreationEpoch = epoch } @@ -342,13 +342,13 @@ func newMultipartInfoFromTreeNode(log *zap.Logger, filePath string, 
treeNode *tr if ownerID, ok := treeNode.Get(ownerKV); ok { if err := multipartInfo.Owner.DecodeString(ownerID); err != nil { - log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, ownerID), zap.Error(err)) + log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, ownerID), zap.Error(err), logs.TagField(logs.TagExternalStorageTree)) } } if created, ok := treeNode.Get(createdKV); ok { if utcMilli, err := strconv.ParseInt(created, 10, 64); err != nil { - log.Warn(logs.InvalidTreeKV, zap.String(createdKV, created), zap.Error(err)) + log.Warn(logs.InvalidTreeKV, zap.String(createdKV, created), zap.Error(err), logs.TagField(logs.TagExternalStorageTree)) } else { multipartInfo.Created = time.UnixMilli(utcMilli) } @@ -356,7 +356,7 @@ func newMultipartInfoFromTreeNode(log *zap.Logger, filePath string, treeNode *tr if finished, ok := treeNode.Get(finishedKV); ok { if flag, err := strconv.ParseBool(finished); err != nil { - log.Warn(logs.InvalidTreeKV, zap.String(finishedKV, finished), zap.Error(err)) + log.Warn(logs.InvalidTreeKV, zap.String(finishedKV, finished), zap.Error(err), logs.TagField(logs.TagExternalStorageTree)) } else { multipartInfo.Finished = flag } @@ -364,7 +364,7 @@ func newMultipartInfoFromTreeNode(log *zap.Logger, filePath string, treeNode *tr if creationEpoch, ok := treeNode.Get(creationEpochKV); ok { if epoch, err := strconv.ParseUint(creationEpoch, 10, 64); err != nil { - log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, creationEpoch), zap.Error(err)) + log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, creationEpoch), zap.Error(err), logs.TagField(logs.TagExternalStorageTree)) } else { multipartInfo.CreationEpoch = epoch } @@ -395,23 +395,23 @@ func newMultipartInfo(log *zap.Logger, node NodeResponse) (*data.MultipartInfo, multipartInfo.Key = string(kv.GetValue()) case createdKV: if utcMilli, err := strconv.ParseInt(string(kv.GetValue()), 10, 64); err != nil { - log.Warn(logs.InvalidTreeKV, zap.String(createdKV, string(kv.GetValue())), zap.Error(err)) + log.Warn(logs.InvalidTreeKV, zap.String(createdKV, string(kv.GetValue())), zap.Error(err), logs.TagField(logs.TagExternalStorageTree)) } else { multipartInfo.Created = time.UnixMilli(utcMilli) } case ownerKV: if err := multipartInfo.Owner.DecodeString(string(kv.GetValue())); err != nil { - log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, string(kv.GetValue())), zap.Error(err)) + log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, string(kv.GetValue())), zap.Error(err), logs.TagField(logs.TagExternalStorageTree)) } case finishedKV: if isFinished, err := strconv.ParseBool(string(kv.GetValue())); err != nil { - log.Warn(logs.InvalidTreeKV, zap.String(finishedKV, string(kv.GetValue())), zap.Error(err)) + log.Warn(logs.InvalidTreeKV, zap.String(finishedKV, string(kv.GetValue())), zap.Error(err), logs.TagField(logs.TagExternalStorageTree)) } else { multipartInfo.Finished = isFinished } case creationEpochKV: if epoch, err := strconv.ParseUint(string(kv.GetValue()), 10, 64); err != nil { - log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, string(kv.GetValue())), zap.Error(err)) + log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, string(kv.GetValue())), zap.Error(err), logs.TagField(logs.TagExternalStorageTree)) } else { multipartInfo.CreationEpoch = epoch } @@ -515,7 +515,7 @@ func (c *Tree) GetSettingsNode(ctx context.Context, bktInfo *data.BucketInfo) (* if ownerKeyHex, ok := node.Get(ownerKeyKV); ok { if settings.OwnerKey, err = keys.NewPublicKeyFromString(ownerKeyHex); err != nil { - 
c.reqLogger(ctx).Error(logs.SettingsNodeInvalidOwnerKey, zap.Error(err)) + c.reqLogger(ctx).Error(logs.SettingsNodeInvalidOwnerKey, zap.Error(err), logs.TagField(logs.TagDatapath)) } } @@ -539,7 +539,7 @@ func (c *Tree) PutSettingsNode(ctx context.Context, bktInfo *data.BucketInfo, se latest := multiNode.Latest() ind := latest.GetLatestNodeIndex() if latest.IsSplit() { - c.reqLogger(ctx).Error(logs.BucketSettingsNodeHasMultipleIDs, zap.Uint64s("ids", latest.ID)) + c.reqLogger(ctx).Error(logs.BucketSettingsNodeHasMultipleIDs, zap.Uint64s("ids", latest.ID), logs.TagField(logs.TagExternalStorageTree)) } if err = c.service.MoveNode(ctx, bktInfo, systemTree, latest.ID[ind], 0, meta); err != nil { @@ -582,7 +582,7 @@ func (c *Tree) PutBucketCORS(ctx context.Context, bktInfo *data.BucketInfo, addr latest := multiNode.Latest() ind := latest.GetLatestNodeIndex() if latest.IsSplit() { - c.reqLogger(ctx).Error(logs.BucketCORSNodeHasMultipleIDs) + c.reqLogger(ctx).Error(logs.BucketCORSNodeHasMultipleIDs, logs.TagField(logs.TagExternalStorageTree)) } if err = c.service.MoveNode(ctx, bktInfo, systemTree, latest.ID[ind], 0, meta); err != nil { @@ -640,14 +640,14 @@ func (c *Tree) cleanOldNodes(ctx context.Context, nodes []*treeNode, bktInfo *da for _, node := range nodes { ind := node.GetLatestNodeIndex() if node.IsSplit() { - c.reqLogger(ctx).Error(logs.SystemNodeHasMultipleIDs, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64s("ids", node.ID)) + c.reqLogger(ctx).Error(logs.SystemNodeHasMultipleIDs, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64s("ids", node.ID), logs.TagField(logs.TagExternalStorageTree)) } if err := c.service.RemoveNode(ctx, bktInfo, systemTree, node.ID[ind]); err != nil { - c.reqLogger(ctx).Warn(logs.FailedToRemoveOldSystemNode, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64("id", node.ID[ind])) + c.reqLogger(ctx).Warn(logs.FailedToRemoveOldSystemNode, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64("id", node.ID[ind]), logs.TagField(logs.TagExternalStorageTree)) } else { addr, err := getTreeNodeAddress(node) if err != nil { - c.log.Warn(logs.FailedToParseAddressInTreeNode, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64("id", node.ID[ind])) + c.log.Warn(logs.FailedToParseAddressInTreeNode, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64("id", node.ID[ind]), logs.TagField(logs.TagDatapath)) continue } res = append(res, addr) @@ -702,7 +702,7 @@ func (c *Tree) PutObjectTagging(ctx context.Context, bktInfo *data.BucketInfo, o ind := tagNode.GetLatestNodeIndex() if tagNode.IsSplit() { - c.reqLogger(ctx).Error(logs.ObjectTaggingNodeHasMultipleIDs) + c.reqLogger(ctx).Error(logs.ObjectTaggingNodeHasMultipleIDs, logs.TagField(logs.TagExternalStorageTree)) } return c.service.MoveNode(ctx, bktInfo, versionTree, tagNode.ID[ind], objVersion.ID, treeTagSet) @@ -751,7 +751,7 @@ func (c *Tree) PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo, t latest := multiNode.Latest() ind := latest.GetLatestNodeIndex() if latest.IsSplit() { - c.reqLogger(ctx).Error(logs.BucketTaggingNodeHasMultipleIDs, zap.Uint64s("ids", latest.ID)) + c.reqLogger(ctx).Error(logs.BucketTaggingNodeHasMultipleIDs, zap.Uint64s("ids", latest.ID), logs.TagField(logs.TagExternalStorageTree)) } if err = c.service.MoveNode(ctx, bktInfo, systemTree, latest.ID[ind], 0, treeTagSet); err != nil { @@ -1049,7 +1049,7 @@ func (s *VersionsByPrefixStreamImpl) parseNodeResponse(node NodeResponse) (res * trNode, fileName, err := parseTreeNode(node) if err != nil { 
if !errors.Is(err, errNodeDoesntContainFileName) { - s.log.Debug(logs.ParseTreeNode, zap.Error(err)) + s.log.Debug(logs.ParseTreeNode, zap.Error(err), logs.TagField(logs.TagDatapath)) } return nil, true, nil } @@ -1287,7 +1287,9 @@ func (c *Tree) getUnversioned(ctx context.Context, bktInfo *data.BucketInfo, tre if len(nodes) > 1 { c.reqLogger(ctx).Debug(logs.FoundMoreThanOneUnversionedNode, - zap.String("treeID", treeID), zap.String("filepath", filepath)) + zap.String("treeID", treeID), + zap.String("filepath", filepath), + logs.TagField(logs.TagExternalStorageTree)) } sort.Slice(nodes, func(i, j int) bool { @@ -1460,7 +1462,8 @@ func (c *Tree) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartN zap.String("upload id", info.UploadID), zap.Uint64("multipart node id ", multipartNodeID), zap.Uint64s("id", part.GetNodeID()), - zap.Error(err)) + zap.Error(err), + logs.TagField(logs.TagDatapath)) continue } if partInfo.Number == info.Number { @@ -1488,7 +1491,8 @@ func (c *Tree) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartN c.reqLogger(ctx).Warn(logs.FailedToRemoveOldPartNode, zap.String("key", info.Key), zap.String("upload id", info.UploadID), - zap.Uint64("id", nodeID)) + zap.Uint64("id", nodeID), + logs.TagField(logs.TagExternalStorageTree)) } } @@ -1514,7 +1518,8 @@ func (c *Tree) GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipart // multipart parts nodeID shouldn't have multiple values c.reqLogger(ctx).Warn(logs.UnexpectedMultiNodeIDsInSubTreeMultiParts, zap.Uint64("multipart node id ", multipartNodeID), - zap.Uint64s("node ids", part.GetNodeID())) + zap.Uint64s("node ids", part.GetNodeID()), + logs.TagField(logs.TagDatapath)) continue } if part.GetNodeID()[0] == multipartNodeID { @@ -1525,7 +1530,8 @@ func (c *Tree) GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipart c.reqLogger(ctx).Warn(logs.FailedToParsePartInfo, zap.Uint64("multipart node id ", multipartNodeID), zap.Uint64s("node ids", part.GetNodeID()), - zap.Error(err)) + zap.Error(err), + logs.TagField(logs.TagDatapath)) continue } result = append(result, partInfo) @@ -1556,7 +1562,7 @@ func (c *Tree) PutBucketLifecycleConfiguration(ctx context.Context, bktInfo *dat latest := multiNode.Latest() ind := latest.GetLatestNodeIndex() if latest.IsSplit() { - c.reqLogger(ctx).Error(logs.BucketLifecycleNodeHasMultipleIDs) + c.reqLogger(ctx).Error(logs.BucketLifecycleNodeHasMultipleIDs, logs.TagField(logs.TagDatapath)) } if err = c.service.MoveNode(ctx, bktInfo, systemTree, latest.ID[ind], 0, meta); err != nil { @@ -1831,7 +1837,7 @@ func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name return nil, tree.ErrNodeNotFound } if len(nodes) != 1 { - c.reqLogger(ctx).Warn(logs.FoundSeveralSystemNodes, zap.String("name", name)) + c.reqLogger(ctx).Warn(logs.FoundSeveralSystemNodes, zap.String("name", name), logs.TagField(logs.TagDatapath)) } return newMultiNode(nodes)
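// Reviewer sketch (illustration only, not part of the patch): the snippet below
// shows how the pieces introduced above fit together — the TagField helper from
// internal/logs/logs.go and the per-tag level check that zapCoreTagFilterWrapper.Write
// consults through its TagFilterSettings. The tagLevels map stands in for the result
// of fetchLogTagsConfig and follows the sample config.env from this patch; treating
// tags missing from the config as disabled is an assumption of this sketch, since
// the tagsConfig.LevelEnabled implementation is outside the hunks shown here.
package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// TagField mirrors the helper added in internal/logs/logs.go: a tag is just an
// ordinary zap string field with the key "tag".
func TagField(tag string) zap.Field {
	return zap.String("tag", tag)
}

// tagLevels plays the role of the tag -> level map produced by fetchLogTagsConfig.
// Values follow config.env from this patch: app=info, datapath=fatal.
var tagLevels = map[string]zapcore.Level{
	"app":      zapcore.InfoLevel,
	"datapath": zapcore.FatalLevel,
}

// levelEnabled is the decision the filtering core makes per entry: a tagged
// entry is written only if its level passes the level configured for that tag.
func levelEnabled(tag string, lvl zapcore.Level) bool {
	tagLvl, ok := tagLevels[tag]
	if !ok {
		return false // assumption: tags absent from the config are disabled
	}
	return lvl >= tagLvl
}

func main() {
	// After this patch a handler-side call looks like:
	//   h.reqLogger(ctx).Warn(logs.GetBucketCors, zap.Error(err), logs.TagField(logs.TagExternalStorageTree))
	fmt.Println(levelEnabled("app", zapcore.InfoLevel))               // true: info passes the "app" threshold
	fmt.Println(levelEnabled("datapath", zapcore.WarnLevel))          // false: "datapath" is raised to fatal
	fmt.Println(levelEnabled("external_storage", zapcore.ErrorLevel)) // false: tag not enabled in this config
	_ = TagField("app") // the field itself is what the wrapping core inspects by key
}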