Compare commits: master...feat/add-g
3 commits: d42f67e053, 99340b2717, c16788f9c6
6 changed files with 231 additions and 28 deletions
@@ -108,7 +108,6 @@ type applicationConfiguration struct {
 		level       string
 		destination string
 		timestamp   bool
-		options     []zap.Option
 	}

 	ObjectCfg struct {

@@ -233,14 +232,6 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 	a.LoggerCfg.level = loggerconfig.Level(c)
 	a.LoggerCfg.destination = loggerconfig.Destination(c)
 	a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
-	var opts []zap.Option
-	if loggerconfig.ToLokiConfig(c).Enabled {
-		opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
-			lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c))
-			return lokiCore
-		})}
-	}
-	a.LoggerCfg.options = opts

 	// Object

@@ -727,6 +718,12 @@ func initCfg(appCfg *config.Config) *cfg {
 	logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
 	log, err := logger.NewLogger(logPrm)
 	fatalOnErr(err)
+	if loggerconfig.ToLokiConfig(appCfg).Enabled {
+		log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+			lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
+			return lokiCore
+		}))
+	}

 	c.internals = initInternals(appCfg, log)

@@ -1093,7 +1090,6 @@ func (c *cfg) loggerPrm() (logger.Prm, error) {
 		return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
 	}
 	prm.PrependTimestamp = c.LoggerCfg.timestamp
-	prm.Options = c.LoggerCfg.options

 	return prm, nil
 }
go.mod (2 changed lines)

@@ -9,7 +9,7 @@ require (
 	git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
 	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
 	git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275
-	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9
 	git.frostfs.info/TrueCloudLab/hrw v1.2.1
 	git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
 	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b
go.sum (4 changed lines)

@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
 git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275 h1:WqWxCnCl2ekfjWja/CpGeF2rf4h0x199xhdnsm/j+E8=
 git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592 h1:n7Pl8V7O1yS07J/fqdbzZjVe/mQW42a7eS0QHfgrzJw=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
 git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
 git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
internal/qos/grpc_test.go (new file, 214 lines)

@@ -0,0 +1,214 @@ (entire file added)

package qos_test

import (
	"context"
	"errors"
	"fmt"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
	"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"
)

const (
	okKey = "ok"
)

var (
	errTest         = errors.New("mock")
	errWrongTag     = errors.New("wrong tag")
	errNoTag        = errors.New("failed to get tag from context")
	errResExhausted = new(apistatus.ResourceExhausted)
	tags            = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync}
)

type mockGRPCServerStream struct {
	grpc.ServerStream

	ctx context.Context
}

func (m *mockGRPCServerStream) Context() context.Context {
	return m.ctx
}

type limiter struct {
	released bool
}

func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) {
	if key != okKey {
		return nil, false
	}
	return func() { l.released = true }, true
}

func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) (bool, error) {
	interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim })
	called := false
	handler := func(ctx context.Context, req any) (any, error) {
		called = true
		return nil, errTest
	}
	_, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler)
	return called, err
}

func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) (bool, error) {
	interceptor := qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim })
	called := false
	handler := func(srv any, stream grpc.ServerStream) error {
		called = true
		return errTest
	}
	err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{
		FullMethod: methodName,
	}, handler)
	return called, err
}

func Test_MaxActiveRPCLimiter(t *testing.T) {
	// UnaryServerInterceptor
	t.Run("unary fail", func(t *testing.T) {
		var lim limiter

		called, err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "")
		require.EqualError(t, err, errResExhausted.Error())
		require.False(t, called)
	})
	t.Run("unary pass critical", func(t *testing.T) {
		var lim limiter
		ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())

		called, err := unaryMaxActiveRPCLimiter(ctx, &lim, "")
		require.EqualError(t, err, errTest.Error())
		require.True(t, called)
		require.False(t, lim.released)
	})
	t.Run("unary pass", func(t *testing.T) {
		var lim limiter

		called, err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey)
		require.EqualError(t, err, errTest.Error())
		require.True(t, called && lim.released)
	})
	// StreamServerInterceptor
	t.Run("stream fail", func(t *testing.T) {
		var lim limiter

		called, err := streamMaxActiveRPCLimiter(context.Background(), &lim, "")
		require.EqualError(t, err, errResExhausted.Error())
		require.False(t, called)
	})
	t.Run("stream pass critical", func(t *testing.T) {
		var lim limiter
		ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())

		called, err := streamMaxActiveRPCLimiter(ctx, &lim, "")
		require.EqualError(t, err, errTest.Error())
		require.True(t, called)
		require.False(t, lim.released)
	})
	t.Run("stream pass", func(t *testing.T) {
		var lim limiter

		called, err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey)
		require.EqualError(t, err, errTest.Error())
		require.True(t, called && lim.released)
	})
}

func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) {
	interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor()
	handler := func(ctx context.Context, req any) (any, error) {
		if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() {
			return nil, nil
		}
		return nil, errWrongTag
	}
	_, err := interceptor(context.Background(), nil, nil, handler)
	require.NoError(t, err)
}

func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) {
	interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor()

	// check context with no value
	called := false
	invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
		called = true
		if _, ok := tagging.IOTagFromContext(ctx); ok {
			return fmt.Errorf("%v: expected no IO tags", errWrongTag)
		}
		return nil
	}
	require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil))
	require.True(t, called)

	// check context for internal tag
	targetTag := qos.IOTagInternal.String()
	invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
		raw, ok := tagging.IOTagFromContext(ctx)
		if !ok {
			return errNoTag
		}
		if raw != targetTag {
			return errWrongTag
		}
		return nil
	}
	for _, tag := range tags {
		ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
		require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
	}

	// check context for client tag
	ctx := tagging.ContextWithIOTag(context.Background(), "")
	targetTag = qos.IOTagClient.String()
	require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
}

func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) {
	interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor()

	// check context with no value
	called := false
	streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
		called = true
		if _, ok := tagging.IOTagFromContext(ctx); ok {
			return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag)
		}
		return nil, nil
	}
	_, err := interceptor(context.Background(), nil, nil, "", streamer, nil)
	require.True(t, called)
	require.NoError(t, err)

	// check context for internal tag
	targetTag := qos.IOTagInternal.String()
	streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
		raw, ok := tagging.IOTagFromContext(ctx)
		if !ok {
			return nil, errNoTag
		}
		if raw != targetTag {
			return nil, errWrongTag
		}
		return nil, nil
	}
	for _, tag := range tags {
		ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
		_, err := interceptor(ctx, nil, nil, "", streamer, nil)
		require.NoError(t, err)
	}

	// check context for client tag
	ctx := tagging.ContextWithIOTag(context.Background(), "")
	targetTag = qos.IOTagClient.String()
	_, err = interceptor(ctx, nil, nil, "", streamer, nil)
	require.NoError(t, err)
}
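For context, a minimal wiring sketch (not part of this change) showing how the limiter interceptors under test would be registered on a server. It assumes the constructors return standard grpc-go interceptor types, as the tests above exercise; grpc.NewServer, grpc.ChainUnaryInterceptor, and grpc.ChainStreamInterceptor are standard grpc-go:

	// Hypothetical registration of the limiter interceptors under test.
	var lim limiter // the test double above; any limiting.Limiter works
	srv := grpc.NewServer(
		grpc.ChainUnaryInterceptor(
			qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return &lim }),
		),
		grpc.ChainStreamInterceptor(
			qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return &lim }),
		),
	)
	_ = srv // serve as usual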
@@ -527,8 +527,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
 			return
 		}

-		var release qos.ReleaseFunc
-		release, err = s.opsLimiter.ReadRequest(ctx)
+		release, err := s.opsLimiter.ReadRequest(ctx)
 		if err != nil {
 			log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
 			s.m.RUnlock()
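The ops limiter follows an acquire/release contract: ReadRequest hands back a qos.ReleaseFunc that must run exactly once when the guarded read completes. A hedged sketch of the general call-site pattern; the defer and error return are illustrative, since the code in this hunk releases the limiter and unlocks outside the shown lines:

	// Sketch only: acquire a read slot, bail out on error, release when done.
	release, err := s.opsLimiter.ReadRequest(ctx)
	if err != nil {
		return err
	}
	defer release() // free the slot once the read finishes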
@@ -36,9 +36,6 @@ type Prm struct {

 	// PrependTimestamp specifies whether to prepend a timestamp in the log
 	PrependTimestamp bool
-
-	// Options for zap.Logger
-	Options []zap.Option
 }

 const (

@@ -106,12 +103,10 @@ func newConsoleLogger(prm Prm) (*Logger, error) {
 		c.EncoderConfig.TimeKey = ""
 	}

-	opts := []zap.Option{
+	lZap, err := c.Build(
 		zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
 		zap.AddCallerSkip(1),
-	}
-	opts = append(opts, prm.Options...)
-	lZap, err := c.Build(opts...)
+	)
 	if err != nil {
 		return nil, err
 	}

@@ -155,12 +150,7 @@ func newJournaldLogger(prm Prm) (*Logger, error) {
 		c.Sampling.Thereafter,
 		samplerOpts...,
 	)
-	opts := []zap.Option{
-		zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
-		zap.AddCallerSkip(1),
-	}
-	opts = append(opts, prm.Options...)
-	lZap := zap.New(samplingCore, opts...)
+	lZap := zap.New(samplingCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1))

 	l := &Logger{z: lZap, lvl: lvl}

@@ -171,6 +161,10 @@ func (l *Logger) Reload(prm Prm) {
 	l.lvl.SetLevel(prm.level)
 }

+func (l *Logger) WithOptions(options ...zap.Option) {
+	l.z = l.z.WithOptions(options...)
+}
+
 func (l *Logger) With(fields ...zap.Field) *Logger {
 	return &Logger{z: l.z.With(fields...)}
 }
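Net effect of the logger hunks: Prm no longer carries zap options through construction; instead, the new Logger.WithOptions mutates an already-built logger, which is what initCfg above uses to attach the Loki core. A hypothetical standalone use, assuming only standard zap (zap.Hooks, zapcore.Entry) plus the method added in this diff; the entry counter is illustrative:

	log, err := logger.NewLogger(logPrm)
	fatalOnErr(err)
	// Illustrative only: observe every emitted entry via a zap hook.
	entries := 0
	log.WithOptions(zap.Hooks(func(e zapcore.Entry) error {
		entries++
		return nil
	}))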