Remove deprecated cache metrics code

The metrics tracker in the cached blob statter was replaced with Prometheus
metrics and is no longer needed.
Also remove the unused log wrapping helper (logErrorf).

Signed-off-by: Derek McGowan <derek@mcgstyle.net>
Derek McGowan 2020-03-04 14:37:31 -08:00
parent 495a4af7cf
commit be29c05a1e
GPG key ID: F58C5D0A4405ACDB
2 changed files with 4 additions and 125 deletions


@@ -1,66 +0,0 @@
package storage
import (
"context"
"expvar"
"sync/atomic"
dcontext "github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/cache"
)
type blobStatCollector struct {
metrics cache.Metrics
}
func (bsc *blobStatCollector) Hit() {
atomic.AddUint64(&bsc.metrics.Requests, 1)
atomic.AddUint64(&bsc.metrics.Hits, 1)
}
func (bsc *blobStatCollector) Miss() {
atomic.AddUint64(&bsc.metrics.Requests, 1)
atomic.AddUint64(&bsc.metrics.Misses, 1)
}
func (bsc *blobStatCollector) Metrics() cache.Metrics {
return bsc.metrics
}
func (bsc *blobStatCollector) Logger(ctx context.Context) cache.Logger {
return dcontext.GetLogger(ctx)
}
// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor
// cache requests. Note this is kept globally and made available via expvar.
// For more detailed metrics, it's recommended to instrument a particular cache
// implementation.
var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{}
func init() {
registry := expvar.Get("registry")
if registry == nil {
registry = expvar.NewMap("registry")
}
cache := registry.(*expvar.Map).Get("cache")
if cache == nil {
cache = &expvar.Map{}
cache.(*expvar.Map).Init()
registry.(*expvar.Map).Set("cache", cache)
}
storage := cache.(*expvar.Map).Get("storage")
if storage == nil {
storage = &expvar.Map{}
storage.(*expvar.Map).Init()
cache.(*expvar.Map).Set("storage", storage)
}
storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} {
// no need for synchronous access: the increments are atomic and
// during reading, we don't care if the data is up to date. The
// numbers will always *eventually* be reported correctly.
return blobStatterCacheMetrics
}))
}
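
For context, the expvar-based tracker removed above was superseded by the Prometheus counters that appear in the hunks below (cacheCount.WithValues("Request").Inc(1) and friends). The following is a minimal sketch of how such a labeled counter could be declared with github.com/docker/go-metrics; only cacheCount and its WithValues/Inc calls come from this diff, while the namespace name, help text, and the storageNamespace identifier are assumptions.

// Hypothetical sketch of a Prometheus-style replacement for the expvar
// tracker above. Only cacheCount appears in this diff; the other names are
// illustrative assumptions.
package cache

import (
	metrics "github.com/docker/go-metrics"
)

var (
	// storageNamespace groups the counters under a registry_storage_* prefix
	// when exported to Prometheus.
	storageNamespace = metrics.NewNamespace("registry", "storage", nil)

	// cacheCount counts blob descriptor cache requests, labeled by outcome
	// ("Request", "Hit", "Miss", "Error").
	cacheCount = storageNamespace.NewLabeledCounter("cache", "The number of cache requests received", "type")
)

func init() {
	// Expose the namespace on the default Prometheus registry so the counters
	// are served from the registry's /metrics endpoint.
	metrics.Register(storageNamespace)
}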


@@ -9,35 +9,9 @@ import (
"github.com/opencontainers/go-digest"
)
// Metrics is used to hold metric counters
// related to the number of times a cache was
// hit or missed.
type Metrics struct {
Requests uint64
Hits uint64
Misses uint64
}
// Logger can be provided on the MetricsTracker to log errors.
//
// Usually, this is just a proxy to dcontext.GetLogger.
type Logger interface {
Errorf(format string, args ...interface{})
}
// MetricsTracker represents a metric tracker
// which simply counts the number of hits and misses.
type MetricsTracker interface {
Hit()
Miss()
Metrics() Metrics
Logger(context.Context) Logger
}
type cachedBlobStatter struct {
cache distribution.BlobDescriptorService
backend distribution.BlobDescriptorService
tracker MetricsTracker
}
var (
@@ -54,16 +28,6 @@ func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend dist
}
}
// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
// falls back to a backend. Hits and misses will be sent to the tracker.
func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
return &cachedBlobStatter{
cache: cache,
backend: backend,
tracker: tracker,
}
}
func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
cacheCount.WithValues("Request").Inc(1)
@@ -71,9 +35,6 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di
desc, cacheErr := cbds.cache.Stat(ctx, dgst)
if cacheErr == nil {
cacheCount.WithValues("Hit").Inc(1)
if cbds.tracker != nil {
cbds.tracker.Hit()
}
return desc, nil
}
@@ -86,20 +47,16 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di
if cacheErr == distribution.ErrBlobUnknown {
// cache doesn't have info. update it with info got from backend
cacheCount.WithValues("Miss").Inc(1)
if cbds.tracker != nil {
cbds.tracker.Miss()
}
if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(err).Error("error from cache setting desc")
}
// we don't need to return cache error upstream if any. continue returning value from backend
return desc, nil
} else {
// unknown error from cache. just log the error. do not store in the cache as it may trigger many set calls
dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(cacheErr).Error("error from cache stat(ing) blob")
cacheCount.WithValues("Error").Inc(1)
}
// unknown error from cache. just log the error. do not store in the cache as it may trigger many set calls
dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(cacheErr).Error("error from cache stat(ing) blob")
cacheCount.WithValues("Error").Inc(1)
return desc, nil
}
@@ -122,15 +79,3 @@ func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Di
}
return nil
}
func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) {
if tracker == nil {
return
}
logger := tracker.Logger(ctx)
if logger == nil {
return
}
logger.Errorf(format, args...)
}
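
With NewCachedBlobStatterWithMetrics removed, callers keep using NewCachedBlobStatter and simply stop passing a tracker; hit/miss counts are reported through the Prometheus counters instead. A hedged sketch of the resulting call site, where newStatter, cacheSvc, and backendSvc are illustrative names rather than part of this diff:

// Hypothetical caller after this commit: the MetricsTracker argument is gone
// and NewCachedBlobStatter is the only remaining constructor.
package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/storage/cache"
)

func newStatter(cacheSvc, backendSvc distribution.BlobDescriptorService) distribution.BlobStatter {
	// Previously: cache.NewCachedBlobStatterWithMetrics(cacheSvc, backendSvc, tracker)
	return cache.NewCachedBlobStatter(cacheSvc, backendSvc)
}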