Cleanup storage cache metrics

Split request and hit metrics into separate metrics, rather than using
labels. This avoids duplication of data and makes metric math easier.

* Count cache errors separately so failed cache lookups do not skew the ratio math.
* Hit ratio: `registry_storage_cache_hits_total / registry_storage_cache_requests_total`
* Miss ratio: `1 - (registry_storage_cache_hits_total / registry_storage_cache_requests_total)`
* Misses: `registry_storage_cache_requests_total - registry_storage_cache_hits_total`
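
For illustration only, a minimal stand-alone sketch of the same "separate counters instead of one labeled counter" pattern, written against the upstream prometheus/client_golang library rather than the registry's internal prometheus.StorageNamespace helper used in the diff below. The package name, the lookup helper, and the registry_storage_cache_errors_total name for the error series are assumptions, not part of this change.

// Package cachemetrics is a sketch of the counter split described above,
// using the upstream Prometheus Go client for illustration only.
package cachemetrics

import (
	"errors"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	// Hits are a strict subset of requests, so ratios are a single division.
	cacheRequests = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "registry_storage_cache_requests_total",
		Help: "The number of cache requests received.",
	})
	cacheHits = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "registry_storage_cache_hits_total",
		Help: "The number of cache requests served from the cache.",
	})
	// The exposed name of the error counter is assumed to follow the same
	// pattern; only "cache_errors" appears in the change itself.
	cacheErrors = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "registry_storage_cache_errors_total",
		Help: "The number of cache request errors.",
	})
)

// errNotFound stands in for a "blob unknown to the cache" result.
var errNotFound = errors.New("not found")

// lookup is a hypothetical cache read wrapped with the three counters.
func lookup(get func(string) (string, error), key string) (string, error) {
	cacheRequests.Inc()
	v, err := get(key)
	switch {
	case err == nil:
		cacheHits.Inc()
		return v, nil
	case errors.Is(err, errNotFound):
		// Misses are not counted directly; derive them in PromQL as
		// requests - hits.
		return "", err
	default:
		// Unexpected cache failures are counted on their own series so they
		// do not distort the hit/miss arithmetic.
		cacheErrors.Inc()
		return "", err
	}
}

func init() {
	prometheus.MustRegister(cacheRequests, cacheHits, cacheErrors)
}

With the counters split this way, a rate() over requests never has to sum label values to recover the total, and an error spike shows up in its own series instead of inflating the apparent miss count.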

Signed-off-by: Ben Kochie <superq@gmail.com>
Author: Ben Kochie <superq@gmail.com>
Date: 2019-04-09 13:43:55 +02:00
Committed by: SuperQ
Commit: c19adfdf06 (parent 807a836852)

@@ -14,8 +14,14 @@ type cachedBlobStatter struct {
 	backend distribution.BlobDescriptorService
 }
 
-// cacheCount is the number of total cache request received/hits/misses
-var cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type")
+var (
+	// cacheRequestCount is the number of total cache requests received.
+	cacheRequestCount = prometheus.StorageNamespace.NewCounter("cache_requests", "The number of cache requests received")
+	// cacheHitCount is the number of cache requests served from the cache.
+	cacheHitCount = prometheus.StorageNamespace.NewCounter("cache_hits", "The number of cache hits")
+	// cacheErrorCount is the number of cache request errors.
+	cacheErrorCount = prometheus.StorageNamespace.NewCounter("cache_errors", "The number of cache request errors")
+)
 
 // NewCachedBlobStatter creates a new statter which prefers a cache and
 // falls back to a backend.
@@ -27,12 +33,12 @@ func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend dist
 }
 
 func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
-	cacheCount.WithValues("Request").Inc(1)
+	cacheRequestCount.Inc(1)
 
 	// try getting from cache
 	desc, cacheErr := cbds.cache.Stat(ctx, dgst)
 	if cacheErr == nil {
-		cacheCount.WithValues("Hit").Inc(1)
+		cacheHitCount.Inc(1)
 		return desc, nil
 	}
@@ -43,8 +49,6 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di
 	}
 
 	if cacheErr == distribution.ErrBlobUnknown {
-		// cache doesn't have info. update it with info got from backend
-		cacheCount.WithValues("Miss").Inc(1)
 		if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
 			dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(err).Error("error from cache setting desc")
 		}
@@ -52,7 +56,7 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di
 	} else {
 		// unknown error from cache. just log and error. do not store cache as it may be trigger many set calls
 		dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(cacheErr).Error("error from cache stat(ing) blob")
-		cacheCount.WithValues("Error").Inc(1)
+		cacheErrorCount.Inc(1)
 	}
 
 	return desc, nil