Cleanup storage cache metrics
Split request and hit metrics into separate metrics, rather than using labels. This avoids duplication of data and makes metric math easier.

* Count cache errors separately so they don't distort the hit/miss math.
* Hit ratio: `registry_storage_cache_hits_total / registry_storage_cache_requests_total`
* Miss ratio: `1 - (registry_storage_cache_hits_total / registry_storage_cache_requests_total)`
* Misses: `registry_storage_cache_requests_total - registry_storage_cache_hits_total`

Signed-off-by: Ben Kochie <superq@gmail.com>
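As a rough illustration of the pattern this commit moves to, here is a minimal, self-contained sketch written against the upstream prometheus/client_golang API rather than the docker/go-metrics wrappers used in the registry code. The `statFromCache` helper, the `errNotFound` sentinel, and the `registry_storage_cache_errors_total` name are illustrative assumptions (the latter follows the naming pattern of the two metrics quoted above), not code from this repository.

```go
package main

import (
	"errors"

	"github.com/prometheus/client_golang/prometheus"
)

// Three independent counters instead of one labeled "cache" counter.
// Misses are intentionally not counted; they are derived at query time
// as requests - hits.
var (
	cacheRequests = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "registry_storage_cache_requests_total",
		Help: "The number of cache requests received",
	})
	cacheHits = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "registry_storage_cache_hits_total",
		Help: "The number of cache requests served from the cache",
	})
	cacheErrors = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "registry_storage_cache_errors_total", // assumed name, following the pattern above
		Help: "The number of cache request errors",
	})
)

// errNotFound stands in for distribution.ErrBlobUnknown in this sketch.
var errNotFound = errors.New("blob unknown to cache")

// statFromCache mirrors the shape of cachedBlobStatter.Stat: every call is a
// request, a successful lookup is a hit, an unexpected error is counted as an
// error, and a plain miss increments nothing extra.
func statFromCache(lookup func() (string, error)) (string, bool) {
	cacheRequests.Inc()
	desc, err := lookup()
	switch {
	case err == nil:
		cacheHits.Inc()
		return desc, true
	case errors.Is(err, errNotFound):
		return "", false // miss: derived as requests_total - hits_total
	default:
		cacheErrors.Inc()
		return "", false
	}
}

func main() {
	prometheus.MustRegister(cacheRequests, cacheHits, cacheErrors)

	statFromCache(func() (string, error) { return "", errNotFound })  // one miss
	statFromCache(func() (string, error) { return "sha256:abcd", nil }) // one hit
}
```

Because an errored lookup still increments the request counter, the separate `cache_errors` series lets dashboards distinguish "the cache didn't have it" from "the cache lookup failed" when the plain subtraction above is too coarse.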
parent 807a836852
commit c19adfdf06
1 changed file with 11 additions and 7 deletions
@@ -14,8 +14,14 @@ type cachedBlobStatter struct {
 	backend distribution.BlobDescriptorService
 }
 
-// cacheCount is the number of total cache request received/hits/misses
-var cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type")
+var (
+	// cacheRequestCount is the number of total cache requests received.
+	cacheRequestCount = prometheus.StorageNamespace.NewCounter("cache_requests", "The number of cache requests received")
+	// cacheHitCount is the number of cache requests served from the cache.
+	cacheHitCount = prometheus.StorageNamespace.NewCounter("cache_hits", "The number of cache requests served from the cache")
+	// cacheErrorCount is the number of cache request errors.
+	cacheErrorCount = prometheus.StorageNamespace.NewCounter("cache_errors", "The number of cache request errors")
+)
 
 // NewCachedBlobStatter creates a new statter which prefers a cache and
 // falls back to a backend.
@@ -27,12 +33,12 @@ func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend dist
 }
 
 func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
-	cacheCount.WithValues("Request").Inc(1)
+	cacheRequestCount.Inc(1)
 
 	// try getting from cache
 	desc, cacheErr := cbds.cache.Stat(ctx, dgst)
 	if cacheErr == nil {
-		cacheCount.WithValues("Hit").Inc(1)
+		cacheHitCount.Inc(1)
 		return desc, nil
 	}
 
@@ -43,8 +49,6 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di
 	}
 
 	if cacheErr == distribution.ErrBlobUnknown {
-		// cache doesn't have info. update it with info got from backend
-		cacheCount.WithValues("Miss").Inc(1)
 		if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
 			dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(err).Error("error from cache setting desc")
 		}
@@ -52,7 +56,7 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di
 	} else {
 		// unknown error from cache. just log and error. do not store cache as it may be trigger many set calls
 		dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(cacheErr).Error("error from cache stat(ing) blob")
-		cacheCount.WithValues("Error").Inc(1)
+		cacheErrorCount.Inc(1)
 	}
 
 	return desc, nil