Split request and hit metrics into separate metrics, rather than using labels. This avoids duplication of data and makes metric math easier.

* Count cache errors separately to avoid weird math.
* Hit ratio: `registry_storage_cache_hits_total / registry_storage_cache_requests_total`
* Miss ratio: `1 - (registry_storage_cache_hits_total / registry_storage_cache_requests_total)`
* Misses: `registry_storage_cache_requests_total - registry_storage_cache_hits_total`

Signed-off-by: Ben Kochie <superq@gmail.com>
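Worked example of the metric math above: with `registry_storage_cache_requests_total` = 200 and `registry_storage_cache_hits_total` = 150, the hit ratio is 150 / 200 = 0.75, the miss ratio is 1 - 0.75 = 0.25, and the number of misses is 200 - 150 = 50.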
package cache

import (
	"context"

	"github.com/distribution/distribution/v3"
	dcontext "github.com/distribution/distribution/v3/context"
	prometheus "github.com/distribution/distribution/v3/metrics"
	"github.com/opencontainers/go-digest"
)

type cachedBlobStatter struct {
	cache   distribution.BlobDescriptorService
	backend distribution.BlobDescriptorService
}

var (
	// cacheRequestCount is the total number of cache requests received.
	cacheRequestCount = prometheus.StorageNamespace.NewCounter("cache_requests", "The number of cache requests received")
	// cacheHitCount is the number of cache requests served from the cache.
	cacheHitCount = prometheus.StorageNamespace.NewCounter("cache_hits", "The number of cache requests served from the cache")
	// cacheErrorCount is the number of cache request errors.
	cacheErrorCount = prometheus.StorageNamespace.NewCounter("cache_errors", "The number of cache request errors")
)

// NewCachedBlobStatter creates a new statter which prefers a cache and
// falls back to a backend.
func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
	return &cachedBlobStatter{
		cache:   cache,
		backend: backend,
	}
}
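
// Example usage (sketch): cacheService and backendService here are hypothetical
// names for any two BlobDescriptorService implementations, e.g. an in-memory
// cache sitting in front of the storage-backed statter.
//
//	statter := NewCachedBlobStatter(cacheService, backendService)
//	desc, err := statter.Stat(ctx, dgst)
//
// A cache miss is transparent: the descriptor is fetched from the backend and,
// if the cache simply did not know the blob, written back to the cache.
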
func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	cacheRequestCount.Inc(1)

	// try getting from cache
	desc, cacheErr := cbds.cache.Stat(ctx, dgst)
	if cacheErr == nil {
		cacheHitCount.Inc(1)
		return desc, nil
	}

	// couldn't get from cache; get from backend
	desc, err := cbds.backend.Stat(ctx, dgst)
	if err != nil {
		return desc, err
	}

	if cacheErr == distribution.ErrBlobUnknown {
		if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
			dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(err).Error("error from cache setting desc")
		}
		// we don't need to return the cache error upstream, if any; continue returning the value from the backend
	} else {
		// unknown error from cache: just log and count the error; do not write to the cache, as that may trigger many set calls
		dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(cacheErr).Error("error from cache stat(ing) blob")
		cacheErrorCount.Inc(1)
	}

	return desc, nil
}

func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
	err := cbds.cache.Clear(ctx, dgst)
	if err != nil {
		return err
	}

	err = cbds.backend.Clear(ctx, dgst)
	if err != nil {
		return err
	}
	return nil
}

func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
		dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(err).Error("error from cache setting desc")
	}
	return nil
}