Merge pull request #1386 from andrewnguyen/garbage_collect_pick_from_1050

garbage collection
Richard Scothern 2016-02-29 15:01:48 -08:00
commit 56eb3f51f4
14 changed files with 796 additions and 29 deletions

docs/garbagecollect.go (new file, 150 additions)

@ -0,0 +1,150 @@
package registry
import (
"fmt"
"os"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage"
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/factory"
"github.com/spf13/cobra"
)
func markAndSweep(storageDriver driver.StorageDriver) error {
ctx := context.Background()
// Construct a registry
registry, err := storage.NewRegistry(ctx, storageDriver)
if err != nil {
return fmt.Errorf("failed to construct registry: %v", err)
}
repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator)
if !ok {
return fmt.Errorf("coercion error: unable to convert Namespace to RepositoryEnumerator")
}
// mark
markSet := make(map[digest.Digest]struct{})
err = repositoryEnumerator.Enumerate(ctx, func(repoName string) error {
var err error
named, err := reference.ParseNamed(repoName)
if err != nil {
return fmt.Errorf("failed to parse repo name %s: %v", repoName, err)
}
repository, err := registry.Repository(ctx, named)
if err != nil {
return fmt.Errorf("failed to construct repository: %v", err)
}
manifestService, err := repository.Manifests(ctx)
if err != nil {
return fmt.Errorf("failed to construct manifest service: %v", err)
}
manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator)
if !ok {
return fmt.Errorf("coercion error: unable to convert ManifestService into ManifestEnumerator")
}
err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error {
// Mark the manifest's blob
markSet[dgst] = struct{}{}
manifest, err := manifestService.Get(ctx, dgst)
if err != nil {
return fmt.Errorf("failed to retrieve manifest for digest %v: %v", dgst, err)
}
descriptors := manifest.References()
for _, descriptor := range descriptors {
markSet[descriptor.Digest] = struct{}{}
}
switch manifest.(type) {
case *schema1.SignedManifest:
signaturesGetter, ok := manifestService.(distribution.SignaturesGetter)
if !ok {
return fmt.Errorf("coercion error: unable to convert ManifestService into SignaturesGetter")
}
signatures, err := signaturesGetter.GetSignatures(ctx, dgst)
if err != nil {
return fmt.Errorf("failed to get signatures for signed manifest: %v", err)
}
for _, signatureDigest := range signatures {
markSet[signatureDigest] = struct{}{}
}
break
case *schema2.DeserializedManifest:
config := manifest.(*schema2.DeserializedManifest).Config
markSet[config.Digest] = struct{}{}
break
}
return nil
})
return err
})
if err != nil {
return fmt.Errorf("failed to mark: %v\n", err)
}
// sweep
blobService := registry.Blobs()
deleteSet := make(map[digest.Digest]struct{})
err = blobService.Enumerate(ctx, func(dgst digest.Digest) error {
// check if digest is in markSet. If not, delete it!
if _, ok := markSet[dgst]; !ok {
deleteSet[dgst] = struct{}{}
}
return nil
})
// Construct vacuum
vacuum := storage.NewVacuum(ctx, storageDriver)
for dgst := range deleteSet {
err = vacuum.RemoveBlob(string(dgst))
if err != nil {
return fmt.Errorf("failed to delete blob %s: %v\n", dgst, err)
}
}
return err
}
// GCCmd is the cobra command that corresponds to the garbage-collect subcommand
var GCCmd = &cobra.Command{
Use: "garbage-collect <config>",
Short: "`garbage-collect` deletes layers not referenced by any manifests",
Long: "`garbage-collect` deletes layers not referenced by any manifests",
Run: func(cmd *cobra.Command, args []string) {
config, err := resolveConfiguration(args)
if err != nil {
fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
cmd.Usage()
os.Exit(1)
}
driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters())
if err != nil {
fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err)
os.Exit(1)
}
err = markAndSweep(driver)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err)
os.Exit(1)
}
},
}
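At its core, markAndSweep above is an ordinary mark-and-sweep over digests: every blob reachable from some manifest (the manifest blob itself, its referenced layers, schema1 signatures, the schema2 config) lands in markSet, and the sweep vacuums any blob the storage enumerates that was never marked. A stripped-down, self-contained sketch of that set arithmetic, with fabricated digests standing in for real content:

package main

import "fmt"

func main() {
    // Everything the blob store would enumerate during the sweep phase.
    allBlobs := []string{"sha256:manifest-1", "sha256:layer-a", "sha256:layer-b", "sha256:orphan"}

    // Everything the mark phase found reachable from some manifest.
    markSet := map[string]struct{}{
        "sha256:manifest-1": {},
        "sha256:layer-a":    {},
        "sha256:layer-b":    {},
    }

    // Sweep: anything enumerated but never marked is eligible for deletion.
    deleteSet := make(map[string]struct{})
    for _, dgst := range allBlobs {
        if _, marked := markSet[dgst]; !marked {
            deleteSet[dgst] = struct{}{}
        }
    }
    for dgst := range deleteSet {
        fmt.Println("would vacuum", dgst) // the real code calls vacuum.RemoveBlob here
    }
}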

docs/garbagecollect_test.go (new file, 343 additions)

@ -0,0 +1,343 @@
package registry
import (
"io"
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage"
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/inmemory"
"github.com/docker/distribution/testutil"
)
type image struct {
manifest distribution.Manifest
manifestDigest digest.Digest
layers map[digest.Digest]io.ReadSeeker
}
func createRegistry(t *testing.T, driver driver.StorageDriver) distribution.Namespace {
ctx := context.Background()
registry, err := storage.NewRegistry(ctx, driver, storage.EnableDelete)
if err != nil {
t.Fatalf("Failed to construct namespace")
}
return registry
}
func makeRepository(t *testing.T, registry distribution.Namespace, name string) distribution.Repository {
ctx := context.Background()
// Initialize a dummy repository
named, err := reference.ParseNamed(name)
if err != nil {
t.Fatalf("Failed to parse name %s: %v", name, err)
}
repo, err := registry.Repository(ctx, named)
if err != nil {
t.Fatalf("Failed to construct repository: %v", err)
}
return repo
}
func makeManifestService(t *testing.T, repository distribution.Repository) distribution.ManifestService {
ctx := context.Background()
manifestService, err := repository.Manifests(ctx)
if err != nil {
t.Fatalf("Failed to construct manifest store: %v", err)
}
return manifestService
}
func allBlobs(t *testing.T, registry distribution.Namespace) map[digest.Digest]struct{} {
ctx := context.Background()
blobService := registry.Blobs()
allBlobsMap := make(map[digest.Digest]struct{})
err := blobService.Enumerate(ctx, func(dgst digest.Digest) error {
allBlobsMap[dgst] = struct{}{}
return nil
})
if err != nil {
t.Fatalf("Error getting all blobs: %v", err)
}
return allBlobsMap
}
func uploadImage(t *testing.T, repository distribution.Repository, im image) digest.Digest {
// upload layers
err := testutil.UploadBlobs(repository, im.layers)
if err != nil {
t.Fatalf("layer upload failed: %v", err)
}
// upload manifest
ctx := context.Background()
manifestService := makeManifestService(t, repository)
manifestDigest, err := manifestService.Put(ctx, im.manifest)
if err != nil {
t.Fatalf("manifest upload failed: %v", err)
}
return manifestDigest
}
func uploadRandomSchema1Image(t *testing.T, repository distribution.Repository) image {
randomLayers, err := testutil.CreateRandomLayers(2)
if err != nil {
t.Fatalf("%v", err)
}
digests := []digest.Digest{}
for digest := range randomLayers {
digests = append(digests, digest)
}
manifest, err := testutil.MakeSchema1Manifest(digests)
if err != nil {
t.Fatalf("%v", err)
}
manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers})
return image{
manifest: manifest,
manifestDigest: manifestDigest,
layers: randomLayers,
}
}
func uploadRandomSchema2Image(t *testing.T, repository distribution.Repository) image {
randomLayers, err := testutil.CreateRandomLayers(2)
if err != nil {
t.Fatalf("%v", err)
}
digests := []digest.Digest{}
for digest := range randomLayers {
digests = append(digests, digest)
}
manifest, err := testutil.MakeSchema2Manifest(repository, digests)
if err != nil {
t.Fatalf("%v", err)
}
manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers})
return image{
manifest: manifest,
manifestDigest: manifestDigest,
layers: randomLayers,
}
}
func TestNoDeletionNoEffect(t *testing.T) {
ctx := context.Background()
inmemoryDriver := inmemory.New()
registry := createRegistry(t, inmemoryDriver)
repo := makeRepository(t, registry, "palailogos")
manifestService, err := repo.Manifests(ctx)
image1 := uploadRandomSchema1Image(t, repo)
image2 := uploadRandomSchema1Image(t, repo)
image3 := uploadRandomSchema2Image(t, repo)
// construct manifestlist for fun.
blobstatter := registry.BlobStatter()
manifestList, err := testutil.MakeManifestList(blobstatter, []digest.Digest{
image1.manifestDigest, image2.manifestDigest})
if err != nil {
t.Fatalf("Failed to make manifest list: %v", err)
}
_, err = manifestService.Put(ctx, manifestList)
if err != nil {
t.Fatalf("Failed to add manifest list: %v", err)
}
// Run GC
err = markAndSweep(inmemoryDriver)
if err != nil {
t.Fatalf("Failed mark and sweep: %v", err)
}
blobs := allBlobs(t, registry)
// the +1 at the end is for the manifest list
// the first +3 is one blob per manifest (the manifests themselves are stored as blobs)
// the second +3 is the signature blob for each schema1 manifest plus the config blob for the schema2 manifest
totalBlobCount := len(image1.layers) + len(image2.layers) + len(image3.layers) + 1 + 3 + 3
if len(blobs) != totalBlobCount {
t.Fatalf("Garbage collection affected storage")
}
}
func TestDeletionHasEffect(t *testing.T) {
ctx := context.Background()
inmemoryDriver := inmemory.New()
registry := createRegistry(t, inmemoryDriver)
repo := makeRepository(t, registry, "komnenos")
manifests, err := repo.Manifests(ctx)
image1 := uploadRandomSchema1Image(t, repo)
image2 := uploadRandomSchema1Image(t, repo)
image3 := uploadRandomSchema2Image(t, repo)
manifests.Delete(ctx, image2.manifestDigest)
manifests.Delete(ctx, image3.manifestDigest)
// Run GC
err = markAndSweep(inmemoryDriver)
if err != nil {
t.Fatalf("Failed mark and sweep: %v", err)
}
blobs := allBlobs(t, registry)
// check that the image1 manifest and all the layers are still in blobs
if _, ok := blobs[image1.manifestDigest]; !ok {
t.Fatalf("First manifest is missing")
}
for layer := range image1.layers {
if _, ok := blobs[layer]; !ok {
t.Fatalf("manifest 1 layer is missing: %v", layer)
}
}
// check that image2 and image3 layers are not still around
for layer := range image2.layers {
if _, ok := blobs[layer]; ok {
t.Fatalf("manifest 2 layer is present: %v", layer)
}
}
for layer := range image3.layers {
if _, ok := blobs[layer]; ok {
t.Fatalf("manifest 3 layer is present: %v", layer)
}
}
}
func getAnyKey(digests map[digest.Digest]io.ReadSeeker) (d digest.Digest) {
for d = range digests {
break
}
return
}
func getKeys(digests map[digest.Digest]io.ReadSeeker) (ds []digest.Digest) {
for d := range digests {
ds = append(ds, d)
}
return
}
func TestDeletionWithSharedLayer(t *testing.T) {
ctx := context.Background()
inmemoryDriver := inmemory.New()
registry := createRegistry(t, inmemoryDriver)
repo := makeRepository(t, registry, "tzimiskes")
// Create random layers
randomLayers1, err := testutil.CreateRandomLayers(3)
if err != nil {
t.Fatalf("failed to make layers: %v", err)
}
randomLayers2, err := testutil.CreateRandomLayers(3)
if err != nil {
t.Fatalf("failed to make layers: %v", err)
}
// Upload all layers
err = testutil.UploadBlobs(repo, randomLayers1)
if err != nil {
t.Fatalf("failed to upload layers: %v", err)
}
err = testutil.UploadBlobs(repo, randomLayers2)
if err != nil {
t.Fatalf("failed to upload layers: %v", err)
}
// Construct manifests
manifest1, err := testutil.MakeSchema1Manifest(getKeys(randomLayers1))
if err != nil {
t.Fatalf("failed to make manifest: %v", err)
}
sharedKey := getAnyKey(randomLayers1)
manifest2, err := testutil.MakeSchema2Manifest(repo, append(getKeys(randomLayers2), sharedKey))
if err != nil {
t.Fatalf("failed to make manifest: %v", err)
}
manifestService := makeManifestService(t, repo)
// Upload manifests
_, err = manifestService.Put(ctx, manifest1)
if err != nil {
t.Fatalf("manifest upload failed: %v", err)
}
manifestDigest2, err := manifestService.Put(ctx, manifest2)
if err != nil {
t.Fatalf("manifest upload failed: %v", err)
}
// delete
err = manifestService.Delete(ctx, manifestDigest2)
if err != nil {
t.Fatalf("manifest deletion failed: %v", err)
}
// check that all of the layers from randomLayers1 are still there
blobs := allBlobs(t, registry)
for dgst := range randomLayers1 {
if _, ok := blobs[dgst]; !ok {
t.Fatalf("random layer 1 blob missing: %v", dgst)
}
}
}
func TestOrphanBlobDeleted(t *testing.T) {
inmemoryDriver := inmemory.New()
registry := createRegistry(t, inmemoryDriver)
repo := makeRepository(t, registry, "michael_z_doukas")
digests, err := testutil.CreateRandomLayers(1)
if err != nil {
t.Fatalf("Failed to create random digest: %v", err)
}
if err = testutil.UploadBlobs(repo, digests); err != nil {
t.Fatalf("Failed to upload blob: %v", err)
}
// formality to create the necessary directories
uploadRandomSchema2Image(t, repo)
// Run GC
err = markAndSweep(inmemoryDriver)
if err != nil {
t.Fatalf("Failed mark and sweep: %v", err)
}
blobs := allBlobs(t, registry)
// check that orphan blob layers are not still around
for dgst := range digests {
if _, ok := blobs[dgst]; ok {
t.Fatalf("Orphan layer is present: %v", dgst)
}
}
}
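The expected blob count in TestNoDeletionNoEffect works out as follows: two layers for each of the three images, one blob per stored manifest, one signature blob per schema1 image plus one config blob for the schema2 image, and one blob for the manifest list. A quick check of that arithmetic:

package main

import "fmt"

func main() {
    layers := 3 * 2        // two random layers per image
    manifestBlobs := 3     // each manifest is itself stored as a blob
    sigsAndConfig := 2 + 1 // one signature blob per schema1 image, one config blob for the schema2 image
    manifestList := 1
    fmt.Println(layers + manifestBlobs + sigsAndConfig + manifestList) // 13
}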


@ -93,8 +93,3 @@ func (pms proxyManifestStore) Put(ctx context.Context, manifest distribution.Man
func (pms proxyManifestStore) Delete(ctx context.Context, dgst digest.Digest) error {
return distribution.ErrUnsupported
}
- /*func (pms proxyManifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
- return 0, distribution.ErrUnsupported
- }
- */


@ -166,6 +166,14 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named
}, nil
}
func (pr *proxyingRegistry) Blobs() distribution.BlobEnumerator {
return pr.embedded.Blobs()
}
func (pr *proxyingRegistry) BlobStatter() distribution.BlobStatter {
return pr.embedded.BlobStatter()
}
// authChallenger encapsulates a request to the upstream to establish credential challenges
type authChallenger interface {
tryEstablishChallenges(context.Context) error


@ -24,16 +24,12 @@ import (
"github.com/yvasiyarov/gorelic"
)
- // Cmd is a cobra command for running the registry.
- var Cmd = &cobra.Command{
- Use: "registry <config>",
- Short: "registry stores and distributes Docker images",
- Long: "registry stores and distributes Docker images.",
// ServeCmd is a cobra command for running the registry.
var ServeCmd = &cobra.Command{
Use: "serve <config>",
Short: "`serve` stores and distributes Docker images",
Long: "`serve` stores and distributes Docker images.",
Run: func(cmd *cobra.Command, args []string) {
- if showVersion {
- version.PrintVersion()
- return
- }
// setup context
ctx := context.WithVersion(context.Background(), version.Version)
@ -65,12 +61,6 @@ var Cmd = &cobra.Command{
},
}
- var showVersion bool
- func init() {
- Cmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit")
- }
// A Registry represents a complete instance of the registry.
// TODO(aaronl): It might make sense for Registry to become an interface.
type Registry struct {

docs/root.go (new file, 28 additions)

@ -0,0 +1,28 @@
package registry
import (
"github.com/docker/distribution/version"
"github.com/spf13/cobra"
)
var showVersion bool
func init() {
RootCmd.AddCommand(ServeCmd)
RootCmd.AddCommand(GCCmd)
RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit")
}
// RootCmd is the main command for the 'registry' binary.
var RootCmd = &cobra.Command{
Use: "registry",
Short: "`registry`",
Long: "`registry`",
Run: func(cmd *cobra.Command, args []string) {
if showVersion {
version.PrintVersion()
return
}
cmd.Usage()
},
}
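Nothing in this diff shows the registry binary's main package, but presumably it now just delegates to RootCmd, which dispatches to the serve and garbage-collect subcommands registered in init above. A minimal sketch under that assumption (the import path and error handling are illustrative, not taken from this change):

package main

import (
    "os"

    "github.com/docker/distribution/registry"
)

func main() {
    // ServeCmd and GCCmd are attached to RootCmd in root.go's init(), so
    // `registry serve <config>` and `registry garbage-collect <config>`
    // both route through Execute.
    if err := registry.RootCmd.Execute(); err != nil {
        os.Exit(1)
    }
}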


@ -1,6 +1,8 @@
package storage
import (
"path"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
@ -85,6 +87,36 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr
}, bs.driver.PutContent(ctx, bp, p)
}
func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error {
specPath, err := pathFor(blobsPathSpec{})
if err != nil {
return err
}
err = Walk(ctx, bs.driver, specPath, func(fileInfo driver.FileInfo) error {
// skip directories
if fileInfo.IsDir() {
return nil
}
currentPath := fileInfo.Path()
// we only want to parse paths that end with /data
_, fileName := path.Split(currentPath)
if fileName != "data" {
return nil
}
digest, err := digestFromPath(currentPath)
if err != nil {
return err
}
return ingester(digest)
})
return err
}
// path returns the canonical path for the blob identified by digest. The blob
// may or may not exist.
func (bs *blobStore) path(dgst digest.Digest) (string, error) {
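The blobStore.Enumerate added above walks everything under blobsPathSpec and only reports entries whose final path element is data, i.e. the blob payload files; directories and any other files are skipped. A small, self-contained sketch of that filter over some made-up walk results:

package main

import (
    "fmt"
    "path"
)

func main() {
    // Hypothetical paths a Walk over <root>/v2/blobs/ might visit.
    visited := []string{
        "/docker/registry/v2/blobs/sha256",
        "/docker/registry/v2/blobs/sha256/99",
        "/docker/registry/v2/blobs/sha256/99/9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86",
        "/docker/registry/v2/blobs/sha256/99/9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86/data",
    }
    for _, p := range visited {
        _, fileName := path.Split(p)
        if fileName != "data" {
            continue // not a blob payload; Enumerate ignores it
        }
        fmt.Println("would hand to ingester:", p) // the real code derives the digest via digestFromPath
    }
}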


@ -64,3 +64,34 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri
return n, errVal
}
// Enumerate applies ingester to each repository
func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) error {
repoNameBuffer := make([]string, 100)
var last string
for {
n, err := reg.Repositories(ctx, repoNameBuffer, last)
if err != nil && err != io.EOF {
return err
}
if n == 0 {
break
}
last = repoNameBuffer[n-1]
for i := 0; i < n; i++ {
repoName := repoNameBuffer[i]
err = ingester(repoName)
if err != nil {
return err
}
}
if err == io.EOF {
break
}
}
return nil
}
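The repository Enumerate above pages through Repositories in batches of 100, remembering the last name returned and stopping once io.EOF is reached. A self-contained sketch of that pagination pattern; listNames below is an invented stand-in for reg.Repositories:

package main

import (
    "fmt"
    "io"
)

// listNames mimics Repositories: it copies names into buf starting after
// `last` and returns io.EOF once the final name has been handed out.
func listNames(all, buf []string, last string) (int, error) {
    start := 0
    if last != "" {
        for i, name := range all {
            if name == last {
                start = i + 1
                break
            }
        }
    }
    n := copy(buf, all[start:])
    if start+n >= len(all) {
        return n, io.EOF
    }
    return n, nil
}

func main() {
    all := []string{"alpha", "beta", "gamma", "delta", "epsilon"}
    buf := make([]string, 2) // deliberately small to force several pages
    var last string
    for {
        n, err := listNames(all, buf, last)
        if err != nil && err != io.EOF {
            panic(err)
        }
        if n == 0 {
            break
        }
        last = buf[n-1]
        for i := 0; i < n; i++ {
            fmt.Println("ingest:", buf[i])
        }
        if err == io.EOF {
            break
        }
    }
}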


@ -3,6 +3,7 @@ package storage
import (
"fmt"
"net/http"
"path"
"time"
"github.com/docker/distribution"
@ -37,6 +38,9 @@ type linkedBlobStore struct {
// removed and the blob links folder should be merged. The first entry is
// treated as the "canonical" link location and will be used for writes.
linkPathFns []linkPathFunc
// linkDirectoryPathSpec locates the root directories in which one might find links
linkDirectoryPathSpec pathSpec
}
var _ distribution.BlobStore = &linkedBlobStore{}
@ -236,6 +240,55 @@ func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) erro
return nil
}
func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest.Digest) error) error {
rootPath, err := pathFor(lbs.linkDirectoryPathSpec)
if err != nil {
return err
}
err = Walk(ctx, lbs.blobStore.driver, rootPath, func(fileInfo driver.FileInfo) error {
// exit early if directory...
if fileInfo.IsDir() {
return nil
}
filePath := fileInfo.Path()
// check if it's a link
_, fileName := path.Split(filePath)
if fileName != "link" {
return nil
}
// read the digest found in link
digest, err := lbs.blobStore.readlink(ctx, filePath)
if err != nil {
return err
}
// ensure this conforms to the linkPathFns
_, err = lbs.Stat(ctx, digest)
if err != nil {
// we expect this error to occur so we move on
if err == distribution.ErrBlobUnknown {
return nil
}
return err
}
err = ingestor(digest)
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
return nil
}
func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) {
repo, err := lbs.registry.Repository(ctx, sourceRepo)
if err != nil {
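linkedBlobStore.Enumerate above walks the directory named by linkDirectoryPathSpec, ignores everything except files literally named link, reads the digest stored in each link file, and Stats it so that links not resolvable through linkPathFns are skipped rather than reported. A self-contained sketch of the walk-and-filter part, with a fabricated path-to-contents map standing in for the storage driver:

package main

import (
    "fmt"
    "path"
)

func main() {
    // Fabricated files under a repository's _manifests/revisions directory.
    files := map[string]string{
        "/v2/repositories/foo/_manifests/revisions/sha256/9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86/link": "sha256:9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86",
        "/v2/repositories/foo/_manifests/revisions/sha256/9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86/meta": "not a link file",
    }
    for filePath, contents := range files {
        if _, name := path.Split(filePath); name != "link" {
            continue // Enumerate only reads link files
        }
        // The real code would now Stat the digest and skip ErrBlobUnknown entries.
        fmt.Printf("ingest %s (from %s)\n", contents, filePath)
    }
}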


@ -2,6 +2,7 @@ package storage
import (
"fmt"
"path"
"encoding/json"
"github.com/docker/distribution"
@ -129,6 +130,52 @@ func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error {
return ms.blobStore.Delete(ctx, dgst)
}
- func (ms *manifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
- return 0, distribution.ErrUnsupported
- }
func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error {
err := ms.blobStore.Enumerate(ctx, func(dgst digest.Digest) error {
err := ingester(dgst)
if err != nil {
return err
}
return nil
})
return err
}
// Only valid for schema1 signed manifests
func (ms *manifestStore) GetSignatures(ctx context.Context, manifestDigest digest.Digest) ([]digest.Digest, error) {
// sanity check that digest refers to a schema1 digest
manifest, err := ms.Get(ctx, manifestDigest)
if err != nil {
return nil, err
}
if _, ok := manifest.(*schema1.SignedManifest); !ok {
return nil, fmt.Errorf("digest %v is not for schema1 manifest", manifestDigest)
}
signaturesPath, err := pathFor(manifestSignaturesPathSpec{
name: ms.repository.Named().Name(),
revision: manifestDigest,
})
if err != nil {
return nil, err
}
signaturesPath = path.Join(signaturesPath, "sha256")
signaturePaths, err := ms.blobStore.driver.List(ctx, signaturesPath)
if err != nil {
return nil, err
}
var digests []digest.Digest
for _, sigPath := range signaturePaths {
sigdigest, err := digest.ParseDigest("sha256:" + path.Base(sigPath))
if err != nil {
// found something that is not a digest; skip it
continue
}
digests = append(digests, sigdigest)
}
return digests, nil
}
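GetSignatures above works purely from storage paths: it lists the manifest revision's signatures/sha256/ directory and rebuilds each digest from the final path element, silently skipping anything that does not parse. A small sketch of that reconstruction step using the same digest package; the directory listing is fabricated:

package main

import (
    "fmt"
    "path"

    "github.com/docker/distribution/digest"
)

func main() {
    // Fabricated result of driver.List on .../signatures/sha256/
    signaturePaths := []string{
        "/v2/repositories/foo/_manifests/revisions/sha256/aa/signatures/sha256/9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86",
        "/v2/repositories/foo/_manifests/revisions/sha256/aa/signatures/sha256/not-a-digest",
    }
    for _, sigPath := range signaturePaths {
        sigdigest, err := digest.ParseDigest("sha256:" + path.Base(sigPath))
        if err != nil {
            continue // not a valid digest; GetSignatures skips such entries
        }
        fmt.Println("signature digest:", sigdigest)
    }
}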


@ -74,6 +74,7 @@ const (
//
// Manifests:
//
// manifestRevisionsPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/
// manifestRevisionPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/
// manifestRevisionLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link
// manifestSignaturesPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/signatures/
@ -100,6 +101,7 @@ const (
//
// Blob Store:
//
// blobsPathSpec: <root>/v2/blobs/
// blobPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>
// blobDataPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
// blobMediaTypePathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
@ -125,6 +127,9 @@ func pathFor(spec pathSpec) (string, error) {
switch v := spec.(type) {
case manifestRevisionsPathSpec:
return path.Join(append(repoPrefix, v.name, "_manifests", "revisions")...), nil
case manifestRevisionPathSpec:
components, err := digestPathComponents(v.revision, false)
if err != nil {
@ -246,6 +251,17 @@ func pathFor(spec pathSpec) (string, error) {
blobLinkPathComponents := append(repoPrefix, v.name, "_layers")
return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil
case blobsPathSpec:
blobsPathPrefix := append(rootPrefix, "blobs")
return path.Join(blobsPathPrefix...), nil
case blobPathSpec:
components, err := digestPathComponents(v.digest, true)
if err != nil {
return "", err
}
blobPathPrefix := append(rootPrefix, "blobs")
return path.Join(append(blobPathPrefix, components...)...), nil
case blobDataPathSpec:
components, err := digestPathComponents(v.digest, true)
if err != nil {
@ -281,6 +297,14 @@ type pathSpec interface {
pathSpec()
}
// manifestRevisionsPathSpec describes the directory path that holds a
// repository's manifest revisions.
type manifestRevisionsPathSpec struct {
name string
}
func (manifestRevisionsPathSpec) pathSpec() {}
// manifestRevisionPathSpec describes the components of the directory path for
// a manifest revision.
type manifestRevisionPathSpec struct {
@ -404,12 +428,17 @@ var blobAlgorithmReplacer = strings.NewReplacer(
";", "/",
)
- // // blobPathSpec contains the path for the registry global blob store.
- // type blobPathSpec struct {
- // digest digest.Digest
- // }
- // func (blobPathSpec) pathSpec() {}
// blobsPathSpec contains the path for the blobs directory
type blobsPathSpec struct{}
func (blobsPathSpec) pathSpec() {}
// blobPathSpec contains the path for the registry global blob store.
type blobPathSpec struct {
digest digest.Digest
}
func (blobPathSpec) pathSpec() {}
// blobDataPathSpec contains the path for the registry global blob store. For
// now, this contains layer data, exclusively.
@ -491,3 +520,23 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error)
return append(prefix, suffix...), nil
}
// Reconstructs a digest from a path
func digestFromPath(digestPath string) (digest.Digest, error) {
digestPath = strings.TrimSuffix(digestPath, "/data")
dir, hex := path.Split(digestPath)
dir = path.Dir(dir)
dir, next := path.Split(dir)
// next is either the algorithm OR the first two characters in the hex string
var algo string
if next == hex[:2] {
algo = path.Base(dir)
} else {
algo = next
}
dgst := digest.NewDigestFromHex(algo, hex)
return dgst, dgst.Validate()
}
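digestFromPath above undoes blobDataPathSpec: drop the trailing /data, split off the hex digest, then decide whether the next directory up is the algorithm itself or the two-character prefix directory of the multilevel layout. A runnable trace of those steps on the sample path used by the test below:

package main

import (
    "fmt"
    "path"
    "strings"
)

func main() {
    p := "/docker/registry/v2/blobs/sha256/99/9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86/data"

    p = strings.TrimSuffix(p, "/data") // .../sha256/99/9943...c86
    dir, hex := path.Split(p)          // dir ends in ".../sha256/99/", hex is the full digest hex
    dir = path.Dir(dir)                // ".../sha256/99" (trailing slash cleaned away)
    dir, next := path.Split(dir)       // next is "99", the two-character prefix directory

    algo := next
    if next == hex[:2] {
        // multilevel layout: the algorithm sits one directory further up
        algo = path.Base(dir) // "sha256"
    }
    fmt.Printf("%s:%s\n", algo, hex) // sha256:9943fff...c86
}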


@ -2,6 +2,8 @@ package storage
import (
"testing"
"github.com/docker/distribution/digest"
)
func TestPathMapper(t *testing.T) {
@ -120,3 +122,29 @@ func TestPathMapper(t *testing.T) {
}
}
func TestDigestFromPath(t *testing.T) {
for _, testcase := range []struct {
path string
expected digest.Digest
multilevel bool
err error
}{
{
path: "/docker/registry/v2/blobs/sha256/99/9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86/data",
multilevel: true,
expected: "sha256:9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86",
err: nil,
},
} {
result, err := digestFromPath(testcase.path)
if err != testcase.err {
t.Fatalf("Unexpected error value %v when we wanted %v", err, testcase.err)
}
if result != testcase.expected {
t.Fatalf("Unexpected result value %v when we wanted %v", result, testcase.expected)
}
}
}


@ -147,6 +147,14 @@ func (reg *registry) Repository(ctx context.Context, canonicalName reference.Nam
}, nil
}
func (reg *registry) Blobs() distribution.BlobEnumerator {
return reg.blobStore
}
func (reg *registry) BlobStatter() distribution.BlobStatter {
return reg.statter
}
// repository provides name-scoped access to various services.
type repository struct {
*registry
@ -180,6 +188,8 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M
blobLinkPath,
}
manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()}
blobStore := &linkedBlobStore{
ctx: ctx,
blobStore: repo.blobStore,
@ -193,7 +203,8 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M
// TODO(stevvooe): linkPath limits this blob store to only
// manifests. This instance cannot be used for blob checks.
linkPathFns: manifestLinkPathFns,
linkDirectoryPathSpec: manifestDirectoryPathSpec,
}
ms := &manifestStore{


@ -34,11 +34,13 @@ func (v Vacuum) RemoveBlob(dgst string) error {
return err
}
- blobPath, err := pathFor(blobDataPathSpec{digest: d})
blobPath, err := pathFor(blobPathSpec{digest: d})
if err != nil {
return err
}
context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath)
err = v.driver.Delete(v.ctx, blobPath)
if err != nil {
return err