Remove pathMapper object

The pathMapper object is no longer needed with the way the code base is now
organized. The extra level of indirection has proved unnecessary and
confusing, so we've opted to clean it up: path specs are now resolved with a
package-level pathFor function rather than a pathMapper carried on every
store. We may need more flexibility in the future, but for now it simply
isn't required.
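
As an illustration of the resulting call pattern, here is a minimal,
self-contained sketch. Only pathFor, the two storage path constants, and the
before/after call shape come from this change; the pathSpec/blobDataPathSpec
definitions and the simplified blob layout are stand-ins for the real
registry/storage types, not the actual package code.

package main

import (
	"fmt"
	"path"
	"strings"
)

// pathSpec marks the "object name" types that can be resolved to backend
// paths. Simplified stand-in for the unexported interface in registry/storage.
type pathSpec interface{ pathSpec() }

// blobDataPathSpec identifies the data file for a blob by digest (simplified).
type blobDataPathSpec struct{ digest string }

func (blobDataPathSpec) pathSpec() {}

// pathFor resolves a path spec using package-level constants, which is what
// replaces the old (*pathMapper).path method and its root/version fields.
func pathFor(spec pathSpec) (string, error) {
	const (
		storagePathRoot    = "/docker/registry/" // all driver paths have a prefix
		storagePathVersion = "v2"                // fixed storage layout version
	)
	rootPrefix := []string{storagePathRoot, storagePathVersion}

	switch v := spec.(type) {
	case blobDataPathSpec:
		// Simplified layout: <root>/<version>/blobs/<alg>/<first two>/<hex>/data
		parts := strings.SplitN(v.digest, ":", 2)
		if len(parts) != 2 || len(parts[1]) < 2 {
			return "", fmt.Errorf("invalid digest: %q", v.digest)
		}
		alg, hex := parts[0], parts[1]
		return path.Join(append(rootPrefix, "blobs", alg, hex[:2], hex, "data")...), nil
	default:
		return "", fmt.Errorf("unknown path spec: %T", v)
	}
}

func main() {
	// Before this change: blobPath, err := bs.pm.path(blobDataPathSpec{digest: dgst})
	// After this change:  blobPath, err := pathFor(blobDataPathSpec{digest: dgst})
	p, err := pathFor(blobDataPathSpec{digest: "sha256:abcdef0123456789"})
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // /docker/registry/v2/blobs/sha256/ab/abcdef0123456789/data
}

The call sites in the diff below follow exactly this shape: bs.pm.path(spec)
becomes pathFor(spec).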

Signed-off-by: Stephen J Day <stephen.day@docker.com>
Stephen J Day 2015-08-17 18:51:05 -07:00
parent 8133b04efa
commit 641cdf3ba6
15 changed files with 93 additions and 91 deletions


@@ -13,7 +13,6 @@ import (
// creating and traversing backend links.
type blobStore struct {
driver driver.StorageDriver
pm *pathMapper
statter distribution.BlobStatter
}
@@ -94,7 +93,7 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr
// path returns the canonical path for the blob identified by digest. The blob
// may or may not exist.
func (bs *blobStore) path(dgst digest.Digest) (string, error) {
bp, err := bs.pm.path(blobDataPathSpec{
bp, err := pathFor(blobDataPathSpec{
digest: dgst,
})
@@ -140,7 +139,6 @@ func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) {
type blobStatter struct {
driver driver.StorageDriver
pm *pathMapper
}
var _ distribution.BlobDescriptorService = &blobStatter{}
@@ -149,9 +147,10 @@ var _ distribution.BlobDescriptorService = &blobStatter{}
// in the main blob store. If this method returns successfully, there is
// a strong guarantee that the blob exists and is available.
func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
path, err := bs.pm.path(blobDataPathSpec{
path, err := pathFor(blobDataPathSpec{
digest: dgst,
})
if err != nil {
return distribution.Descriptor{}, err
}


@@ -266,7 +266,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
// identified by dgst. The layer should be validated before commencing the
// move.
func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error {
blobPath, err := bw.blobStore.pm.path(blobDataPathSpec{
blobPath, err := pathFor(blobDataPathSpec{
digest: desc.Digest,
})
@@ -324,7 +324,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor
// instance. An error will be returned if the cleanup cannot proceed. If the
// resources are already not present, no error will be returned.
func (bw *blobWriter) removeResources(ctx context.Context) error {
dataPath, err := bw.blobStore.pm.path(uploadDataPathSpec{
dataPath, err := pathFor(uploadDataPathSpec{
name: bw.blobStore.repository.Name(),
id: bw.id,
})


@@ -111,12 +111,13 @@ type hashStateEntry struct {
// getStoredHashStates returns a slice of hashStateEntries for this upload.
func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) {
uploadHashStatePathPrefix, err := bw.blobStore.pm.path(uploadHashStatePathSpec{
uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{
name: bw.blobStore.repository.Name(),
id: bw.id,
alg: bw.digester.Digest().Algorithm(),
list: true,
})
if err != nil {
return nil, err
}
@@ -156,12 +157,13 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error {
return errResumableDigestNotAvailable
}
uploadHashStatePath, err := bw.blobStore.pm.path(uploadHashStatePathSpec{
uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{
name: bw.blobStore.repository.Name(),
id: bw.id,
alg: bw.digester.Digest().Algorithm(),
offset: int64(h.Len()),
})
if err != nil {
return err
}


@@ -22,7 +22,7 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri
return 0, errors.New("no space in slice")
}
root, err := defaultPathMapper.path(repositoriesRootPathSpec{})
root, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return 0, err
}


@@ -23,7 +23,7 @@ func setupFS(t *testing.T) *setupEnv {
c := []byte("")
ctx := context.Background()
registry := NewRegistryWithDriver(ctx, d, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false)
rootpath, _ := defaultPathMapper.path(repositoriesRootPathSpec{})
rootpath, _ := pathFor(repositoriesRootPathSpec{})
repos := []string{
"/foo/a/_layers/1",


@@ -13,7 +13,7 @@ import (
// linkPathFunc describes a function that can resolve a link based on the
// repository name and digest.
type linkPathFunc func(pm *pathMapper, name string, dgst digest.Digest) (string, error)
type linkPathFunc func(name string, dgst digest.Digest) (string, error)
// linkedBlobStore provides a full BlobService that namespaces the blobs to a
// given repository. Effectively, it manages the links in a given repository
@@ -104,7 +104,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter
uuid := uuid.Generate().String()
startedAt := time.Now().UTC()
path, err := lbs.blobStore.pm.path(uploadDataPathSpec{
path, err := pathFor(uploadDataPathSpec{
name: lbs.repository.Name(),
id: uuid,
})
@@ -113,7 +113,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter
return nil, err
}
startedAtPath, err := lbs.blobStore.pm.path(uploadStartedAtPathSpec{
startedAtPath, err := pathFor(uploadStartedAtPathSpec{
name: lbs.repository.Name(),
id: uuid,
})
@@ -133,7 +133,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter
func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume")
startedAtPath, err := lbs.blobStore.pm.path(uploadStartedAtPathSpec{
startedAtPath, err := pathFor(uploadStartedAtPathSpec{
name: lbs.repository.Name(),
id: id,
})
@@ -157,7 +157,7 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution
return nil, err
}
path, err := lbs.pm.path(uploadDataPathSpec{
path, err := pathFor(uploadDataPathSpec{
name: lbs.repository.Name(),
id: id,
})
@@ -228,7 +228,7 @@ func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution
}
seenDigests[dgst] = struct{}{}
blobLinkPath, err := linkPathFn(lbs.pm, lbs.repository.Name(), dgst)
blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst)
if err != nil {
return err
}
@@ -298,7 +298,7 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis
func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) {
// clear any possible existence of a link described in linkPathFns
for _, linkPathFn := range lbs.linkPathFns {
blobLinkPath, err := linkPathFn(lbs.pm, lbs.repository.Name(), dgst)
blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst)
if err != nil {
return err
}
@@ -321,7 +321,7 @@ func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (er
// linkPathFuncs to let us try a few different paths before returning not
// found.
func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) {
blobLinkPath, err := linkPathFn(lbs.pm, lbs.repository.Name(), dgst)
blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst)
if err != nil {
return "", err
}
@@ -335,11 +335,11 @@ func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Dig
}
// blobLinkPath provides the path to the blob link, also known as layers.
func blobLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
return pm.path(layerLinkPathSpec{name: name, digest: dgst})
func blobLinkPath(name string, dgst digest.Digest) (string, error) {
return pathFor(layerLinkPathSpec{name: name, digest: dgst})
}
// manifestRevisionLinkPath provides the path to the manifest revision link.
func manifestRevisionLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
return pm.path(manifestRevisionLinkPathSpec{name: name, revision: dgst})
func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) {
return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst})
}


@@ -385,7 +385,7 @@ func TestLinkPathFuncs(t *testing.T) {
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf/link",
},
} {
p, err := testcase.linkPathFn(defaultPathMapper, testcase.repo, testcase.digest)
p, err := testcase.linkPathFn(testcase.repo, testcase.digest)
if err != nil {
t.Fatalf("unexpected error calling linkPathFn(pm, %q, %q): %v", testcase.repo, testcase.digest, err)
}


@@ -8,10 +8,18 @@ import (
"github.com/docker/distribution/digest"
)
const storagePathVersion = "v2"
const (
storagePathVersion = "v2" // fixed storage layout version
storagePathRoot = "/docker/registry/" // all driver paths have a prefix
// pathMapper maps paths based on "object names" and their ids. The "object
// names" mapped by pathMapper are internal to the storage system.
// TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we thought
// the storage path root would be configurable for all drivers through this
// package. In reality, we've found it simpler to do this on a per-driver
// basis.
)
// pathFor maps paths based on "object names" and their ids. The "object
// names" mapped by are internal to the storage system.
//
// The path layout in the storage backend is roughly as follows:
//
@@ -98,18 +106,7 @@ const storagePathVersion = "v2"
//
// For more information on the semantic meaning of each path and their
// contents, please see the path spec documentation.
type pathMapper struct {
root string
version string // should be a constant?
}
var defaultPathMapper = &pathMapper{
root: "/docker/registry/",
version: storagePathVersion,
}
// path returns the path identified by spec.
func (pm *pathMapper) path(spec pathSpec) (string, error) {
func pathFor(spec pathSpec) (string, error) {
// Switch on the path object type and return the appropriate path. At
// first glance, one may wonder why we don't use an interface to
@@ -123,7 +120,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
// to an intermediate path object, that can be consumed and mapped by the
// other version.
rootPrefix := []string{pm.root, pm.version}
rootPrefix := []string{storagePathRoot, storagePathVersion}
repoPrefix := append(rootPrefix, "repositories")
switch v := spec.(type) {
@@ -136,7 +133,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil
case manifestRevisionLinkPathSpec:
root, err := pm.path(manifestRevisionPathSpec{
root, err := pathFor(manifestRevisionPathSpec{
name: v.name,
revision: v.revision,
})
@@ -147,7 +144,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
return path.Join(root, "link"), nil
case manifestSignaturesPathSpec:
root, err := pm.path(manifestRevisionPathSpec{
root, err := pathFor(manifestRevisionPathSpec{
name: v.name,
revision: v.revision,
})
@@ -158,10 +155,11 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
return path.Join(root, "signatures"), nil
case manifestSignatureLinkPathSpec:
root, err := pm.path(manifestSignaturesPathSpec{
root, err := pathFor(manifestSignaturesPathSpec{
name: v.name,
revision: v.revision,
})
if err != nil {
return "", err
}
@@ -175,50 +173,55 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
case manifestTagsPathSpec:
return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil
case manifestTagPathSpec:
root, err := pm.path(manifestTagsPathSpec{
root, err := pathFor(manifestTagsPathSpec{
name: v.name,
})
if err != nil {
return "", err
}
return path.Join(root, v.tag), nil
case manifestTagCurrentPathSpec:
root, err := pm.path(manifestTagPathSpec{
root, err := pathFor(manifestTagPathSpec{
name: v.name,
tag: v.tag,
})
if err != nil {
return "", err
}
return path.Join(root, "current", "link"), nil
case manifestTagIndexPathSpec:
root, err := pm.path(manifestTagPathSpec{
root, err := pathFor(manifestTagPathSpec{
name: v.name,
tag: v.tag,
})
if err != nil {
return "", err
}
return path.Join(root, "index"), nil
case manifestTagIndexEntryLinkPathSpec:
root, err := pm.path(manifestTagIndexEntryPathSpec{
root, err := pathFor(manifestTagIndexEntryPathSpec{
name: v.name,
tag: v.tag,
revision: v.revision,
})
if err != nil {
return "", err
}
return path.Join(root, "link"), nil
case manifestTagIndexEntryPathSpec:
root, err := pm.path(manifestTagIndexPathSpec{
root, err := pathFor(manifestTagIndexPathSpec{
name: v.name,
tag: v.tag,
})
if err != nil {
return "", err
}


@@ -7,10 +7,6 @@ import (
)
func TestPathMapper(t *testing.T) {
pm := &pathMapper{
root: "/pathmapper-test",
}
for _, testcase := range []struct {
spec pathSpec
expected string
@@ -21,14 +17,14 @@ func TestPathMapper(t *testing.T) {
name: "foo/bar",
revision: "sha256:abcdef0123456789",
},
expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789",
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789",
},
{
spec: manifestRevisionLinkPathSpec{
name: "foo/bar",
revision: "sha256:abcdef0123456789",
},
expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/link",
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/link",
},
{
spec: manifestSignatureLinkPathSpec{
@@ -36,41 +32,41 @@ func TestPathMapper(t *testing.T) {
revision: "sha256:abcdef0123456789",
signature: "sha256:abcdef0123456789",
},
expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures/sha256/abcdef0123456789/link",
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures/sha256/abcdef0123456789/link",
},
{
spec: manifestSignaturesPathSpec{
name: "foo/bar",
revision: "sha256:abcdef0123456789",
},
expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures",
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures",
},
{
spec: manifestTagsPathSpec{
name: "foo/bar",
},
expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags",
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags",
},
{
spec: manifestTagPathSpec{
name: "foo/bar",
tag: "thetag",
},
expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag",
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag",
},
{
spec: manifestTagCurrentPathSpec{
name: "foo/bar",
tag: "thetag",
},
expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/current/link",
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/current/link",
},
{
spec: manifestTagIndexPathSpec{
name: "foo/bar",
tag: "thetag",
},
expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index",
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index",
},
{
spec: manifestTagIndexEntryPathSpec{
@@ -78,7 +74,7 @@ func TestPathMapper(t *testing.T) {
tag: "thetag",
revision: "sha256:abcdef0123456789",
},
expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789",
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789",
},
{
spec: manifestTagIndexEntryLinkPathSpec{
@@ -86,26 +82,26 @@ func TestPathMapper(t *testing.T) {
tag: "thetag",
revision: "sha256:abcdef0123456789",
},
expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789/link",
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789/link",
},
{
spec: layerLinkPathSpec{
name: "foo/bar",
digest: "tarsum.v1+test:abcdef",
},
expected: "/pathmapper-test/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link",
expected: "/docker/registry/v2/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link",
},
{
spec: blobDataPathSpec{
digest: digest.Digest("tarsum.dev+sha512:abcdefabcdefabcdef908909909"),
},
expected: "/pathmapper-test/blobs/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909/data",
expected: "/docker/registry/v2/blobs/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909/data",
},
{
spec: blobDataPathSpec{
digest: digest.Digest("tarsum.v1+sha256:abcdefabcdefabcdef908909909"),
},
expected: "/pathmapper-test/blobs/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909/data",
expected: "/docker/registry/v2/blobs/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909/data",
},
{
@@ -113,17 +109,17 @@ func TestPathMapper(t *testing.T) {
name: "foo/bar",
id: "asdf-asdf-asdf-adsf",
},
expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data",
expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data",
},
{
spec: uploadStartedAtPathSpec{
name: "foo/bar",
id: "asdf-asdf-asdf-adsf",
},
expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat",
expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat",
},
} {
p, err := pm.path(testcase.spec)
p, err := pathFor(testcase.spec)
if err != nil {
t.Fatalf("unexpected generating path (%T): %v", testcase.spec, err)
}
@@ -136,9 +132,10 @@ func TestPathMapper(t *testing.T) {
// Add a few test cases to ensure we cover some errors
// Specify a path that requires a revision and get a digest validation error.
badpath, err := pm.path(manifestSignaturesPathSpec{
badpath, err := pathFor(manifestSignaturesPathSpec{
name: "foo/bar",
})
if err == nil {
t.Fatalf("expected an error when mapping an invalid revision: %s", badpath)
}


@@ -62,10 +62,11 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv
uploads := make(map[string]uploadData, 0)
inUploadDir := false
root, err := defaultPathMapper.path(repositoriesRootPathSpec{})
root, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return uploads, append(errors, err)
}
err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error {
filePath := fileInfo.Path()
_, file := path.Split(filePath)


@@ -12,8 +12,6 @@ import (
"github.com/docker/distribution/uuid"
)
var pm = defaultPathMapper
func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) {
d := inmemory.New()
ctx := context.Background()
@@ -24,7 +22,7 @@ func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.
}
func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) {
dataPath, err := pm.path(uploadDataPathSpec{name: repo, id: uploadID})
dataPath, err := pathFor(uploadDataPathSpec{name: repo, id: uploadID})
if err != nil {
t.Fatalf("Unable to resolve path")
}
@@ -32,7 +30,7 @@ func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploa
t.Fatalf("Unable to write data file")
}
startedAtPath, err := pm.path(uploadStartedAtPathSpec{name: repo, id: uploadID})
startedAtPath, err := pathFor(uploadStartedAtPathSpec{name: repo, id: uploadID})
if err != nil {
t.Fatalf("Unable to resolve path")
}
@@ -115,7 +113,7 @@ func TestPurgeOnlyUploads(t *testing.T) {
// Create a directory tree outside _uploads and ensure
// these files aren't deleted.
dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()})
dataPath, err := pathFor(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()})
if err != nil {
t.Fatalf(err.Error())
}


@@ -30,7 +30,6 @@ func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriv
// create global statter, with cache.
var statter distribution.BlobDescriptorService = &blobStatter{
driver: driver,
pm: defaultPathMapper,
}
if blobDescriptorCacheProvider != nil {
@@ -39,7 +38,6 @@ func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriv
bs := &blobStore{
driver: driver,
pm: defaultPathMapper,
statter: statter,
}


@@ -26,7 +26,7 @@ func newSignatureStore(ctx context.Context, repo *repository, blobStore *blobSto
var _ distribution.SignatureService = &signatureStore{}
func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) {
signaturesPath, err := s.blobStore.pm.path(manifestSignaturesPathSpec{
signaturesPath, err := pathFor(manifestSignaturesPathSpec{
name: s.repository.Name(),
revision: dgst,
})
@@ -119,12 +119,13 @@ func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error {
// manifest with the given digest. Effectively, each signature link path
// layout is a unique linked blob store.
func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore {
linkpath := func(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
return pm.path(manifestSignatureLinkPathSpec{
linkpath := func(name string, dgst digest.Digest) (string, error) {
return pathFor(manifestSignatureLinkPathSpec{
name: name,
revision: revision,
signature: dgst,
})
}
return &linkedBlobStore{


@@ -18,9 +18,10 @@ type tagStore struct {
// tags lists the manifest tags for the specified repository.
func (ts *tagStore) tags() ([]string, error) {
p, err := ts.blobStore.pm.path(manifestTagPathSpec{
p, err := pathFor(manifestTagPathSpec{
name: ts.repository.Name(),
})
if err != nil {
return nil, err
}
@@ -47,10 +48,11 @@ func (ts *tagStore) tags() ([]string, error) {
// exists returns true if the specified manifest tag exists in the repository.
func (ts *tagStore) exists(tag string) (bool, error) {
tagPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{
tagPath, err := pathFor(manifestTagCurrentPathSpec{
name: ts.repository.Name(),
tag: tag,
})
if err != nil {
return false, err
}
@@ -66,7 +68,7 @@ func (ts *tagStore) exists(tag string) (bool, error) {
// tag tags the digest with the given tag, updating the store to point at
// the current tag. The digest must point to a manifest.
func (ts *tagStore) tag(tag string, revision digest.Digest) error {
currentPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{
currentPath, err := pathFor(manifestTagCurrentPathSpec{
name: ts.repository.Name(),
tag: tag,
})
@@ -87,10 +89,11 @@ func (ts *tagStore) tag(tag string, revision digest.Digest) error {
// resolve the current revision for name and tag.
func (ts *tagStore) resolve(tag string) (digest.Digest, error) {
currentPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{
currentPath, err := pathFor(manifestTagCurrentPathSpec{
name: ts.repository.Name(),
tag: tag,
})
if err != nil {
return "", err
}
@@ -111,10 +114,11 @@ func (ts *tagStore) resolve(tag string) (digest.Digest, error) {
// delete removes the tag from the repository, including the history of all
// revisions that have the specified tag.
func (ts *tagStore) delete(tag string) error {
tagPath, err := ts.blobStore.pm.path(manifestTagPathSpec{
tagPath, err := pathFor(manifestTagPathSpec{
name: ts.repository.Name(),
tag: tag,
})
if err != nil {
return err
}
@@ -131,12 +135,13 @@ func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlob
blobStore: ts.blobStore,
repository: ts.repository,
ctx: ctx,
linkPathFns: []linkPathFunc{func(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
return pm.path(manifestTagIndexEntryLinkPathSpec{
linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) {
return pathFor(manifestTagIndexEntryLinkPathSpec{
name: name,
tag: tag,
revision: dgst,
})
}},
}
}


@@ -18,13 +18,11 @@ func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum {
return Vacuum{
ctx: ctx,
driver: driver,
pm: defaultPathMapper,
}
}
// Vacuum removes content from the filesystem
type Vacuum struct {
pm *pathMapper
driver driver.StorageDriver
ctx context.Context
}
@@ -36,7 +34,7 @@ func (v Vacuum) RemoveBlob(dgst string) error {
return err
}
blobPath, err := v.pm.path(blobDataPathSpec{digest: d})
blobPath, err := pathFor(blobDataPathSpec{digest: d})
if err != nil {
return err
}
@@ -52,7 +50,7 @@ func (v Vacuum) RemoveBlob(dgst string) error {
// RemoveRepository removes a repository directory from the
// filesystem
func (v Vacuum) RemoveRepository(repoName string) error {
rootForRepository, err := v.pm.path(repositoriesRootPathSpec{})
rootForRepository, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return err
}