forked from TrueCloudLab/distribution

Merge pull request #64 from stevvooe/separate-signature-storage

Refactor backend storage layout to meet new requirements (addresses #25, #46)

commit e5f0622a14
14 changed files with 1148 additions and 258 deletions
@ -36,6 +36,11 @@ func NewDigest(alg string, h hash.Hash) Digest {
	return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil)))
}

// NewDigestFromHex returns a Digest from alg and the hex encoded digest.
func NewDigestFromHex(alg, hex string) Digest {
	return Digest(fmt.Sprintf("%s:%s", alg, hex))
}

// DigestRegexp matches valid digest types.
var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-zA-Z0-9-_+.=]+`)

@ -57,33 +62,24 @@ func ParseDigest(s string) (Digest, error) {

// FromReader returns the most valid digest for the underlying content.
func FromReader(rd io.Reader) (Digest, error) {

	// TODO(stevvooe): This is pretty inefficient to always be calculating a
	// sha256 hash to provide fallback, but it provides some nice semantics in
	// that we never worry about getting the right digest for a given reader.
	// For the most part, we can detect tar vs non-tar with only a few bytes,
	// so a scheme that saves those bytes would probably be better here.

	h := sha256.New()
	tr := io.TeeReader(rd, h)

	ts, err := tarsum.NewTarSum(tr, true, tarsum.Version1)
	if _, err := io.Copy(h, rd); err != nil {
		return "", err
	}

	return NewDigest("sha256", h), nil
}

// FromTarArchive produces a tarsum digest from reader rd.
func FromTarArchive(rd io.Reader) (Digest, error) {
	ts, err := tarsum.NewTarSum(rd, true, tarsum.Version1)
	if err != nil {
		return "", err
	}

	// Try to copy from the tarsum, if we fail, copy the remaining bytes into
	// hash directly.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		if err.Error() != "archive/tar: invalid tar header" {
			return "", err
		}

		if _, err := io.Copy(h, rd); err != nil {
			return "", err
		}

		return NewDigest("sha256", h), nil
		return "", err
	}

	d, err := ParseDigest(ts.Sum(nil))
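The functions above produce and consume digests in the "<algorithm>:<hex>" form. As a rough, standard-library-only illustration (not the digest package's actual implementation), a digest for plain content can be produced and split back apart like this:

package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// fromBytes returns a sha256 digest in the "<algorithm>:<hex>" form used above.
func fromBytes(p []byte) string {
	h := sha256.Sum256(p)
	return fmt.Sprintf("sha256:%x", h[:])
}

// parse splits a digest into its algorithm and hex components.
func parse(d string) (alg, hex string, err error) {
	parts := strings.SplitN(d, ":", 2)
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return "", "", fmt.Errorf("invalid digest: %q", d)
	}
	return parts[0], parts[1], nil
}

func main() {
	d := fromBytes([]byte("hello"))
	alg, hex, _ := parse(d)
	fmt.Println(d, alg, hex)
}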
@ -30,7 +30,7 @@ func TestDigestVerifier(t *testing.T) {
|
|||
t.Fatalf("error creating tarfile: %v", err)
|
||||
}
|
||||
|
||||
digest, err = FromReader(tf)
|
||||
digest, err = FromTarArchive(tf)
|
||||
if err != nil {
|
||||
t.Fatalf("error digesting tarsum: %v", err)
|
||||
}
|
||||
|
|
storage/blobstore.go (new file, 159 lines)
@ -0,0 +1,159 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/storagedriver"
|
||||
)
|
||||
|
||||
// TODO(stevvooe): Currently, the blobStore implementation is used by the
// manifest store. The layer store should be refactored to better leverage the
// blobStore, reducing duplicated code.
|
||||
|
||||
// blobStore implements a generalized blob store over a driver, supporting the
|
||||
// read side and link management. This object is intentionally a leaky
|
||||
// abstraction, providing utility methods that support creating and traversing
|
||||
// backend links.
|
||||
type blobStore struct {
|
||||
driver storagedriver.StorageDriver
|
||||
pm *pathMapper
|
||||
}
|
||||
|
||||
// exists reports whether or not the path exists. If the driver returns an
// error other than storagedriver.PathNotFound, an error may be returned.
|
||||
func (bs *blobStore) exists(dgst digest.Digest) (bool, error) {
|
||||
path, err := bs.path(dgst)
|
||||
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
ok, err := exists(bs.driver, path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
// get retrieves the blob by digest, returning it as a byte slice. This should
// only be used for small objects.
|
||||
func (bs *blobStore) get(dgst digest.Digest) ([]byte, error) {
|
||||
bp, err := bs.path(dgst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bs.driver.GetContent(bp)
|
||||
}
|
||||
|
||||
// link links the path to the provided digest by writing the digest into the
|
||||
// target file.
|
||||
func (bs *blobStore) link(path string, dgst digest.Digest) error {
|
||||
if exists, err := bs.exists(dgst); err != nil {
|
||||
return err
|
||||
} else if !exists {
|
||||
return fmt.Errorf("cannot link non-existent blob")
|
||||
}
|
||||
|
||||
// The contents of the "link" file are the exact string contents of the
|
||||
// digest, which is specified in that package.
|
||||
return bs.driver.PutContent(path, []byte(dgst))
|
||||
}
|
||||
|
||||
// linked reads the link at path and returns the content.
|
||||
func (bs *blobStore) linked(path string) ([]byte, error) {
|
||||
linked, err := bs.readlink(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bs.get(linked)
|
||||
}
|
||||
|
||||
// readlink returns the linked digest at path.
|
||||
func (bs *blobStore) readlink(path string) (digest.Digest, error) {
|
||||
content, err := bs.driver.GetContent(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
linked, err := digest.ParseDigest(string(content))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if exists, err := bs.exists(linked); err != nil {
|
||||
return "", err
|
||||
} else if !exists {
|
||||
return "", fmt.Errorf("link %q invalid: blob %s does not exist", path, linked)
|
||||
}
|
||||
|
||||
return linked, nil
|
||||
}
|
||||
|
||||
// resolve reads the digest link at path and returns the blob store link.
|
||||
func (bs *blobStore) resolve(path string) (string, error) {
|
||||
dgst, err := bs.readlink(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return bs.path(dgst)
|
||||
}
|
||||
|
||||
// put stores the content p in the blob store, calculating the digest. If the
|
||||
// content is already present, only the digest will be returned. This should
|
||||
// only be used for small objects, such as manifests.
|
||||
func (bs *blobStore) put(p []byte) (digest.Digest, error) {
|
||||
dgst, err := digest.FromBytes(p)
|
||||
if err != nil {
|
||||
logrus.Errorf("error digesting content: %v, %s", err, string(p))
|
||||
return "", err
|
||||
}
|
||||
|
||||
bp, err := bs.path(dgst)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// If the content already exists, just return the digest.
|
||||
if exists, err := bs.exists(dgst); err != nil {
|
||||
return "", err
|
||||
} else if exists {
|
||||
return dgst, nil
|
||||
}
|
||||
|
||||
return dgst, bs.driver.PutContent(bp, p)
|
||||
}
|
||||
|
||||
// path returns the canonical path for the blob identified by digest. The blob
|
||||
// may or may not exist.
|
||||
func (bs *blobStore) path(dgst digest.Digest) (string, error) {
|
||||
bp, err := bs.pm.path(blobDataPathSpec{
|
||||
digest: dgst,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return bp, nil
|
||||
}
|
||||
|
||||
// exists provides a utility method to test whether or not the given path exists.
|
||||
func exists(driver storagedriver.StorageDriver, path string) (bool, error) {
|
||||
if _, err := driver.Stat(path); err != nil {
|
||||
switch err := err.(type) {
|
||||
case storagedriver.PathNotFoundError:
|
||||
return false, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
|
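blobStore.link and readlink above implement indirection with small "link" files: the link file's contents are exactly the digest string, which is then resolved to the canonical blob path. A minimal sketch of that idea against a local filesystem (hypothetical layout, standard library only; the real code goes through a StorageDriver and a pathMapper):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// blobPath maps a digest like "sha256:abcdef" onto <root>/blobs/<alg>/<hex>/data.
func blobPath(root, dgst string) string {
	parts := strings.SplitN(dgst, ":", 2)
	return filepath.Join(root, "blobs", parts[0], parts[1], "data")
}

// link writes the digest string into a small "link" file, the same indirection
// blobStore.link performs through the storage driver.
func link(linkPath, dgst string) error {
	if err := os.MkdirAll(filepath.Dir(linkPath), 0o755); err != nil {
		return err
	}
	return os.WriteFile(linkPath, []byte(dgst), 0o644)
}

// readlink reads the digest back out and resolves it to the canonical blob path.
func readlink(root, linkPath string) (string, error) {
	content, err := os.ReadFile(linkPath)
	if err != nil {
		return "", err
	}
	return blobPath(root, string(content)), nil
}

func main() {
	root, _ := os.MkdirTemp("", "blobstore")
	dgst := "sha256:abcdef0123456789"

	// Write the blob itself, then link it from a repository-scoped path.
	bp := blobPath(root, dgst)
	os.MkdirAll(filepath.Dir(bp), 0o755)
	os.WriteFile(bp, []byte("layer data"), 0o644)

	linkPath := filepath.Join(root, "repositories", "foo", "bar", "layers", "link")
	link(linkPath, dgst)

	resolved, _ := readlink(root, linkPath)
	fmt.Println(resolved == bp) // true
}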
@ -54,12 +54,17 @@ func (lh *delegateLayerHandler) Resolve(layer Layer) (http.Handler, error) {
|
|||
// urlFor returns a download URL for the given layer, or the empty string if
|
||||
// unsupported.
|
||||
func (lh *delegateLayerHandler) urlFor(layer Layer) (string, error) {
|
||||
blobPath, err := resolveBlobPath(lh.storageDriver, lh.pathMapper, layer.Name(), layer.Digest())
|
||||
if err != nil {
|
||||
return "", err
|
||||
// Crack open the layer to get at the layerStore
|
||||
layerRd, ok := layer.(*layerReader)
|
||||
if !ok {
|
||||
// TODO(stevvooe): We probably want to find a better way to get at the
|
||||
// underlying filesystem path for a given layer. Perhaps, the layer
|
||||
// handler should have its own layer store but right now, it is not
|
||||
// request scoped.
|
||||
return "", fmt.Errorf("unsupported layer type: cannot resolve blob path: %v", layer)
|
||||
}
|
||||
|
||||
layerURL, err := lh.storageDriver.URLFor(blobPath, map[string]interface{}{"expiry": time.Now().Add(lh.duration)})
|
||||
layerURL, err := lh.storageDriver.URLFor(layerRd.path, map[string]interface{}{"expiry": time.Now().Add(lh.duration)})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
|
@ -31,13 +31,18 @@ func TestSimpleLayerUpload(t *testing.T) {
|
|||
}
|
||||
|
||||
imageName := "foo/bar"
|
||||
|
||||
driver := inmemory.New()
|
||||
pm := &pathMapper{
|
||||
root: "/storage/testing",
|
||||
version: storagePathVersion,
|
||||
}
|
||||
ls := &layerStore{
|
||||
driver: inmemory.New(),
|
||||
pathMapper: &pathMapper{
|
||||
root: "/storage/testing",
|
||||
version: storagePathVersion,
|
||||
driver: driver,
|
||||
blobStore: &blobStore{
|
||||
driver: driver,
|
||||
pm: pm,
|
||||
},
|
||||
pathMapper: pm,
|
||||
}
|
||||
|
||||
h := sha256.New()
|
||||
|
@ -140,12 +145,17 @@ func TestSimpleLayerUpload(t *testing.T) {
|
|||
func TestSimpleLayerRead(t *testing.T) {
|
||||
imageName := "foo/bar"
|
||||
driver := inmemory.New()
|
||||
pm := &pathMapper{
|
||||
root: "/storage/testing",
|
||||
version: storagePathVersion,
|
||||
}
|
||||
ls := &layerStore{
|
||||
driver: driver,
|
||||
pathMapper: &pathMapper{
|
||||
root: "/storage/testing",
|
||||
version: storagePathVersion,
|
||||
blobStore: &blobStore{
|
||||
driver: driver,
|
||||
pm: pm,
|
||||
},
|
||||
pathMapper: pm,
|
||||
}
|
||||
|
||||
randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile()
|
||||
|
@ -307,7 +317,7 @@ func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper,
|
|||
|
||||
blobDigestSHA := digest.NewDigest("sha256", h)
|
||||
|
||||
blobPath, err := pathMapper.path(blobPathSpec{
|
||||
blobPath, err := pathMapper.path(blobDataPathSpec{
|
||||
digest: dgst,
|
||||
})
|
||||
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
type layerStore struct {
|
||||
driver storagedriver.StorageDriver
|
||||
pathMapper *pathMapper
|
||||
blobStore *blobStore
|
||||
}
|
||||
|
||||
func (ls *layerStore) Exists(name string, digest digest.Digest) (bool, error) {
|
||||
|
@ -31,31 +32,21 @@ func (ls *layerStore) Exists(name string, digest digest.Digest) (bool, error) {
|
|||
return true, nil
|
||||
}
|
||||
|
||||
func (ls *layerStore) Fetch(name string, digest digest.Digest) (Layer, error) {
|
||||
blobPath, err := resolveBlobPath(ls.driver, ls.pathMapper, name, digest)
|
||||
func (ls *layerStore) Fetch(name string, dgst digest.Digest) (Layer, error) {
|
||||
bp, err := ls.path(name, dgst)
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError:
|
||||
return nil, ErrUnknownLayer{manifest.FSLayer{BlobSum: digest}}
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fr, err := newFileReader(ls.driver, blobPath)
|
||||
fr, err := newFileReader(ls.driver, bp)
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError:
|
||||
return nil, ErrUnknownLayer{manifest.FSLayer{BlobSum: digest}}
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &layerReader{
|
||||
fileReader: *fr,
|
||||
name: name,
|
||||
digest: digest,
|
||||
digest: dgst,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -151,3 +142,24 @@ func (ls *layerStore) newLayerUpload(name, uuid, path string, startedAt time.Tim
|
|||
fileWriter: *fw,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ls *layerStore) path(name string, dgst digest.Digest) (string, error) {
|
||||
// We must traverse this path through the link to enforce ownership.
|
||||
layerLinkPath, err := ls.pathMapper.path(layerLinkPathSpec{name: name, digest: dgst})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
blobPath, err := ls.blobStore.resolve(layerLinkPath)
|
||||
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
case storagedriver.PathNotFoundError:
|
||||
return "", ErrUnknownLayer{manifest.FSLayer{BlobSum: dgst}}
|
||||
default:
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
return blobPath, nil
|
||||
}
|
||||
|
|
|
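layerStore.Fetch and layerStore.path above translate the driver's PathNotFoundError into the domain-level ErrUnknownLayer. A small sketch of that error-translation pattern, using os and io/fs errors as stand-ins for the storagedriver types (assumed substitutes, not the package's actual types):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// ErrUnknownLayer is a stand-in for the registry's domain error type.
type ErrUnknownLayer struct{ Digest string }

func (e ErrUnknownLayer) Error() string { return "unknown layer: " + e.Digest }

// fetch reads a layer path and translates the driver-level "not found" error
// into the domain-level ErrUnknownLayer, mirroring layerStore.Fetch above.
func fetch(path, dgst string) ([]byte, error) {
	p, err := os.ReadFile(path)
	if err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return nil, ErrUnknownLayer{Digest: dgst}
		}
		return nil, err
	}
	return p, nil
}

func main() {
	_, err := fetch("/nonexistent/link", "sha256:deadbeef")
	fmt.Println(err) // unknown layer: sha256:deadbeef
}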
@ -112,7 +112,7 @@ func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Dige
|
|||
// sink. Instead, it's read driven. This might be okay.
|
||||
|
||||
// Calculate an updated digest with the latest version.
|
||||
canonical, err := digest.FromReader(tr)
|
||||
canonical, err := digest.FromTarArchive(tr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -128,7 +128,7 @@ func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Dige
|
|||
// identified by dgst. The layer should be validated before commencing the
|
||||
// move.
|
||||
func (luc *layerUploadController) moveLayer(dgst digest.Digest) error {
|
||||
blobPath, err := luc.layerStore.pathMapper.path(blobPathSpec{
|
||||
blobPath, err := luc.layerStore.pathMapper.path(blobDataPathSpec{
|
||||
digest: dgst,
|
||||
})
|
||||
|
||||
|
|
|
@ -1,11 +1,10 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/storagedriver"
|
||||
"github.com/docker/libtrust"
|
||||
|
@ -32,6 +31,17 @@ func (err ErrUnknownManifest) Error() string {
|
|||
return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
|
||||
}
|
||||
|
||||
// ErrUnknownManifestRevision is returned when a manifest cannot be found by
|
||||
// revision within a repository.
|
||||
type ErrUnknownManifestRevision struct {
|
||||
Name string
|
||||
Revision digest.Digest
|
||||
}
|
||||
|
||||
func (err ErrUnknownManifestRevision) Error() string {
|
||||
return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
|
||||
}
|
||||
|
||||
// ErrManifestUnverified is returned when the registry is unable to verify
|
||||
// the manifest.
|
||||
type ErrManifestUnverified struct{}
|
||||
|
@ -55,143 +65,73 @@ func (errs ErrManifestVerification) Error() string {
|
|||
}
|
||||
|
||||
type manifestStore struct {
|
||||
driver storagedriver.StorageDriver
|
||||
pathMapper *pathMapper
|
||||
layerService LayerService
|
||||
driver storagedriver.StorageDriver
|
||||
pathMapper *pathMapper
|
||||
revisionStore *revisionStore
|
||||
tagStore *tagStore
|
||||
blobStore *blobStore
|
||||
layerService LayerService
|
||||
}
|
||||
|
||||
var _ ManifestService = &manifestStore{}
|
||||
|
||||
func (ms *manifestStore) Tags(name string) ([]string, error) {
|
||||
p, err := ms.pathMapper.path(manifestTagsPath{
|
||||
name: name,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var tags []string
|
||||
entries, err := ms.driver.List(p)
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
case storagedriver.PathNotFoundError:
|
||||
return nil, ErrUnknownRepository{Name: name}
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
_, filename := path.Split(entry)
|
||||
|
||||
tags = append(tags, filename)
|
||||
}
|
||||
|
||||
return tags, nil
|
||||
return ms.tagStore.tags(name)
|
||||
}
|
||||
|
||||
func (ms *manifestStore) Exists(name, tag string) (bool, error) {
|
||||
p, err := ms.path(name, tag)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
fi, err := ms.driver.Stat(p)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case storagedriver.PathNotFoundError:
|
||||
return false, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
if fi.IsDir() {
|
||||
return false, fmt.Errorf("unexpected directory at path: %v, name=%s tag=%s", p, name, tag)
|
||||
}
|
||||
|
||||
if fi.Size() == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
return ms.tagStore.exists(name, tag)
|
||||
}
|
||||
|
||||
func (ms *manifestStore) Get(name, tag string) (*manifest.SignedManifest, error) {
|
||||
p, err := ms.path(name, tag)
|
||||
dgst, err := ms.tagStore.resolve(name, tag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
content, err := ms.driver.GetContent(p)
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError:
|
||||
return nil, ErrUnknownManifest{Name: name, Tag: tag}
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var manifest manifest.SignedManifest
|
||||
|
||||
if err := json.Unmarshal(content, &manifest); err != nil {
|
||||
// TODO(stevvooe): Corrupted manifest error?
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(stevvooe): Verify the manifest here?
|
||||
|
||||
return &manifest, nil
|
||||
return ms.revisionStore.get(name, dgst)
|
||||
}
|
||||
|
||||
func (ms *manifestStore) Put(name, tag string, manifest *manifest.SignedManifest) error {
|
||||
p, err := ms.path(name, tag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify the manifest.
|
||||
if err := ms.verifyManifest(name, tag, manifest); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(stevvooe): Should we get old manifest first? Perhaps, write, then
|
||||
// move to ensure a valid manifest?
|
||||
|
||||
return ms.driver.PutContent(p, manifest.Raw)
|
||||
}
|
||||
|
||||
func (ms *manifestStore) Delete(name, tag string) error {
|
||||
p, err := ms.path(name, tag)
|
||||
// Store the revision of the manifest
|
||||
revision, err := ms.revisionStore.put(name, manifest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ms.driver.Delete(p); err != nil {
|
||||
switch err := err.(type) {
|
||||
case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError:
|
||||
return ErrUnknownManifest{Name: name, Tag: tag}
|
||||
default:
|
||||
// Now, tag the manifest
|
||||
return ms.tagStore.tag(name, tag, revision)
|
||||
}
|
||||
|
||||
// Delete removes all revisions of the given tag. We may want to change these
|
||||
// semantics in the future, but this will maintain consistency. The underlying
|
||||
// blobs are left alone.
|
||||
func (ms *manifestStore) Delete(name, tag string) error {
|
||||
revisions, err := ms.tagStore.revisions(name, tag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, revision := range revisions {
|
||||
if err := ms.revisionStore.delete(name, revision); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *manifestStore) path(name, tag string) (string, error) {
|
||||
return ms.pathMapper.path(manifestPathSpec{
|
||||
name: name,
|
||||
tag: tag,
|
||||
})
|
||||
return ms.tagStore.delete(name, tag)
|
||||
}
|
||||
|
||||
// verifyManifest ensures that the manifest content is valid from the
|
||||
// perspective of the registry. It ensures that the name and tag match and
|
||||
// that the signature is valid for the enclosed payload. As a policy, the
|
||||
// registry only tries to store valid content, leaving trust policies of that
|
||||
// content up to consumers.
|
||||
func (ms *manifestStore) verifyManifest(name, tag string, mnfst *manifest.SignedManifest) error {
|
||||
// TODO(stevvooe): This verification is present here, but this needs to be
|
||||
// lifted out of the storage infrastructure and moved into a package
|
||||
// oriented towards defining verifiers and reporting them with
|
||||
// granularity.
|
||||
|
||||
var errs ErrManifestVerification
|
||||
if mnfst.Name != name {
|
||||
// TODO(stevvooe): This needs to be an exported error
|
||||
|
@ -203,10 +143,6 @@ func (ms *manifestStore) verifyManifest(name, tag string, mnfst *manifest.Signed
|
|||
errs = append(errs, fmt.Errorf("tag does not match manifest tag"))
|
||||
}
|
||||
|
||||
// TODO(stevvooe): These pubkeys need to be checked with either Verify or
|
||||
// VerifyWithChains. We need to define the exact source of the CA.
|
||||
// Perhaps, its a configuration value injected into manifest store.
|
||||
|
||||
if _, err := manifest.Verify(mnfst); err != nil {
|
||||
switch err {
|
||||
case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey:
|
||||
|
|
|
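After the refactor, manifestStore.Put verifies the manifest, stores its revision through the revision store, and then tags it; Get resolves tag, then revision, then payload. A compact in-memory sketch of that two-step flow (hypothetical types, not the package's API):

package main

import (
	"crypto/sha256"
	"fmt"
)

type registry struct {
	blobs map[string][]byte            // digest -> payload (revision store)
	tags  map[string]map[string]string // repo -> tag -> current revision digest
}

// put content-addresses the manifest payload, then re-points the tag at it.
func (r *registry) put(name, tag string, payload []byte) string {
	revision := fmt.Sprintf("sha256:%x", sha256.Sum256(payload))
	r.blobs[revision] = payload // store/link the revision
	if r.tags[name] == nil {
		r.tags[name] = map[string]string{}
	}
	r.tags[name][tag] = revision // tag the manifest
	return revision
}

// get resolves tag -> revision -> payload, as manifestStore.Get does above.
func (r *registry) get(name, tag string) ([]byte, bool) {
	rev, ok := r.tags[name][tag]
	if !ok {
		return nil, false
	}
	p, ok := r.blobs[rev]
	return p, ok
}

func main() {
	r := &registry{blobs: map[string][]byte{}, tags: map[string]map[string]string{}}
	rev := r.put("foo/bar", "latest", []byte(`{"name":"foo/bar","tag":"latest"}`))
	p, _ := r.get("foo/bar", "latest")
	fmt.Println(rev, string(p))
}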
@ -1,6 +1,7 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
|
@ -12,12 +13,28 @@ import (
|
|||
|
||||
func TestManifestStorage(t *testing.T) {
|
||||
driver := inmemory.New()
|
||||
ms := &manifestStore{
|
||||
pm := pathMapper{
|
||||
root: "/storage/testing",
|
||||
version: storagePathVersion,
|
||||
}
|
||||
bs := blobStore{
|
||||
driver: driver,
|
||||
pathMapper: &pathMapper{
|
||||
root: "/storage/testing",
|
||||
version: storagePathVersion,
|
||||
pm: &pm,
|
||||
}
|
||||
ms := &manifestStore{
|
||||
driver: driver,
|
||||
pathMapper: &pm,
|
||||
revisionStore: &revisionStore{
|
||||
driver: driver,
|
||||
pathMapper: &pm,
|
||||
blobStore: &bs,
|
||||
},
|
||||
tagStore: &tagStore{
|
||||
driver: driver,
|
||||
pathMapper: &pm,
|
||||
blobStore: &bs,
|
||||
},
|
||||
blobStore: &bs,
|
||||
layerService: newMockedLayerService(),
|
||||
}
|
||||
|
||||
|
@ -100,6 +117,25 @@ func TestManifestStorage(t *testing.T) {
|
|||
t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm)
|
||||
}
|
||||
|
||||
fetchedJWS, err := libtrust.ParsePrettySignature(fetchedManifest.Raw, "signatures")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error parsing jws: %v", err)
|
||||
}
|
||||
|
||||
payload, err := fetchedJWS.Payload()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error extracting payload: %v", err)
|
||||
}
|
||||
|
||||
sigs, err := fetchedJWS.Signatures()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to extract signatures: %v", err)
|
||||
}
|
||||
|
||||
if len(sigs) != 1 {
|
||||
t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1)
|
||||
}
|
||||
|
||||
// Grab the tags and check that this tagged manifest is present
|
||||
tags, err := ms.Tags(name)
|
||||
if err != nil {
|
||||
|
@ -113,6 +149,84 @@ func TestManifestStorage(t *testing.T) {
|
|||
if tags[0] != tag {
|
||||
t.Fatalf("unexpected tag found in tags: %v != %v", tags, []string{tag})
|
||||
}
|
||||
|
||||
// Now, push the same manifest with a different key
|
||||
pk2, err := libtrust.GenerateECP256PrivateKey()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error generating private key: %v", err)
|
||||
}
|
||||
|
||||
sm2, err := manifest.Sign(&m, pk2)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error signing manifest: %v", err)
|
||||
}
|
||||
|
||||
jws2, err := libtrust.ParsePrettySignature(sm2.Raw, "signatures")
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing signature: %v", err)
|
||||
}
|
||||
|
||||
sigs2, err := jws2.Signatures()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to extract signatures: %v", err)
|
||||
}
|
||||
|
||||
if len(sigs2) != 1 {
|
||||
t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1)
|
||||
}
|
||||
|
||||
if err = ms.Put(name, tag, sm2); err != nil {
|
||||
t.Fatalf("unexpected error putting manifest: %v", err)
|
||||
}
|
||||
|
||||
fetched, err := ms.Get(name, tag)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error fetching manifest: %v", err)
|
||||
}
|
||||
|
||||
if _, err := manifest.Verify(fetched); err != nil {
|
||||
t.Fatalf("unexpected error verifying manifest: %v", err)
|
||||
}
|
||||
|
||||
// Assemble our payload and two signatures to get what we expect!
|
||||
expectedJWS, err := libtrust.NewJSONSignature(payload, sigs[0], sigs2[0])
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error merging jws: %v", err)
|
||||
}
|
||||
|
||||
expectedSigs, err := expectedJWS.Signatures()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting expected signatures: %v", err)
|
||||
}
|
||||
|
||||
receivedJWS, err := libtrust.ParsePrettySignature(fetched.Raw, "signatures")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error parsing jws: %v", err)
|
||||
}
|
||||
|
||||
receivedPayload, err := receivedJWS.Payload()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error extracting received payload: %v", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(receivedPayload, payload) {
|
||||
t.Fatalf("payloads are not equal")
|
||||
}
|
||||
|
||||
receivedSigs, err := receivedJWS.Signatures()
|
||||
if err != nil {
|
||||
t.Fatalf("error getting signatures: %v", err)
|
||||
}
|
||||
|
||||
for i, sig := range receivedSigs {
|
||||
if !bytes.Equal(sig, expectedSigs[i]) {
|
||||
t.Fatalf("mismatched signatures from remote: %v != %v", string(sig), string(expectedSigs[i]))
|
||||
}
|
||||
}
|
||||
|
||||
if err := ms.Delete(name, tag); err != nil {
|
||||
t.Fatalf("unexpected error deleting manifest: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
type layerKey struct {
|
||||
|
|
storage/paths.go (343 changed lines)
@ -6,7 +6,6 @@ import (
	"strings"

	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/storagedriver"
)

const storagePathVersion = "v2"

@ -14,13 +13,21 @@ const storagePathVersion = "v2"
// pathMapper maps paths based on "object names" and their ids. The "object
// names" mapped by pathMapper are internal to the storage system.
//
// The path layout in the storage backend will be roughly as follows:
// The path layout in the storage backend is roughly as follows:
//
//	<root>/v2
//		-> repositories/
//			-> <name>/
//				-> manifests/
//					<manifests by tag name>
//					revisions
//						-> <manifest digest path>
//							-> link
//							-> signatures
//								<algorithm>/<digest>/link
//					tags/<tag>
//						-> current/link
//						-> index
//							-> <algorithm>/<hex digest>/link
//				-> layers/
//					<layer links to blob store>
//				-> uploads/<uuid>

@ -29,20 +36,61 @@ const storagePathVersion = "v2"
//		-> blob/<algorithm>
//			<split directory content addressable storage>
//
// There are a few important components to this path layout. First, we have the
// repository store identified by name. This contains the image manifests and
// a layer store with links to CAS blob ids. Upload coordination data is also
// stored here. Outside of the named repo area, we have the blob store. It
// contains the actual layer data and any other data that can be referenced by
// a CAS id.
// The storage backend layout is broken up into a content-addressable blob
// store and repositories. The content-addressable blob store holds most data
// throughout the backend, keyed by algorithm and digests of the underlying
// content. Access to the blob store is controlled through links from the
// repository to the blob store.
//
// A repository is made up of layers, manifests and tags. The layers component
// is just a directory of layers which are "linked" into a repository. A layer
// can only be accessed through a qualified repository name if it is linked in
// the repository. Uploads of layers are managed in the uploads directory,
// which is keyed by upload uuid. When all data for an upload is received, the
// data is moved into the blob store and the upload directory is deleted.
// Abandoned uploads can be garbage collected by reading the startedat file
// and removing uploads that have been active for longer than a certain time.
//
// The third component of the repository directory is the manifests store,
// which is made up of a revision store and tag store. Manifests are stored in
// the blob store and linked into the revision store. Signatures are separated
// from the manifest payload data and linked into the blob store, as well.
// While the registry can save all revisions of a manifest, no relationship is
// implied as to the ordering of changes to a manifest. The tag store provides
// support for name, tag lookups of manifests, using "current/link" under a
// named tag directory. An index is maintained to support deletions of all
// revisions of a given manifest tag.
//
// We cover the path formats implemented by this path mapper below.
//
// manifestPathSpec: <root>/v2/repositories/<name>/manifests/<tag>
// layerLinkPathSpec: <root>/v2/repositories/<name>/layers/tarsum/<tarsum version>/<tarsum hash alg>/<tarsum hash>
// blobPathSpec: <root>/v2/blob/<algorithm>/<first two hex bytes of digest>/<hex digest>
// uploadDataPathSpec: <root>/v2/repositories/<name>/uploads/<uuid>/data
// uploadStartedAtPathSpec: <root>/v2/repositories/<name>/uploads/<uuid>/startedat
// Manifests:
//
// manifestRevisionPathSpec: <root>/v2/repositories/<name>/manifests/revisions/<algorithm>/<hex digest>/
// manifestRevisionLinkPathSpec: <root>/v2/repositories/<name>/manifests/revisions/<algorithm>/<hex digest>/link
// manifestSignaturesPathSpec: <root>/v2/repositories/<name>/manifests/revisions/<algorithm>/<hex digest>/signatures/
// manifestSignatureLinkPathSpec: <root>/v2/repositories/<name>/manifests/revisions/<algorithm>/<hex digest>/signatures/<algorithm>/<hex digest>/link
//
// Tags:
//
// manifestTagsPathSpec: <root>/v2/repositories/<name>/manifests/tags/
// manifestTagPathSpec: <root>/v2/repositories/<name>/manifests/tags/<tag>/
// manifestTagCurrentPathSpec: <root>/v2/repositories/<name>/manifests/tags/<tag>/current/link
// manifestTagIndexPathSpec: <root>/v2/repositories/<name>/manifests/tags/<tag>/index/
// manifestTagIndexEntryPathSpec: <root>/v2/repositories/<name>/manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
//
// Layers:
//
// layerLinkPathSpec: <root>/v2/repositories/<name>/layers/tarsum/<tarsum version>/<tarsum hash alg>/<tarsum hash>/link
//
// Uploads:
//
// uploadDataPathSpec: <root>/v2/repositories/<name>/uploads/<uuid>/data
// uploadStartedAtPathSpec: <root>/v2/repositories/<name>/uploads/<uuid>/startedat
//
// Blob Store:
//
// blobPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>
// blobDataPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
//
// For more information on the semantic meaning of each path and their
// contents, please see the path spec documentation.
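As a rough illustration of the layout documented above, the following sketch composes a couple of the described paths with path.Join. The "<root>/v2/..." shapes and the root value are assumptions taken from the comment; the real pathMapper derives its output from the path specs defined below.

package main

import (
	"fmt"
	"path"
	"strings"
)

const root = "/storage/testing"

// digestComponents splits "alg:hex" into path components, e.g. ["sha256", "abc..."].
func digestComponents(dgst string) []string {
	return strings.SplitN(dgst, ":", 2)
}

// manifestRevisionLinkPath follows the documented manifestRevisionLinkPathSpec shape.
func manifestRevisionLinkPath(name, revision string) string {
	return path.Join(append([]string{root, "v2", "repositories", name, "manifests", "revisions"},
		append(digestComponents(revision), "link")...)...)
}

// manifestTagCurrentPath follows the documented manifestTagCurrentPathSpec shape.
func manifestTagCurrentPath(name, tag string) string {
	return path.Join(root, "v2", "repositories", name, "manifests", "tags", tag, "current", "link")
}

func main() {
	fmt.Println(manifestRevisionLinkPath("foo/bar", "sha256:abcdef0123456789"))
	fmt.Println(manifestTagCurrentPath("foo/bar", "thetag"))
}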
@ -75,13 +123,99 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
|
|||
repoPrefix := append(rootPrefix, "repositories")
|
||||
|
||||
switch v := spec.(type) {
|
||||
case manifestTagsPath:
|
||||
return path.Join(append(repoPrefix, v.name, "manifests")...), nil
|
||||
case manifestPathSpec:
|
||||
// TODO(sday): May need to store manifest by architecture.
|
||||
return path.Join(append(repoPrefix, v.name, "manifests", v.tag)...), nil
|
||||
|
||||
case manifestRevisionPathSpec:
|
||||
components, err := digestPathComponents(v.revision, false)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return path.Join(append(append(repoPrefix, v.name, "manifests", "revisions"), components...)...), nil
|
||||
case manifestRevisionLinkPathSpec:
|
||||
root, err := pm.path(manifestRevisionPathSpec{
|
||||
name: v.name,
|
||||
revision: v.revision,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return path.Join(root, "link"), nil
|
||||
case manifestSignaturesPathSpec:
|
||||
root, err := pm.path(manifestRevisionPathSpec{
|
||||
name: v.name,
|
||||
revision: v.revision,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return path.Join(root, "signatures"), nil
|
||||
case manifestSignatureLinkPathSpec:
|
||||
root, err := pm.path(manifestSignaturesPathSpec{
|
||||
name: v.name,
|
||||
revision: v.revision,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
signatureComponents, err := digestPathComponents(v.signature, false)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return path.Join(root, path.Join(append(signatureComponents, "link")...)), nil
|
||||
case manifestTagsPathSpec:
|
||||
return path.Join(append(repoPrefix, v.name, "manifests", "tags")...), nil
|
||||
case manifestTagPathSpec:
|
||||
root, err := pm.path(manifestTagsPathSpec{
|
||||
name: v.name,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return path.Join(root, v.tag), nil
|
||||
case manifestTagCurrentPathSpec:
|
||||
root, err := pm.path(manifestTagPathSpec{
|
||||
name: v.name,
|
||||
tag: v.tag,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return path.Join(root, "current", "link"), nil
|
||||
case manifestTagIndexPathSpec:
|
||||
root, err := pm.path(manifestTagPathSpec{
|
||||
name: v.name,
|
||||
tag: v.tag,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return path.Join(root, "index"), nil
|
||||
case manifestTagIndexEntryPathSpec:
|
||||
root, err := pm.path(manifestTagIndexPathSpec{
|
||||
name: v.name,
|
||||
tag: v.tag,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
components, err := digestPathComponents(v.revision, false)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return path.Join(root, path.Join(append(components, "link")...)), nil
|
||||
case layerLinkPathSpec:
|
||||
components, err := digestPathComoponents(v.digest)
|
||||
components, err := digestPathComponents(v.digest, false)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -94,21 +228,17 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
|
|||
|
||||
layerLinkPathComponents := append(repoPrefix, v.name, "layers")
|
||||
|
||||
return path.Join(append(layerLinkPathComponents, components...)...), nil
|
||||
case blobPathSpec:
|
||||
components, err := digestPathComoponents(v.digest)
|
||||
return path.Join(path.Join(append(layerLinkPathComponents, components...)...), "link"), nil
|
||||
case blobDataPathSpec:
|
||||
components, err := digestPathComponents(v.digest, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// For now, only map tarsum paths.
|
||||
if components[0] != "tarsum" {
|
||||
// Only tarsum is supported, for now
|
||||
return "", fmt.Errorf("unsupported content digest: %v", v.digest)
|
||||
}
|
||||
|
||||
blobPathPrefix := append(rootPrefix, "blob")
|
||||
components = append(components, "data")
|
||||
blobPathPrefix := append(rootPrefix, "blobs")
|
||||
return path.Join(append(blobPathPrefix, components...)...), nil
|
||||
|
||||
case uploadDataPathSpec:
|
||||
return path.Join(append(repoPrefix, v.name, "uploads", v.uuid, "data")...), nil
|
||||
case uploadStartedAtPathSpec:
|
||||
|
@ -126,22 +256,91 @@ type pathSpec interface {
|
|||
pathSpec()
|
||||
}
|
||||
|
||||
// manifestTagsPath describes the path elements required to point to the
|
||||
// directory with all manifest tags under the repository.
|
||||
type manifestTagsPath struct {
|
||||
// manifestRevisionPathSpec describes the components of the directory path for
|
||||
// a manifest revision.
|
||||
type manifestRevisionPathSpec struct {
|
||||
name string
|
||||
revision digest.Digest
|
||||
}
|
||||
|
||||
func (manifestRevisionPathSpec) pathSpec() {}
|
||||
|
||||
// manifestRevisionLinkPathSpec describes the path components required to look
|
||||
// up the data link for a revision of a manifest. If this file is not present,
|
||||
// the manifest blob is not available in the given repo. The contents of this
|
||||
// file should just be the digest.
|
||||
type manifestRevisionLinkPathSpec struct {
|
||||
name string
|
||||
revision digest.Digest
|
||||
}
|
||||
|
||||
func (manifestRevisionLinkPathSpec) pathSpec() {}
|
||||
|
||||
// manifestSignaturesPathSpec describes the path components for the directory
|
||||
// containing all the signatures for the target blob. Entries are named with
|
||||
// the underlying key id.
|
||||
type manifestSignaturesPathSpec struct {
|
||||
name string
|
||||
revision digest.Digest
|
||||
}
|
||||
|
||||
func (manifestSignaturesPathSpec) pathSpec() {}
|
||||
|
||||
// manifestSignatureLinkPathSpec describes the path components used to look up
|
||||
// a signature file by the hash of its blob.
|
||||
type manifestSignatureLinkPathSpec struct {
|
||||
name string
|
||||
revision digest.Digest
|
||||
signature digest.Digest
|
||||
}
|
||||
|
||||
func (manifestSignatureLinkPathSpec) pathSpec() {}
|
||||
|
||||
// manifestTagsPathSpec describes the path elements required to point to the
|
||||
// manifest tags directory.
|
||||
type manifestTagsPathSpec struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func (manifestTagsPath) pathSpec() {}
|
||||
func (manifestTagsPathSpec) pathSpec() {}
|
||||
|
||||
// manifestPathSpec describes the path elements used to build a manifest path.
|
||||
// The contents should be a signed manifest json file.
|
||||
type manifestPathSpec struct {
|
||||
// manifestTagPathSpec describes the path elements required to point to the
// manifest tag link files under a repository. These contain a blob id that
// can be used to look up the data and signatures.
|
||||
type manifestTagPathSpec struct {
|
||||
name string
|
||||
tag string
|
||||
}
|
||||
|
||||
func (manifestPathSpec) pathSpec() {}
|
||||
func (manifestTagPathSpec) pathSpec() {}
|
||||
|
||||
// manifestTagCurrentPathSpec describes the link to the current revision for a
|
||||
// given tag.
|
||||
type manifestTagCurrentPathSpec struct {
|
||||
name string
|
||||
tag string
|
||||
}
|
||||
|
||||
func (manifestTagCurrentPathSpec) pathSpec() {}
|
||||
|
||||
// manifestTagIndexPathSpec describes the link to the index of revisions
// with the given tag.
|
||||
type manifestTagIndexPathSpec struct {
|
||||
name string
|
||||
tag string
|
||||
}
|
||||
|
||||
func (manifestTagIndexPathSpec) pathSpec() {}
|
||||
|
||||
// manifestTagIndexEntryPathSpec describes the link to a revision of a
// manifest with the given tag within the index.
|
||||
type manifestTagIndexEntryPathSpec struct {
|
||||
name string
|
||||
tag string
|
||||
revision digest.Digest
|
||||
}
|
||||
|
||||
func (manifestTagIndexEntryPathSpec) pathSpec() {}
|
||||
|
||||
// layerLink specifies a path for a layer link, which is a file with a blob
|
||||
// id. The layer link will contain a content addressable blob id reference
|
||||
|
@ -172,13 +371,20 @@ var blobAlgorithmReplacer = strings.NewReplacer(
|
|||
";", "/",
|
||||
)
|
||||
|
||||
// blobPath contains the path for the registry global blob store. For now,
|
||||
// this contains layer data, exclusively.
|
||||
type blobPathSpec struct {
|
||||
// // blobPathSpec contains the path for the registry global blob store.
|
||||
// type blobPathSpec struct {
|
||||
// digest digest.Digest
|
||||
// }
|
||||
|
||||
// func (blobPathSpec) pathSpec() {}
|
||||
|
||||
// blobDataPathSpec contains the path for the registry global blob store. For
|
||||
// now, this contains layer data, exclusively.
|
||||
type blobDataPathSpec struct {
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func (blobPathSpec) pathSpec() {}
|
||||
func (blobDataPathSpec) pathSpec() {}
|
||||
|
||||
// uploadDataPathSpec defines the path parameters of the data file for
|
||||
// uploads.
|
||||
|
@ -203,18 +409,21 @@ type uploadStartedAtPathSpec struct {
|
|||
|
||||
func (uploadStartedAtPathSpec) pathSpec() {}
|
||||
|
||||
// digestPathComoponents provides a consistent path breakdown for a given
|
||||
// digestPathComponents provides a consistent path breakdown for a given
|
||||
// digest. For a generic digest, it will be as follows:
|
||||
//
|
||||
// <algorithm>/<first two bytes of digest>/<full digest>
|
||||
// <algorithm>/<hex digest>
|
||||
//
|
||||
// Most importantly, for tarsum, the layout looks like this:
|
||||
//
|
||||
// tarsum/<version>/<digest algorithm>/<first two bytes of digest>/<full digest>
|
||||
// tarsum/<version>/<digest algorithm>/<full digest>
|
||||
//
|
||||
// This is slightly specialized to store an extra version path for version 0
|
||||
// tarsums.
|
||||
func digestPathComoponents(dgst digest.Digest) ([]string, error) {
|
||||
// If multilevel is true, the first two bytes of the digest will separate
|
||||
// groups of digest folder. It will be as follows:
|
||||
//
|
||||
// <algorithm>/<first two bytes of digest>/<full digest>
|
||||
//
|
||||
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
|
||||
if err := dgst.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -222,11 +431,15 @@ func digestPathComoponents(dgst digest.Digest) ([]string, error) {
|
|||
algorithm := blobAlgorithmReplacer.Replace(dgst.Algorithm())
|
||||
hex := dgst.Hex()
|
||||
prefix := []string{algorithm}
|
||||
suffix := []string{
|
||||
hex[:2], // Breaks heirarchy up.
|
||||
hex,
|
||||
|
||||
var suffix []string
|
||||
|
||||
if multilevel {
|
||||
suffix = append(suffix, hex[:2])
|
||||
}
|
||||
|
||||
suffix = append(suffix, hex)
|
||||
|
||||
if tsi, err := digest.ParseTarSum(dgst.String()); err == nil {
|
||||
// We have a tarsum!
|
||||
version := tsi.Version
|
||||
|
@ -243,31 +456,3 @@ func digestPathComoponents(dgst digest.Digest) ([]string, error) {
|
|||
|
||||
return append(prefix, suffix...), nil
|
||||
}
|
||||
|
||||
// resolveBlobPath looks up the blob location in the repositories from a
|
||||
// layer/blob link file, returning blob path or an error on failure.
|
||||
func resolveBlobPath(driver storagedriver.StorageDriver, pm *pathMapper, name string, dgst digest.Digest) (string, error) {
|
||||
pathSpec := layerLinkPathSpec{name: name, digest: dgst}
|
||||
layerLinkPath, err := pm.path(pathSpec)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
layerLinkContent, err := driver.GetContent(layerLinkPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// NOTE(stevvooe): The content of the layer link should match the digest.
|
||||
// This layer of indirection is for name-based content protection.
|
||||
|
||||
linked, err := digest.ParseDigest(string(layerLinkContent))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
bp := blobPathSpec{digest: linked}
|
||||
|
||||
return pm.path(bp)
|
||||
}
|
||||
|
|
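digestPathComponents, reworked above to take a multilevel flag, breaks a digest into path components and optionally inserts the first two hex characters as a fan-out directory. A standalone sketch of that breakdown for ordinary digests (the tarsum special case is omitted):

package main

import (
	"fmt"
	"strings"
)

// digestPathComponents returns e.g. ["sha256", "ab", "abcdef..."] when
// multilevel is true, or ["sha256", "abcdef..."] when it is false.
func digestPathComponents(dgst string, multilevel bool) ([]string, error) {
	parts := strings.SplitN(dgst, ":", 2)
	if len(parts) != 2 || len(parts[1]) < 2 {
		return nil, fmt.Errorf("invalid digest: %q", dgst)
	}
	algorithm, hex := parts[0], parts[1]

	components := []string{algorithm}
	if multilevel {
		components = append(components, hex[:2]) // fan-out directory
	}
	return append(components, hex), nil
}

func main() {
	c, _ := digestPathComponents("sha256:abcdef0123456789", true)
	fmt.Println(c) // [sha256 ab abcdef0123456789]
	c, _ = digestPathComponents("sha256:abcdef0123456789", false)
	fmt.Println(c) // [sha256 abcdef0123456789]
}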
|
@ -17,31 +17,89 @@ func TestPathMapper(t *testing.T) {
|
|||
err error
|
||||
}{
|
||||
{
|
||||
spec: manifestPathSpec{
|
||||
spec: manifestRevisionPathSpec{
|
||||
name: "foo/bar",
|
||||
revision: "sha256:abcdef0123456789",
|
||||
},
|
||||
expected: "/pathmapper-test/repositories/foo/bar/manifests/revisions/sha256/abcdef0123456789",
|
||||
},
|
||||
{
|
||||
spec: manifestRevisionLinkPathSpec{
|
||||
name: "foo/bar",
|
||||
revision: "sha256:abcdef0123456789",
|
||||
},
|
||||
expected: "/pathmapper-test/repositories/foo/bar/manifests/revisions/sha256/abcdef0123456789/link",
|
||||
},
|
||||
{
|
||||
spec: manifestSignatureLinkPathSpec{
|
||||
name: "foo/bar",
|
||||
revision: "sha256:abcdef0123456789",
|
||||
signature: "sha256:abcdef0123456789",
|
||||
},
|
||||
expected: "/pathmapper-test/repositories/foo/bar/manifests/revisions/sha256/abcdef0123456789/signatures/sha256/abcdef0123456789/link",
|
||||
},
|
||||
{
|
||||
spec: manifestSignaturesPathSpec{
|
||||
name: "foo/bar",
|
||||
revision: "sha256:abcdef0123456789",
|
||||
},
|
||||
expected: "/pathmapper-test/repositories/foo/bar/manifests/revisions/sha256/abcdef0123456789/signatures",
|
||||
},
|
||||
{
|
||||
spec: manifestTagsPathSpec{
|
||||
name: "foo/bar",
|
||||
},
|
||||
expected: "/pathmapper-test/repositories/foo/bar/manifests/tags",
|
||||
},
|
||||
{
|
||||
spec: manifestTagPathSpec{
|
||||
name: "foo/bar",
|
||||
tag: "thetag",
|
||||
},
|
||||
expected: "/pathmapper-test/repositories/foo/bar/manifests/thetag",
|
||||
expected: "/pathmapper-test/repositories/foo/bar/manifests/tags/thetag",
|
||||
},
|
||||
{
|
||||
spec: manifestTagCurrentPathSpec{
|
||||
name: "foo/bar",
|
||||
tag: "thetag",
|
||||
},
|
||||
expected: "/pathmapper-test/repositories/foo/bar/manifests/tags/thetag/current/link",
|
||||
},
|
||||
{
|
||||
spec: manifestTagIndexPathSpec{
|
||||
name: "foo/bar",
|
||||
tag: "thetag",
|
||||
},
|
||||
expected: "/pathmapper-test/repositories/foo/bar/manifests/tags/thetag/index",
|
||||
},
|
||||
{
|
||||
spec: manifestTagIndexEntryPathSpec{
|
||||
name: "foo/bar",
|
||||
tag: "thetag",
|
||||
revision: "sha256:abcdef0123456789",
|
||||
},
|
||||
expected: "/pathmapper-test/repositories/foo/bar/manifests/tags/thetag/index/sha256/abcdef0123456789/link",
|
||||
},
|
||||
{
|
||||
spec: layerLinkPathSpec{
|
||||
name: "foo/bar",
|
||||
digest: digest.Digest("tarsum.v1+test:abcdef"),
|
||||
digest: "tarsum.v1+test:abcdef",
|
||||
},
|
||||
expected: "/pathmapper-test/repositories/foo/bar/layers/tarsum/v1/test/ab/abcdef",
|
||||
expected: "/pathmapper-test/repositories/foo/bar/layers/tarsum/v1/test/abcdef/link",
|
||||
},
|
||||
{
|
||||
spec: blobPathSpec{
|
||||
spec: blobDataPathSpec{
|
||||
digest: digest.Digest("tarsum.dev+sha512:abcdefabcdefabcdef908909909"),
|
||||
},
|
||||
expected: "/pathmapper-test/blob/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909",
|
||||
expected: "/pathmapper-test/blobs/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909/data",
|
||||
},
|
||||
{
|
||||
spec: blobPathSpec{
|
||||
spec: blobDataPathSpec{
|
||||
digest: digest.Digest("tarsum.v1+sha256:abcdefabcdefabcdef908909909"),
|
||||
},
|
||||
expected: "/pathmapper-test/blob/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909",
|
||||
expected: "/pathmapper-test/blobs/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909/data",
|
||||
},
|
||||
|
||||
{
|
||||
spec: uploadDataPathSpec{
|
||||
name: "foo/bar",
|
||||
|
@ -59,11 +117,22 @@ func TestPathMapper(t *testing.T) {
|
|||
} {
|
||||
p, err := pm.path(testcase.spec)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
t.Fatalf("unexpected generating path (%T): %v", testcase.spec, err)
|
||||
}
|
||||
|
||||
if p != testcase.expected {
|
||||
t.Fatalf("unexpected path generated: %q != %q", p, testcase.expected)
|
||||
t.Fatalf("unexpected path generated (%T): %q != %q", testcase.spec, p, testcase.expected)
|
||||
}
|
||||
}
|
||||
|
||||
// Add a few test cases to ensure we cover some errors
|
||||
|
||||
// Specify a path that requires a revision and get a digest validation error.
|
||||
badpath, err := pm.path(manifestSignaturesPathSpec{
|
||||
name: "foo/bar",
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatalf("expected an error when mapping an invalid revision: %s", badpath)
|
||||
}
|
||||
|
||||
}
|
||||
|
|
storage/revisionstore.go (new file, 217 lines)
@ -0,0 +1,217 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"path"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/storagedriver"
|
||||
"github.com/docker/libtrust"
|
||||
)
|
||||
|
||||
// revisionStore supports storing and managing manifest revisions.
|
||||
type revisionStore struct {
|
||||
driver storagedriver.StorageDriver
|
||||
pathMapper *pathMapper
|
||||
blobStore *blobStore
|
||||
}
|
||||
|
||||
// exists returns true if the revision is available in the named repository.
|
||||
func (rs *revisionStore) exists(name string, revision digest.Digest) (bool, error) {
|
||||
revpath, err := rs.pathMapper.path(manifestRevisionPathSpec{
|
||||
name: name,
|
||||
revision: revision,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
exists, err := exists(rs.driver, revpath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return exists, nil
|
||||
}
|
||||
|
||||
// get retrieves the manifest, keyed by revision digest.
|
||||
func (rs *revisionStore) get(name string, revision digest.Digest) (*manifest.SignedManifest, error) {
|
||||
// Ensure that this revision is available in this repository.
|
||||
if exists, err := rs.exists(name, revision); err != nil {
|
||||
return nil, err
|
||||
} else if !exists {
|
||||
return nil, ErrUnknownManifestRevision{
|
||||
Name: name,
|
||||
Revision: revision,
|
||||
}
|
||||
}
|
||||
|
||||
content, err := rs.blobStore.get(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Fetch the signatures for the manifest
|
||||
signatures, err := rs.getSignatures(name, revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logrus.Infof("retrieved signatures: %v", string(signatures[0]))
|
||||
|
||||
jsig, err := libtrust.NewJSONSignature(content, signatures...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Extract the pretty JWS
|
||||
raw, err := jsig.PrettySignature("signatures")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var sm manifest.SignedManifest
|
||||
if err := json.Unmarshal(raw, &sm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &sm, nil
|
||||
}
|
||||
|
||||
// put stores the manifest in the repository, if not already present. Any
|
||||
// updated signatures will be stored, as well.
|
||||
func (rs *revisionStore) put(name string, sm *manifest.SignedManifest) (digest.Digest, error) {
|
||||
jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Resolve the payload in the manifest.
|
||||
payload, err := jsig.Payload()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Digest and store the manifest payload in the blob store.
|
||||
revision, err := rs.blobStore.put(payload)
|
||||
if err != nil {
|
||||
logrus.Errorf("error putting payload into blobstore: %v", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Link the revision into the repository.
|
||||
if err := rs.link(name, revision); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Grab each json signature and store them.
|
||||
signatures, err := jsig.Signatures()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, signature := range signatures {
|
||||
if err := rs.putSignature(name, revision, signature); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
return revision, nil
|
||||
}
|
||||
|
||||
// link links the revision into the repository.
|
||||
func (rs *revisionStore) link(name string, revision digest.Digest) error {
|
||||
revisionPath, err := rs.pathMapper.path(manifestRevisionLinkPathSpec{
|
||||
name: name,
|
||||
revision: revision,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exists, err := exists(rs.driver, revisionPath); err != nil {
|
||||
return err
|
||||
} else if exists {
|
||||
// Revision has already been linked!
|
||||
return nil
|
||||
}
|
||||
|
||||
return rs.blobStore.link(revisionPath, revision)
|
||||
}
|
||||
|
||||
// delete removes the specified manifest revision from storage.
|
||||
func (rs *revisionStore) delete(name string, revision digest.Digest) error {
|
||||
revisionPath, err := rs.pathMapper.path(manifestRevisionPathSpec{
|
||||
name: name,
|
||||
revision: revision,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return rs.driver.Delete(revisionPath)
|
||||
}
|
||||
|
||||
// getSignatures retrieves all of the signature blobs for the specified
|
||||
// manifest revision.
|
||||
func (rs *revisionStore) getSignatures(name string, revision digest.Digest) ([][]byte, error) {
|
||||
signaturesPath, err := rs.pathMapper.path(manifestSignaturesPathSpec{
|
||||
name: name,
|
||||
revision: revision,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Need to append signature digest algorithm to path to get all items.
|
||||
// Perhaps, this should be in the pathMapper but it feels awkward. This
|
||||
// can be eliminated by implementing listAll on drivers.
|
||||
signaturesPath = path.Join(signaturesPath, "sha256")
|
||||
|
||||
signaturePaths, err := rs.driver.List(signaturesPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var signatures [][]byte
|
||||
for _, sigPath := range signaturePaths {
|
||||
// Append the link portion
|
||||
sigPath = path.Join(sigPath, "link")
|
||||
|
||||
// TODO(stevvooe): These fetches should be parallelized for performance.
|
||||
p, err := rs.blobStore.linked(sigPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
signatures = append(signatures, p)
|
||||
}
|
||||
|
||||
return signatures, nil
|
||||
}
|
||||
|
||||
// putSignature stores the signature for the provided manifest revision.
|
||||
func (rs *revisionStore) putSignature(name string, revision digest.Digest, signature []byte) error {
|
||||
signatureDigest, err := rs.blobStore.put(signature)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
signaturePath, err := rs.pathMapper.path(manifestSignatureLinkPathSpec{
|
||||
name: name,
|
||||
revision: revision,
|
||||
signature: signatureDigest,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return rs.blobStore.link(signaturePath, signatureDigest)
|
||||
}
|
|
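revisionStore.put above content-addresses the manifest payload and stores each signature as its own blob, linked to the revision; get fetches them all back and recombines them (with libtrust in the real code). A simplified in-memory sketch of the storage side:

package main

import (
	"crypto/sha256"
	"fmt"
)

type revisionStore struct {
	blobs      map[string][]byte   // content-addressed payload and signature blobs
	signatures map[string][]string // revision digest -> linked signature digests
}

func digestOf(p []byte) string { return fmt.Sprintf("sha256:%x", sha256.Sum256(p)) }

// put stores the payload and each signature separately, linking the
// signatures to the revision, in the spirit of revisionStore.put above.
func (rs *revisionStore) put(payload []byte, sigs [][]byte) string {
	revision := digestOf(payload)
	rs.blobs[revision] = payload
	for _, sig := range sigs {
		sd := digestOf(sig)
		rs.blobs[sd] = sig
		rs.signatures[revision] = append(rs.signatures[revision], sd)
	}
	return revision
}

// get returns the payload plus all signatures linked to the revision.
func (rs *revisionStore) get(revision string) ([]byte, [][]byte) {
	var sigs [][]byte
	for _, sd := range rs.signatures[revision] {
		sigs = append(sigs, rs.blobs[sd])
	}
	return rs.blobs[revision], sigs
}

func main() {
	rs := &revisionStore{blobs: map[string][]byte{}, signatures: map[string][]string{}}
	rev := rs.put([]byte(`{"manifest":"payload"}`), [][]byte{[]byte("sig-one"), []byte("sig-two")})
	payload, sigs := rs.get(rev)
	fmt.Println(string(payload), len(sigs)) // {"manifest":"payload"} 2
}

One consequence of this separation, exercised by the manifest store test above, is that pushing the same payload signed with a different key simply attaches another signature to the existing revision.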
@ -28,14 +28,42 @@ func NewServices(driver storagedriver.StorageDriver) *Services {
|
|||
// may be context sensitive in the future. The instance should be used similar
|
||||
// to a request local.
|
||||
func (ss *Services) Layers() LayerService {
|
||||
return &layerStore{driver: ss.driver, pathMapper: ss.pathMapper}
|
||||
return &layerStore{
|
||||
driver: ss.driver,
|
||||
blobStore: &blobStore{
|
||||
driver: ss.driver,
|
||||
pm: ss.pathMapper,
|
||||
},
|
||||
pathMapper: ss.pathMapper,
|
||||
}
|
||||
}
|
||||
|
||||
// Manifests returns an instance of ManifestService. Instantiation is cheap and
|
||||
// may be context sensitive in the future. The instance should be used similar
|
||||
// to a request local.
|
||||
func (ss *Services) Manifests() ManifestService {
|
||||
return &manifestStore{driver: ss.driver, pathMapper: ss.pathMapper, layerService: ss.Layers()}
|
||||
// TODO(stevvooe): Lose this kludge. An intermediary object is clearly
|
||||
// missing here. This initialization is a mess.
|
||||
bs := &blobStore{
|
||||
driver: ss.driver,
|
||||
pm: ss.pathMapper,
|
||||
}
|
||||
|
||||
return &manifestStore{
|
||||
driver: ss.driver,
|
||||
pathMapper: ss.pathMapper,
|
||||
revisionStore: &revisionStore{
|
||||
driver: ss.driver,
|
||||
pathMapper: ss.pathMapper,
|
||||
blobStore: bs,
|
||||
},
|
||||
tagStore: &tagStore{
|
||||
driver: ss.driver,
|
||||
blobStore: bs,
|
||||
pathMapper: ss.pathMapper,
|
||||
},
|
||||
blobStore: bs,
|
||||
layerService: ss.Layers()}
|
||||
}
|
||||
|
||||
// ManifestService provides operations on image manifests.
|
||||
|
@ -43,7 +71,7 @@ type ManifestService interface {
|
|||
// Tags lists the tags under the named repository.
|
||||
Tags(name string) ([]string, error)
|
||||
|
||||
// Exists returns true if the layer exists.
|
||||
// Exists returns true if the manifest exists.
|
||||
Exists(name, tag string) (bool, error)
|
||||
|
||||
// Get retrieves the named manifest, if it exists.
|
||||
|
|
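The `var _ ManifestService = &manifestStore{}` line earlier in this change is a compile-time interface check. A small sketch of the same pattern against a reduced, assumed version of the ManifestService interface (only the two methods whose signatures are visible in this diff):

package main

import "fmt"

// ManifestService mirrors part of the interface documented above
// (signatures assumed from the visible fragment).
type ManifestService interface {
	Tags(name string) ([]string, error)
	Exists(name, tag string) (bool, error)
}

type fakeManifestStore struct{ tags map[string][]string }

// Compile-time check that fakeManifestStore satisfies ManifestService,
// the same pattern as `var _ ManifestService = &manifestStore{}` above.
var _ ManifestService = &fakeManifestStore{}

func (f *fakeManifestStore) Tags(name string) ([]string, error) { return f.tags[name], nil }

func (f *fakeManifestStore) Exists(name, tag string) (bool, error) {
	for _, t := range f.tags[name] {
		if t == tag {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	var ms ManifestService = &fakeManifestStore{tags: map[string][]string{"foo/bar": {"latest"}}}
	ok, _ := ms.Exists("foo/bar", "latest")
	fmt.Println(ok) // true
}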
storage/tagstore.go (new file, 159 lines)
@ -0,0 +1,159 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"path"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/storagedriver"
|
||||
)
|
||||
|
||||
// tagStore provides methods to manage manifest tags in a backend storage driver.
|
||||
type tagStore struct {
|
||||
driver storagedriver.StorageDriver
|
||||
blobStore *blobStore
|
||||
pathMapper *pathMapper
|
||||
}
|
||||
|
||||
// tags lists the manifest tags for the specified repository.
|
||||
func (ts *tagStore) tags(name string) ([]string, error) {
|
||||
p, err := ts.pathMapper.path(manifestTagPathSpec{
|
||||
name: name,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var tags []string
|
||||
entries, err := ts.driver.List(p)
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
case storagedriver.PathNotFoundError:
|
||||
return nil, ErrUnknownRepository{Name: name}
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
_, filename := path.Split(entry)
|
||||
|
||||
tags = append(tags, filename)
|
||||
}
|
||||
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
// exists returns true if the specified manifest tag exists in the repository.
|
||||
func (ts *tagStore) exists(name, tag string) (bool, error) {
|
||||
tagPath, err := ts.pathMapper.path(manifestTagCurrentPathSpec{
|
||||
name: name,
|
||||
tag: tag,
|
||||
})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
exists, err := exists(ts.driver, tagPath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return exists, nil
|
||||
}
|
||||
|
||||
// tag tags the digest with the given tag, updating the store to point at
// the current tag. The digest must point to a manifest.
|
||||
func (ts *tagStore) tag(name, tag string, revision digest.Digest) error {
|
||||
indexEntryPath, err := ts.pathMapper.path(manifestTagIndexEntryPathSpec{
|
||||
name: name,
|
||||
tag: tag,
|
||||
revision: revision,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
currentPath, err := ts.pathMapper.path(manifestTagCurrentPathSpec{
|
||||
name: name,
|
||||
tag: tag,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Link into the index
|
||||
if err := ts.blobStore.link(indexEntryPath, revision); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Overwrite the current link
|
||||
return ts.blobStore.link(currentPath, revision)
|
||||
}
|
||||
|
||||
// resolve the current revision for name and tag.
|
||||
func (ts *tagStore) resolve(name, tag string) (digest.Digest, error) {
|
||||
currentPath, err := ts.pathMapper.path(manifestTagCurrentPathSpec{
|
||||
name: name,
|
||||
tag: tag,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if exists, err := exists(ts.driver, currentPath); err != nil {
|
||||
return "", err
|
||||
} else if !exists {
|
||||
return "", ErrUnknownManifest{Name: name, Tag: tag}
|
||||
}
|
||||
|
||||
revision, err := ts.blobStore.readlink(currentPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return revision, nil
|
||||
}
|
||||
|
||||
// revisions returns all revisions with the specified name and tag.
|
||||
func (ts *tagStore) revisions(name, tag string) ([]digest.Digest, error) {
|
||||
manifestTagIndexPath, err := ts.pathMapper.path(manifestTagIndexPathSpec{
|
||||
name: name,
|
||||
tag: tag,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(stevvooe): Need to append digest alg to get listing of revisions.
|
||||
manifestTagIndexPath = path.Join(manifestTagIndexPath, "sha256")
|
||||
|
||||
entries, err := ts.driver.List(manifestTagIndexPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var revisions []digest.Digest
|
||||
for _, entry := range entries {
|
||||
revisions = append(revisions, digest.NewDigestFromHex("sha256", path.Base(entry)))
|
||||
}
|
||||
|
||||
return revisions, nil
|
||||
}
|
||||
|
||||
// delete removes the tag from repository, including the history of all
|
||||
// revisions that have the specified tag.
|
||||
func (ts *tagStore) delete(name, tag string) error {
|
||||
tagPath, err := ts.pathMapper.path(manifestTagPathSpec{
|
||||
name: name,
|
||||
tag: tag,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ts.driver.Delete(tagPath)
|
||||
}
|
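The tag store resolves a tag through its "current" link and keeps a per-tag index of every revision ever tagged, which is what manifestStore.Delete walks. A filesystem-free sketch of those lookups (in-memory maps, illustrative only):

package main

import "fmt"

type tagStore struct {
	current map[string]string   // "name:tag" -> current revision digest
	index   map[string][]string // "name:tag" -> every revision ever tagged
}

// tag points current at the revision and records it in the index,
// mirroring tagStore.tag above.
func (ts *tagStore) tag(name, tag, revision string) {
	key := name + ":" + tag
	ts.index[key] = append(ts.index[key], revision)
	ts.current[key] = revision
}

// resolve returns the revision the tag currently points at.
func (ts *tagStore) resolve(name, tag string) (string, bool) {
	rev, ok := ts.current[name+":"+tag]
	return rev, ok
}

// revisions returns every revision recorded under the tag.
func (ts *tagStore) revisions(name, tag string) []string {
	return ts.index[name+":"+tag]
}

func main() {
	ts := &tagStore{current: map[string]string{}, index: map[string][]string{}}
	ts.tag("foo/bar", "latest", "sha256:aaaa")
	ts.tag("foo/bar", "latest", "sha256:bbbb")
	rev, _ := ts.resolve("foo/bar", "latest")
	fmt.Println(rev, ts.revisions("foo/bar", "latest")) // sha256:bbbb [sha256:aaaa sha256:bbbb]
}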