Merge pull request #238 from endophage/canonical_sha256
digest, registry/storage, registry/handlers: switch to SHA256 as canonical digest
commit 5a8bedcc9f
4 changed files with 41 additions and 41 deletions
@@ -573,7 +573,9 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Diges
 
 // pushLayer pushes the layer content returning the url on success.
 func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string {
-	resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, body)
+	digester := digest.NewCanonicalDigester()
+
+	resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, &digester))
 	if err != nil {
 		t.Fatalf("unexpected error doing push layer request: %v", err)
 	}
@@ -581,7 +583,13 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest,
 
 	checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated)
 
-	expectedLayerURL, err := ub.BuildBlobURL(name, dgst)
+	if err != nil {
+		t.Fatalf("error generating sha256 digest of body")
+	}
+
+	sha256Dgst := digester.Digest()
+
+	expectedLayerURL, err := ub.BuildBlobURL(name, sha256Dgst)
 	if err != nil {
 		t.Fatalf("error building expected layer url: %v", err)
 	}
@@ -589,7 +597,7 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest,
 	checkHeaders(t, resp, http.Header{
 		"Location":       []string{expectedLayerURL},
 		"Content-Length": []string{"0"},
-		"Docker-Content-Digest": []string{dgst.String()},
+		"Docker-Content-Digest": []string{sha256Dgst.String()},
 	})
 
 	return resp.Header.Get("Location")
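
Note: the pushLayer change above computes the expected SHA-256 digest of whatever bytes were actually uploaded by teeing the request body through a digester. Below is a minimal sketch of that pattern using only the standard library; digest.NewCanonicalDigester is assumed here to wrap a plain SHA-256 hash, and digestWhileSending is a hypothetical helper used only for illustration.

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
)

// digestWhileSending copies body to upload while feeding the same bytes into
// a SHA-256 hash, so the digest of what was actually sent is known afterwards.
func digestWhileSending(body io.Reader, upload io.Writer) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(upload, io.TeeReader(body, h)); err != nil {
		return "", err
	}
	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	var sent bytes.Buffer
	dgst, err := digestWhileSending(bytes.NewBufferString("layer data"), &sent)
	if err != nil {
		panic(err)
	}
	fmt.Printf("sent %d bytes, digest %s\n", sent.Len(), dgst)
}
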
@@ -682,7 +690,7 @@ func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) {
 
 		for _, hv := range resp.Header[k] {
 			if hv != v {
-				t.Fatalf("header value not matched in response: %q != %q", hv, v)
+				t.Fatalf("%v header value not matched in response: %q != %q", k, hv, v)
 			}
 		}
 	}
@@ -266,12 +266,12 @@ func TestLayerUploadZeroLength(t *testing.T) {
 
 	io.Copy(upload, bytes.NewReader([]byte{}))
 
-	dgst, err := digest.FromTarArchive(bytes.NewReader([]byte{}))
+	dgst, err := digest.FromReader(bytes.NewReader([]byte{}))
 	if err != nil {
 		t.Fatalf("error getting zero digest: %v", err)
 	}
 
-	if dgst != digest.DigestTarSumV1EmptyTar {
+	if dgst != digest.DigestSha256EmptyTar {
 		// sanity check on zero digest
 		t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestTarSumV1EmptyTar)
 	}
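
Note: the zero-length test now checks against digest.DigestSha256EmptyTar instead of the tarsum constant. A quick sanity sketch, assuming only that the canonical digest is the SHA-256 of the raw (empty) content:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	// SHA-256 over zero bytes of input; digest.DigestSha256EmptyTar is assumed
	// to carry this same well-known value for empty content.
	sum := sha256.Sum256(nil)
	fmt.Println("sha256:" + hex.EncodeToString(sum[:]))
	// sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}
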
@@ -11,7 +11,6 @@ import (
 	ctxu "github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
-	"github.com/docker/docker/pkg/tarsum"
 )
 
 // layerUploadController is used to control the various aspects of resumable
@@ -61,7 +60,7 @@ func (luc *layerUploadController) Finish(digest digest.Digest) (distribution.Lay
 	}
 
 	// Link the layer blob into the repository.
-	if err := luc.linkLayer(canonical); err != nil {
+	if err := luc.linkLayer(canonical, digest); err != nil {
 		return nil, err
 	}
 
@@ -86,23 +85,6 @@ func (luc *layerUploadController) Cancel() error {
 // validateLayer checks the layer data against the digest, returning an error
 // if it does not match. The canonical digest is returned.
 func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Digest, error) {
-	// First, check the incoming tarsum version of the digest.
-	version, err := tarsum.GetVersionFromTarsum(dgst.String())
-	if err != nil {
-		return "", err
-	}
-
-	// TODO(stevvooe): Should we push this down into the digest type?
-	switch version {
-	case tarsum.Version1:
-	default:
-		// version 0 and dev, for now.
-		return "", distribution.ErrLayerInvalidDigest{
-			Digest: dgst,
-			Reason: distribution.ErrLayerTarSumVersionUnsupported,
-		}
-	}
-
 	digestVerifier := digest.NewDigestVerifier(dgst)
 
 	// TODO(stevvooe): Store resumable hash calculations in upload directory
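
Note: with the tarsum version gate removed, validateLayer leans entirely on the digest verifier fed from the upload. The exact construction returned by digest.NewDigestVerifier isn't shown in this hunk, so the sketch below only models the Write/Verified shape the surrounding code relies on; hashVerifier and newSHA256Verifier are hypothetical stand-ins, not the package's API.

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"
)

// hashVerifier is a stand-in for a digest verifier: content is written through
// it and Verified reports whether the accumulated hash matches the expected digest.
type hashVerifier struct {
	h        hash.Hash
	expected string // hex portion of the expected digest
}

func (v *hashVerifier) Write(p []byte) (int, error) { return v.h.Write(p) }

func (v *hashVerifier) Verified() bool {
	return hex.EncodeToString(v.h.Sum(nil)) == v.expected
}

func newSHA256Verifier(dgst string) *hashVerifier {
	return &hashVerifier{h: sha256.New(), expected: strings.TrimPrefix(dgst, "sha256:")}
}

func main() {
	content := []byte("layer data")
	sum := sha256.Sum256(content)
	dgst := "sha256:" + hex.EncodeToString(sum[:])

	v := newSHA256Verifier(dgst)
	if _, err := io.Copy(v, bytes.NewReader(content)); err != nil {
		panic(err)
	}
	fmt.Println("verified:", v.Verified())
}
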
@@ -122,7 +104,7 @@ func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Dige
 	// sink. Instead, its read driven. This might be okay.
 
 	// Calculate an updated digest with the latest version.
-	canonical, err := digest.FromTarArchive(tr)
+	canonical, err := digest.FromReader(tr)
 	if err != nil {
 		return "", err
 	}
@@ -177,7 +159,7 @@ func (luc *layerUploadController) moveLayer(dgst digest.Digest) error {
 		// a zero-length blob into a nonzero-length blob location. To
 		// prevent this horrid thing, we employ the hack of only allowing
 		// to this happen for the zero tarsum.
-		if dgst == digest.DigestTarSumV1EmptyTar {
+		if dgst == digest.DigestSha256EmptyTar {
 			return luc.driver.PutContent(blobPath, []byte{})
 		}
 
@@ -195,17 +177,33 @@ func (luc *layerUploadController) moveLayer(dgst digest.Digest) error {
 
 // linkLayer links a valid, written layer blob into the registry under the
 // named repository for the upload controller.
-func (luc *layerUploadController) linkLayer(digest digest.Digest) error {
-	layerLinkPath, err := luc.layerStore.repository.registry.pm.path(layerLinkPathSpec{
-		name:   luc.layerStore.repository.Name(),
-		digest: digest,
-	})
-
-	if err != nil {
-		return err
+func (luc *layerUploadController) linkLayer(canonical digest.Digest, aliases ...digest.Digest) error {
+	dgsts := append([]digest.Digest{canonical}, aliases...)
+
+	// Don't make duplicate links.
+	seenDigests := make(map[digest.Digest]struct{}, len(dgsts))
+
+	for _, dgst := range dgsts {
+		if _, seen := seenDigests[dgst]; seen {
+			continue
+		}
+		seenDigests[dgst] = struct{}{}
+
+		layerLinkPath, err := luc.layerStore.repository.registry.pm.path(layerLinkPathSpec{
+			name:   luc.layerStore.repository.Name(),
+			digest: dgst,
+		})
+
+		if err != nil {
+			return err
+		}
+
+		if err := luc.layerStore.repository.registry.driver.PutContent(layerLinkPath, []byte(canonical)); err != nil {
+			return err
+		}
 	}
 
-	return luc.layerStore.repository.registry.driver.PutContent(layerLinkPath, []byte(digest))
+	return nil
 }
 
 // removeResources should clean up all resources associated with the upload
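
Note: linkLayer now accepts the canonical digest plus any number of aliases and writes one link per unique digest, each pointing at the canonical value. The sketch below shows that flow in isolation; putContent and the link-path layout are hypothetical stand-ins for the registry's driver and path mapper, used only for illustration.

package main

import "fmt"

// linkAll links the canonical digest and its aliases once each, recording the
// canonical value under every link path.
func linkAll(canonical string, putContent func(path, value string) error, aliases ...string) error {
	dgsts := append([]string{canonical}, aliases...)

	// Don't make duplicate links.
	seen := make(map[string]struct{}, len(dgsts))
	for _, dgst := range dgsts {
		if _, ok := seen[dgst]; ok {
			continue
		}
		seen[dgst] = struct{}{}

		path := "_layers/" + dgst + "/link" // illustrative layout only
		if err := putContent(path, canonical); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	store := map[string]string{}
	put := func(path, value string) error { store[path] = value; return nil }

	// Linking the same digest twice writes only one link.
	if err := linkAll("sha256:aaa", put, "tarsum.v1+sha256:bbb", "sha256:aaa"); err != nil {
		panic(err)
	}
	fmt.Println(store)
}
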
@@ -232,12 +232,6 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
 		return "", err
 	}
 
-	// For now, only map tarsum paths.
-	if components[0] != "tarsum" {
-		// Only tarsum is supported, for now
-		return "", fmt.Errorf("unsupported content digest: %v", v.digest)
-	}
-
 	layerLinkPathComponents := append(repoPrefix, v.name, "_layers")
 
 	return path.Join(path.Join(append(layerLinkPathComponents, components...)...), "link"), nil
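
Note: dropping the tarsum-only guard means the path mapper now accepts any <algorithm>:<hex> digest when building a layer link path. The sketch below illustrates that kind of mapping; the component split and the on-disk prefix are assumptions for this example, not taken from this hunk.

package main

import (
	"fmt"
	"path"
	"strings"
)

// layerLinkPath splits an "<algorithm>:<hex>" digest into path components and
// joins them under the repository's _layers directory. The layout shown here
// is illustrative only.
func layerLinkPath(repoPrefix []string, name, dgst string) (string, error) {
	parts := strings.SplitN(dgst, ":", 2)
	if len(parts) != 2 {
		return "", fmt.Errorf("unsupported content digest: %v", dgst)
	}
	components := append(repoPrefix, name, "_layers")
	components = append(components, parts...)
	return path.Join(path.Join(components...), "link"), nil
}

func main() {
	p, err := layerLinkPath([]string{"docker", "registry", "v2", "repositories"}, "library/ubuntu",
		"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		panic(err)
	}
	fmt.Println(p)
}
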