Merge pull request #1271 from aaronlehmann/remove-tarsum

Remove tarsum support for digest package
Commit 0fef25389d by Stephen Day, 2015-12-16 14:24:11 -08:00
10 changed files with 22 additions and 90 deletions


@@ -87,14 +87,6 @@ func TestRouter(t *testing.T) {
 				"name": "docker.com/foo/bar/baz",
 			},
 		},
-		{
-			RouteName:  RouteNameBlob,
-			RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234",
-			Vars: map[string]string{
-				"name":   "foo/bar",
-				"digest": "tarsum.dev+foo:abcdef0919234",
-			},
-		},
 		{
 			RouteName:  RouteNameBlob,
 			RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234",


@@ -35,9 +35,9 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase {
 		},
 		{
 			description:  "build blob url",
-			expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789",
+			expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
 			build: func() (string, error) {
-				return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789")
+				return urlBuilder.BuildBlobURL("foo/bar", "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5")
 			},
 		},
 		{
@@ -49,11 +49,11 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase {
 		},
 		{
 			description:  "build blob upload url with digest and size",
-			expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000",
+			expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000",
 			build: func() (string, error) {
 				return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{
 					"size":   []string{"10000"},
-					"digest": []string{"tarsum.v1+sha256:abcdef0123456789"},
+					"digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"},
 				})
 			},
 		},
@@ -66,11 +66,11 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase {
 		},
 		{
 			description:  "build blob upload chunk url with digest and size",
-			expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000",
+			expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000",
 			build: func() (string, error) {
 				return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{
 					"size":   []string{"10000"},
-					"digest": []string{"tarsum.v1+sha256:abcdef0123456789"},
+					"digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"},
 				})
 			},
 		},
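
A side note on the expected query strings above: the canonical digest is carried as an ordinary URL query parameter, so its ":" separator is escaped to "%3A" (just as the old tarsum "+" used to become "%2B"). A minimal standalone sketch, independent of this change and using only the standard library, reproduces the encoding with the same sample digest:

// Illustration only: url.Values escapes ":" in a canonical digest, which is
// why the expected paths above contain "sha256%3A...".
package main

import (
	"fmt"
	"net/url"
)

func main() {
	dgst := "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"

	v := url.Values{}
	v.Set("size", "10000")
	v.Set("digest", dgst)

	// Encode sorts keys, so this prints the same query string the test expects:
	// digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000
	fmt.Println(v.Encode())
}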


@@ -251,22 +251,18 @@ type blobArgs struct {
 	imageName   string
 	layerFile   io.ReadSeeker
 	layerDigest digest.Digest
-	tarSumStr   string
 }
 
 func makeBlobArgs(t *testing.T) blobArgs {
-	layerFile, tarSumStr, err := testutil.CreateRandomTarFile()
+	layerFile, layerDigest, err := testutil.CreateRandomTarFile()
 	if err != nil {
 		t.Fatalf("error creating random layer file: %v", err)
 	}
-
-	layerDigest := digest.Digest(tarSumStr)
 	args := blobArgs{
 		imageName:   "foo/bar",
 		layerFile:   layerFile,
 		layerDigest: layerDigest,
-		tarSumStr:   tarSumStr,
 	}
 
 	return args
 }
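
As the hunk above shows, testutil.CreateRandomTarFile now hands back the layer digest directly instead of a tarsum string. The following is a rough, self-contained approximation of what such a helper can do once tarsum is gone; the helper name, file name, and sizes are illustrative, not the real testutil code:

// Hedged sketch: with tarsum removed, a random-tar test helper only needs to
// hash the raw archive bytes and return a reader plus a "sha256:<hex>" digest.
package main

import (
	"archive/tar"
	"bytes"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"io"
)

func createRandomTarFile() (io.ReadSeeker, string, error) {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)

	// One small file with random contents is enough for a test fixture.
	contents := make([]byte, 1024)
	if _, err := rand.Read(contents); err != nil {
		return nil, "", err
	}
	hdr := &tar.Header{Name: "random.bin", Mode: 0644, Size: int64(len(contents))}
	if err := tw.WriteHeader(hdr); err != nil {
		return nil, "", err
	}
	if _, err := tw.Write(contents); err != nil {
		return nil, "", err
	}
	if err := tw.Close(); err != nil {
		return nil, "", err
	}

	// The digest is plain SHA-256 over the archive bytes; no tarsum involved.
	sum := sha256.Sum256(buf.Bytes())
	dgst := fmt.Sprintf("sha256:%x", sum)
	return bytes.NewReader(buf.Bytes()), dgst, nil
}

func main() {
	rd, dgst, err := createRandomTarFile()
	if err != nil {
		panic(err)
	}
	n, _ := io.Copy(io.Discard, rd)
	fmt.Printf("%d tar bytes, digest %s\n", n, dgst)
}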
@@ -393,7 +389,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
 	// -----------------------------------------
 	// Do layer push with an empty body and correct digest
-	zeroDigest, err := digest.FromTarArchive(bytes.NewReader([]byte{}))
+	zeroDigest, err := digest.FromReader(bytes.NewReader([]byte{}))
 	if err != nil {
 		t.Fatalf("unexpected error digesting empty buffer: %v", err)
 	}
@@ -406,7 +402,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
 	// This is a valid but empty tarfile!
 	emptyTar := bytes.Repeat([]byte("\x00"), 1024)
-	emptyDigest, err := digest.FromTarArchive(bytes.NewReader(emptyTar))
+	emptyDigest, err := digest.FromReader(bytes.NewReader(emptyTar))
 	if err != nil {
 		t.Fatalf("unexpected error digesting empty tar: %v", err)
 	}
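
Both replacements above swap digest.FromTarArchive for digest.FromReader, which digests the raw byte stream (SHA-256 being the canonical algorithm in this codebase). Assuming that behavior, the "valid but empty tarfile" digest can be reproduced with nothing but the standard library:

// Hedged sketch: hashing the 1024 zero bytes of an empty tar with SHA-256
// should yield the same value digest.FromReader is expected to report.
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
)

func main() {
	emptyTar := bytes.Repeat([]byte("\x00"), 1024) // two zeroed 512-byte blocks

	h := sha256.New()
	if _, err := io.Copy(h, bytes.NewReader(emptyTar)); err != nil {
		panic(err)
	}
	fmt.Printf("sha256:%x\n", h.Sum(nil))
}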
@@ -476,7 +472,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
 	// ----------------
 	// Fetch the layer with an invalid digest
-	badURL := strings.Replace(layerURL, "tarsum", "trsum", 1)
+	badURL := strings.Replace(layerURL, "sha256", "sha257", 1)
 	resp, err = http.Get(badURL)
 	if err != nil {
 		t.Fatalf("unexpected error fetching layer: %v", err)
@@ -523,7 +519,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
 	checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK)
 
 	// Missing tests:
-	// 	- Upload the same tarsum file under and different repository and
+	// 	- Upload the same tar file under a different repository and
 	//	  ensure the content remains uncorrupted.
 	return env
 }
@@ -570,7 +566,7 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) {
 	// ----------------
 	// Attempt to delete a layer with an invalid digest
-	badURL := strings.Replace(layerURL, "tarsum", "trsum", 1)
+	badURL := strings.Replace(layerURL, "sha256", "sha257", 1)
 	resp, err = httpDelete(badURL)
 	if err != nil {
 		t.Fatalf("unexpected error fetching layer: %v", err)
@@ -612,12 +608,11 @@ func TestDeleteDisabled(t *testing.T) {
 	imageName := "foo/bar"
 
 	// "build" our layer file
-	layerFile, tarSumStr, err := testutil.CreateRandomTarFile()
+	layerFile, layerDigest, err := testutil.CreateRandomTarFile()
 	if err != nil {
 		t.Fatalf("error creating random layer file: %v", err)
 	}
 
-	layerDigest := digest.Digest(tarSumStr)
 	layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest)
 	if err != nil {
 		t.Fatalf("Error building blob URL")
@@ -638,12 +633,11 @@ func TestDeleteReadOnly(t *testing.T) {
 	imageName := "foo/bar"
 
 	// "build" our layer file
-	layerFile, tarSumStr, err := testutil.CreateRandomTarFile()
+	layerFile, layerDigest, err := testutil.CreateRandomTarFile()
 	if err != nil {
 		t.Fatalf("error creating random layer file: %v", err)
 	}
 
-	layerDigest := digest.Digest(tarSumStr)
 	layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest)
 	if err != nil {
 		t.Fatalf("Error building blob URL")


@@ -102,13 +102,6 @@ func TestAppDispatcher(t *testing.T) {
 				"name", "foo/bar",
 			},
 		},
-		{
-			endpoint: v2.RouteNameBlob,
-			vars: []string{
-				"name", "foo/bar",
-				"digest", "tarsum.v1+bogus:abcdef0123456789",
-			},
-		},
 		{
 			endpoint: v2.RouteNameBlobUpload,
 			vars: []string{


@@ -20,16 +20,11 @@ import (
 // TestSimpleBlobUpload covers the blob upload process, exercising common
 // error paths that might be seen during an upload.
 func TestSimpleBlobUpload(t *testing.T) {
-	randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile()
+	randomDataReader, dgst, err := testutil.CreateRandomTarFile()
 	if err != nil {
 		t.Fatalf("error creating random reader: %v", err)
 	}
 
-	dgst := digest.Digest(tarSumStr)
-	if err != nil {
-		t.Fatalf("error allocating upload store: %v", err)
-	}
-
 	ctx := context.Background()
 	imageName := "foo/bar"
 	driver := inmemory.New()
@@ -225,13 +220,11 @@ func TestSimpleBlobRead(t *testing.T) {
 	}
 	bs := repository.Blobs(ctx)
 
-	randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string.
+	randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string.
 	if err != nil {
 		t.Fatalf("error creating random data: %v", err)
 	}
 
-	dgst := digest.Digest(tarSumStr)
-
 	// Test for existence.
 	desc, err := bs.Stat(ctx, dgst)
 	if err != distribution.ErrBlobUnknown {
@@ -358,7 +351,7 @@ func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expec
 	if dgst != expectedDigest {
 		// sanity check on zero digest
-		t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestTarSumV1EmptyTar)
+		t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest)
 	}
 
 	desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst})


@@ -302,7 +302,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor
 	// get a hash, then the underlying file is deleted, we risk moving
 	// a zero-length blob into a nonzero-length blob location. To
 	// prevent this horrid thing, we employ the hack of only allowing
-	// to this happen for the zero tarsum.
+	// to this happen for the digest of an empty tar.
 	if desc.Digest == digest.DigestSha256EmptyTar {
 		return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
 	}


@@ -249,7 +249,7 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx cont
 	}
 
 	// Also set the values for the primary descriptor, if they differ by
-	// algorithm (ie sha256 vs tarsum).
+	// algorithm (ie sha256 vs sha512).
 	if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() {
 		if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil {
 			return err


@@ -282,7 +282,7 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis
 	}
 
 	if target != dgst {
-		// Track when we are doing cross-digest domain lookups. ie, tarsum to sha256.
+		// Track when we are doing cross-digest domain lookups. ie, sha512 to sha256.
 		context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target)
 	}


@@ -396,9 +396,8 @@ type layerLinkPathSpec struct {
 func (layerLinkPathSpec) pathSpec() {}
 
 // blobAlgorithmReplacer does some very simple path sanitization for user
-// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths
-// should be "safe" before getting this far due to strict digest requirements
-// but we can add further path conversion here, if needed.
+// input. Paths should be "safe" before getting this far due to strict digest
+// requirements but we can add further path conversion here, if needed.
 var blobAlgorithmReplacer = strings.NewReplacer(
 	"+", "/",
 	".", "/",
@@ -468,10 +467,6 @@ func (repositoriesRootPathSpec) pathSpec() {}
 //
 //	<algorithm>/<hex digest>
 //
-// Most importantly, for tarsum, the layout looks like this:
-//
-//	tarsum/<version>/<digest algorithm>/<full digest>
-//
 // If multilevel is true, the first two bytes of the digest will separate
 // groups of digest folder. It will be as follows:
 //
@@ -494,19 +489,5 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error)
 	suffix = append(suffix, hex)
 
-	if tsi, err := digest.ParseTarSum(dgst.String()); err == nil {
-		// We have a tarsum!
-		version := tsi.Version
-		if version == "" {
-			version = "v0"
-		}
-
-		prefix = []string{
-			"tarsum",
-			version,
-			tsi.Algorithm,
-		}
-	}
-
 	return append(prefix, suffix...), nil
 }
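
With the tarsum branch deleted, digestPathComponents always yields <algorithm>/<hex> (plus a two-character fan-out directory when multilevel is set), as described in the comment above. A rough standalone sketch of that reduced logic; this is an illustration of the layout, not the registry's actual implementation:

// Hedged sketch: compute the path components for a canonical "<algorithm>:<hex>"
// digest, mirroring the simplified layout after tarsum removal.
package main

import (
	"fmt"
	"strings"
)

func digestPathComponentsSketch(dgst string, multilevel bool) ([]string, error) {
	parts := strings.SplitN(dgst, ":", 2)
	if len(parts) != 2 || parts[1] == "" {
		return nil, fmt.Errorf("invalid digest: %q", dgst)
	}
	algorithm, hex := parts[0], parts[1]

	prefix := []string{algorithm}
	var suffix []string
	if multilevel && len(hex) >= 2 {
		suffix = append(suffix, hex[:2]) // two-character fan-out directory
	}
	suffix = append(suffix, hex)

	return append(prefix, suffix...), nil
}

func main() {
	components, err := digestPathComponentsSketch(
		"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", true)
	if err != nil {
		panic(err)
	}
	// Prints: sha256/3b/3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5
	fmt.Println(strings.Join(components, "/"))
}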


@@ -2,8 +2,6 @@ package storage
 
 import (
 	"testing"
-
-	"github.com/docker/distribution/digest"
 )
 
 func TestPathMapper(t *testing.T) {
@@ -84,25 +82,6 @@ func TestPathMapper(t *testing.T) {
 			},
 			expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link",
 		},
-		{
-			spec: layerLinkPathSpec{
-				name:   "foo/bar",
-				digest: "tarsum.v1+test:abcdef",
-			},
-			expected: "/docker/registry/v2/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link",
-		},
-		{
-			spec: blobDataPathSpec{
-				digest: digest.Digest("tarsum.dev+sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"),
-			},
-			expected: "/docker/registry/v2/blobs/tarsum/dev/sha512/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data",
-		},
-		{
-			spec: blobDataPathSpec{
-				digest: digest.Digest("tarsum.v1+sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"),
-			},
-			expected: "/docker/registry/v2/blobs/tarsum/v1/sha256/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data",
-		},
 		{
 			spec: uploadDataPathSpec{