Remove tarsum support for digest package
tarsum is not actually used by the registry. Remove support for it. Convert numerous uses in unit tests to SHA256. Update docs to remove mentions of tarsums (which were often inaccurate). Remove tarsum dependency.

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
parent 58232e50cf
commit a077202f88

10 changed files with 22 additions and 90 deletions
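Before the file-by-file diff, a minimal sketch (not part of the commit) of the digesting pattern the converted tests rely on. The digest.FromReader call and the github.com/docker/distribution/digest import path are taken from the changes below; the sample input string is invented.

package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/digest"
)

func main() {
	// Where tests previously derived a tarsum string and wrapped it in
	// digest.Digest(...), they now hash the content directly; FromReader
	// uses the package's canonical algorithm (SHA-256 here).
	dgst, err := digest.FromReader(strings.NewReader("arbitrary layer bytes")) // sample input, not from the commit
	if err != nil {
		panic(err)
	}
	fmt.Println(dgst)             // sha256:<64 hex characters>
	fmt.Println(dgst.Algorithm()) // sha256
}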
@@ -87,14 +87,6 @@ func TestRouter(t *testing.T) {
 			"name": "docker.com/foo/bar/baz",
 		},
 	},
-	{
-		RouteName:  RouteNameBlob,
-		RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234",
-		Vars: map[string]string{
-			"name":   "foo/bar",
-			"digest": "tarsum.dev+foo:abcdef0919234",
-		},
-	},
 	{
 		RouteName:  RouteNameBlob,
 		RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234",
@@ -35,9 +35,9 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase {
 		},
 		{
 			description:  "build blob url",
-			expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789",
+			expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
 			build: func() (string, error) {
-				return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789")
+				return urlBuilder.BuildBlobURL("foo/bar", "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5")
			},
		},
		{
@@ -49,11 +49,11 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase {
 		},
 		{
 			description:  "build blob upload url with digest and size",
-			expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000",
+			expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000",
 			build: func() (string, error) {
 				return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{
 					"size":   []string{"10000"},
-					"digest": []string{"tarsum.v1+sha256:abcdef0123456789"},
+					"digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"},
 				})
 			},
 		},
@@ -66,11 +66,11 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase {
 		},
 		{
 			description:  "build blob upload chunk url with digest and size",
-			expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000",
+			expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000",
 			build: func() (string, error) {
 				return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{
 					"size":   []string{"10000"},
-					"digest": []string{"tarsum.v1+sha256:abcdef0123456789"},
+					"digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"},
 				})
 			},
 		},
@@ -251,22 +251,18 @@ type blobArgs struct {
 	imageName   string
 	layerFile   io.ReadSeeker
 	layerDigest digest.Digest
-	tarSumStr   string
 }
 
 func makeBlobArgs(t *testing.T) blobArgs {
-	layerFile, tarSumStr, err := testutil.CreateRandomTarFile()
+	layerFile, layerDigest, err := testutil.CreateRandomTarFile()
 	if err != nil {
 		t.Fatalf("error creating random layer file: %v", err)
 	}
 
-	layerDigest := digest.Digest(tarSumStr)
-
 	args := blobArgs{
 		imageName:   "foo/bar",
 		layerFile:   layerFile,
 		layerDigest: layerDigest,
-		tarSumStr:   tarSumStr,
 	}
 	return args
 }
@@ -393,7 +389,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
 
 	// -----------------------------------------
 	// Do layer push with an empty body and correct digest
-	zeroDigest, err := digest.FromTarArchive(bytes.NewReader([]byte{}))
+	zeroDigest, err := digest.FromReader(bytes.NewReader([]byte{}))
 	if err != nil {
 		t.Fatalf("unexpected error digesting empty buffer: %v", err)
 	}
@@ -406,7 +402,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
 
 	// This is a valid but empty tarfile!
 	emptyTar := bytes.Repeat([]byte("\x00"), 1024)
-	emptyDigest, err := digest.FromTarArchive(bytes.NewReader(emptyTar))
+	emptyDigest, err := digest.FromReader(bytes.NewReader(emptyTar))
 	if err != nil {
 		t.Fatalf("unexpected error digesting empty tar: %v", err)
 	}
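A standalone sketch (standard library only, not part of the commit) showing what the emptyTar fixture above digests to with plain SHA-256, in place of the old zero tarsum:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

func main() {
	// The test treats 1024 zero bytes (two empty 512-byte tar blocks) as a
	// valid but empty tar archive and digests it like any other blob.
	emptyTar := bytes.Repeat([]byte("\x00"), 1024)
	sum := sha256.Sum256(emptyTar)
	fmt.Printf("sha256:%x\n", sum)
}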
@@ -476,7 +472,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
 
 	// ----------------
 	// Fetch the layer with an invalid digest
-	badURL := strings.Replace(layerURL, "tarsum", "trsum", 1)
+	badURL := strings.Replace(layerURL, "sha256", "sha257", 1)
 	resp, err = http.Get(badURL)
 	if err != nil {
 		t.Fatalf("unexpected error fetching layer: %v", err)
@@ -523,7 +519,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
 	checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK)
 
 	// Missing tests:
-	// 	- Upload the same tarsum file under and different repository and
+	// 	- Upload the same tar file under and different repository and
 	//       ensure the content remains uncorrupted.
 	return env
 }
@@ -570,7 +566,7 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) {
 
 	// ----------------
 	// Attempt to delete a layer with an invalid digest
-	badURL := strings.Replace(layerURL, "tarsum", "trsum", 1)
+	badURL := strings.Replace(layerURL, "sha256", "sha257", 1)
 	resp, err = httpDelete(badURL)
 	if err != nil {
 		t.Fatalf("unexpected error fetching layer: %v", err)
@@ -612,12 +608,11 @@ func TestDeleteDisabled(t *testing.T) {
 
 	imageName := "foo/bar"
 	// "build" our layer file
-	layerFile, tarSumStr, err := testutil.CreateRandomTarFile()
+	layerFile, layerDigest, err := testutil.CreateRandomTarFile()
 	if err != nil {
 		t.Fatalf("error creating random layer file: %v", err)
 	}
 
-	layerDigest := digest.Digest(tarSumStr)
 	layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest)
 	if err != nil {
 		t.Fatalf("Error building blob URL")
@@ -638,12 +633,11 @@ func TestDeleteReadOnly(t *testing.T) {
 
 	imageName := "foo/bar"
 	// "build" our layer file
-	layerFile, tarSumStr, err := testutil.CreateRandomTarFile()
+	layerFile, layerDigest, err := testutil.CreateRandomTarFile()
 	if err != nil {
 		t.Fatalf("error creating random layer file: %v", err)
 	}
 
-	layerDigest := digest.Digest(tarSumStr)
 	layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest)
 	if err != nil {
 		t.Fatalf("Error building blob URL")
@@ -102,13 +102,6 @@ func TestAppDispatcher(t *testing.T) {
 				"name", "foo/bar",
 			},
 		},
-		{
-			endpoint: v2.RouteNameBlob,
-			vars: []string{
-				"name", "foo/bar",
-				"digest", "tarsum.v1+bogus:abcdef0123456789",
-			},
-		},
 		{
 			endpoint: v2.RouteNameBlobUpload,
 			vars: []string{
@@ -20,16 +20,11 @@ import (
 // TestSimpleBlobUpload covers the blob upload process, exercising common
 // error paths that might be seen during an upload.
 func TestSimpleBlobUpload(t *testing.T) {
-	randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile()
+	randomDataReader, dgst, err := testutil.CreateRandomTarFile()
 	if err != nil {
 		t.Fatalf("error creating random reader: %v", err)
 	}
 
-	dgst := digest.Digest(tarSumStr)
-	if err != nil {
-		t.Fatalf("error allocating upload store: %v", err)
-	}
-
 	ctx := context.Background()
 	imageName := "foo/bar"
 	driver := inmemory.New()
@@ -225,13 +220,11 @@ func TestSimpleBlobRead(t *testing.T) {
 	}
 	bs := repository.Blobs(ctx)
 
-	randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string.
+	randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string.
 	if err != nil {
 		t.Fatalf("error creating random data: %v", err)
 	}
 
-	dgst := digest.Digest(tarSumStr)
-
 	// Test for existence.
 	desc, err := bs.Stat(ctx, dgst)
 	if err != distribution.ErrBlobUnknown {
@@ -358,7 +351,7 @@ func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expec
 
 	if dgst != expectedDigest {
 		// sanity check on zero digest
-		t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestTarSumV1EmptyTar)
+		t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest)
 	}
 
 	desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst})
@@ -302,7 +302,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor
 	// get a hash, then the underlying file is deleted, we risk moving
 	// a zero-length blob into a nonzero-length blob location. To
 	// prevent this horrid thing, we employ the hack of only allowing
-	// to this happen for the zero tarsum.
+	// to this happen for the digest of an empty tar.
 	if desc.Digest == digest.DigestSha256EmptyTar {
 		return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
 	}
docs/storage/cache/redis/redis.go (vendored, 2 changes)
@@ -249,7 +249,7 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx cont
 	}
 
 	// Also set the values for the primary descriptor, if they differ by
-	// algorithm (ie sha256 vs tarsum).
+	// algorithm (ie sha256 vs sha512).
 	if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() {
 		if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil {
 			return err
@@ -282,7 +282,7 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis
 	}
 
 	if target != dgst {
-		// Track when we are doing cross-digest domain lookups. ie, tarsum to sha256.
+		// Track when we are doing cross-digest domain lookups. ie, sha512 to sha256.
 		context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target)
 	}
 
@@ -396,9 +396,8 @@ type layerLinkPathSpec struct {
 func (layerLinkPathSpec) pathSpec() {}
 
 // blobAlgorithmReplacer does some very simple path sanitization for user
-// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths
-// should be "safe" before getting this far due to strict digest requirements
-// but we can add further path conversion here, if needed.
+// input. Paths should be "safe" before getting this far due to strict digest
+// requirements but we can add further path conversion here, if needed.
 var blobAlgorithmReplacer = strings.NewReplacer(
 	"+", "/",
 	".", "/",
@@ -468,10 +467,6 @@ func (repositoriesRootPathSpec) pathSpec() {}
 //
 //	<algorithm>/<hex digest>
 //
-// Most importantly, for tarsum, the layout looks like this:
-//
-//	tarsum/<version>/<digest algorithm>/<full digest>
-//
 // If multilevel is true, the first two bytes of the digest will separate
 // groups of digest folder. It will be as follows:
 //
@@ -494,19 +489,5 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error)
 
 	suffix = append(suffix, hex)
 
-	if tsi, err := digest.ParseTarSum(dgst.String()); err == nil {
-		// We have a tarsum!
-		version := tsi.Version
-		if version == "" {
-			version = "v0"
-		}
-
-		prefix = []string{
-			"tarsum",
-			version,
-			tsi.Algorithm,
-		}
-	}
-
 	return append(prefix, suffix...), nil
 }
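With the tarsum prefix logic gone, digestPathComponents reduces to algorithm plus hex, optionally split on the first two hex characters. A hypothetical, self-contained sketch of the resulting blob data layout, modeled on the multilevel layout described in the comment above and the .../ab/<hex>/data pattern visible in the path tests below; blobDataPath is an invented helper, not a function in this repository.

package main

import (
	"fmt"
	"path"
	"strings"
)

// blobDataPath is a hypothetical helper illustrating the simplified layout:
// <algorithm>/<first two hex chars>/<full hex>/data under the blobs root.
func blobDataPath(root string, dgst string) (string, error) {
	parts := strings.SplitN(dgst, ":", 2)
	if len(parts) != 2 || len(parts[1]) < 2 {
		return "", fmt.Errorf("unsupported digest: %q", dgst)
	}
	algorithm, hex := parts[0], parts[1]
	return path.Join(root, "blobs", algorithm, hex[:2], hex, "data"), nil
}

func main() {
	p, err := blobDataPath("/docker/registry/v2",
		"sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789")
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // /docker/registry/v2/blobs/sha256/ab/abcdef01.../data
}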
@@ -2,8 +2,6 @@ package storage
 
 import (
 	"testing"
-
-	"github.com/docker/distribution/digest"
 )
 
 func TestPathMapper(t *testing.T) {
@@ -84,25 +82,6 @@ func TestPathMapper(t *testing.T) {
 			},
 			expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link",
 		},
-		{
-			spec: layerLinkPathSpec{
-				name:   "foo/bar",
-				digest: "tarsum.v1+test:abcdef",
-			},
-			expected: "/docker/registry/v2/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link",
-		},
-		{
-			spec: blobDataPathSpec{
-				digest: digest.Digest("tarsum.dev+sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"),
-			},
-			expected: "/docker/registry/v2/blobs/tarsum/dev/sha512/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data",
-		},
-		{
-			spec: blobDataPathSpec{
-				digest: digest.Digest("tarsum.v1+sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"),
-			},
-			expected: "/docker/registry/v2/blobs/tarsum/v1/sha256/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data",
-		},
 
 		{
 			spec: uploadDataPathSpec{