fixing up tests to work with non-tarsum future
Signed-off-by: David Lawrence <david.lawrence@docker.com> (github: endophage)
Parent: d3bc4c4b38
Commit: b777e389b9
5 changed files with 61 additions and 7 deletions
```diff
@@ -16,6 +16,8 @@ import (
 const (
 	// DigestTarSumV1EmptyTar is the digest for the empty tar file.
 	DigestTarSumV1EmptyTar = "tarsum.v1+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
+	DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
 )
 
 // Digest allows simple protection of hex formatted digest strings, prefixed
```
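Both constants end in the same hex value because the sha256 of zero bytes is the well-known e3b0c442... sum; only the algorithm prefix differs. A minimal standard-library sketch (not part of this commit) to confirm the hex:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// sha256 over zero bytes of input yields the well-known empty digest.
	sum := sha256.Sum256(nil)

	// Prints sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,
	// matching the hex portion of both constants above.
	fmt.Printf("sha256:%x\n", sum)
}
```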
digest/digester.go (new file, 44 additions)
```diff
@@ -0,0 +1,44 @@
+package digest
+
+import (
+	"crypto/sha256"
+	"hash"
+)
+
+// Digester calculates the digest of written data. It is functionally
+// equivalent to hash.Hash but provides methods for returning the Digest type
+// rather than raw bytes.
+type Digester struct {
+	alg  string
+	hash hash.Hash
+}
+
+// NewDigester creates a new Digester with the given hashing algorithm and an
+// instance of that algorithm's hasher.
+func NewDigester(alg string, h hash.Hash) Digester {
+	return Digester{
+		alg:  alg,
+		hash: h,
+	}
+}
+
+// NewCanonicalDigester is a convenience function to create a new Digester with
+// the default settings.
+func NewCanonicalDigester() Digester {
+	return NewDigester("sha256", sha256.New())
+}
+
+// Write data to the digester. These writes cannot fail.
+func (d *Digester) Write(p []byte) (n int, err error) {
+	return d.hash.Write(p)
+}
+
+// Digest returns the current digest for this digester.
+func (d *Digester) Digest() Digest {
+	return NewDigest(d.alg, d.hash)
+}
+
+// Reset the state of the digester.
+func (d *Digester) Reset() {
+	d.hash.Reset()
+}
```
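For orientation, this is roughly how callers drive the new Digester. A minimal sketch only; the import path is an assumption and not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/digest" // assumed import path for the package above
)

func main() {
	// NewCanonicalDigester wires up the default sha256 hasher.
	digester := digest.NewCanonicalDigester()

	// Writes feed the underlying hash; per the doc comment they cannot fail.
	digester.Write([]byte("hello "))
	digester.Write([]byte("world"))

	// Digest() wraps the current hash state in the typed Digest form,
	// e.g. "sha256:b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9".
	fmt.Println(digester.Digest())

	// Reset clears the hash so the same digester can be reused.
	digester.Reset()
}
```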
```diff
@@ -573,7 +573,9 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Diges
 
 // pushLayer pushes the layer content returning the url on success.
 func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string {
-	resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, body)
+	digester := digest.NewCanonicalDigester()
+
+	resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, &digester))
 	if err != nil {
 		t.Fatalf("unexpected error doing push layer request: %v", err)
 	}
```
```diff
@@ -581,7 +583,13 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest,
 	checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated)
 
-	expectedLayerURL, err := ub.BuildBlobURL(name, dgst)
+	if err != nil {
+		t.Fatalf("error generating sha256 digest of body")
+	}
+
+	sha256Dgst := digester.Digest()
+
+	expectedLayerURL, err := ub.BuildBlobURL(name, sha256Dgst)
 	if err != nil {
 		t.Fatalf("error building expected layer url: %v", err)
 	}
```
```diff
@@ -589,7 +597,7 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest,
 	checkHeaders(t, resp, http.Header{
 		"Location":              []string{expectedLayerURL},
 		"Content-Length":        []string{"0"},
-		"Docker-Content-Digest": []string{dgst.String()},
+		"Docker-Content-Digest": []string{sha256Dgst.String()},
 	})
 
 	return resp.Header.Get("Location")
```
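The io.TeeReader call in pushLayer computes the canonical digest as a side effect of streaming the body to the registry, which is why the expected URL and Docker-Content-Digest header can be built from digester.Digest() afterwards. The same pattern in isolation, using only the standard library (a sketch, not code from this commit):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	body := strings.NewReader("layer contents")
	h := sha256.New()

	// Everything read from tee is also written into h, so consuming the
	// stream once produces both the uploaded bytes and their digest.
	tee := io.TeeReader(body, h)

	// Stand-in for the actual upload: just drain the stream.
	if _, err := io.Copy(io.Discard, tee); err != nil {
		fmt.Println("copy failed:", err)
		return
	}

	fmt.Printf("sha256:%x\n", h.Sum(nil))
}
```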
```diff
@@ -682,7 +690,7 @@ func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) {
 
 		for _, hv := range resp.Header[k] {
 			if hv != v {
-				t.Fatalf("header value not matched in response: %q != %q", hv, v)
+				t.Fatalf("%v header value not matched in response: %q != %q", k, hv, v)
 			}
 		}
 	}
```
```diff
@@ -266,12 +266,12 @@ func TestLayerUploadZeroLength(t *testing.T) {
 
 	io.Copy(upload, bytes.NewReader([]byte{}))
 
-	dgst, err := digest.FromTarArchive(bytes.NewReader([]byte{}))
+	dgst, err := digest.FromReader(bytes.NewReader([]byte{}))
 	if err != nil {
 		t.Fatalf("error getting zero digest: %v", err)
 	}
 
-	if dgst != digest.DigestTarSumV1EmptyTar {
+	if dgst != digest.DigestSha256EmptyTar {
 		// sanity check on zero digest
 		t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestTarSumV1EmptyTar)
 	}
```
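digest.FromReader hashes the raw byte stream instead of interpreting it as a tar archive, which is the substance of the tarsum-to-sha256 move. A rough sketch of the call as the test uses it, with the import path assumed:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/distribution/digest" // assumed import path
)

func main() {
	// FromReader consumes the reader and returns a canonical sha256 digest,
	// so empty input yields the DigestSha256EmptyTar constant.
	dgst, err := digest.FromReader(bytes.NewReader([]byte{}))
	if err != nil {
		fmt.Println("digest error:", err)
		return
	}
	fmt.Println(dgst == digest.DigestSha256EmptyTar) // true
}
```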
```diff
@@ -159,7 +159,7 @@ func (luc *layerUploadController) moveLayer(dgst digest.Digest) error {
 	// a zero-length blob into a nonzero-length blob location. To
 	// prevent this horrid thing, we employ the hack of only allowing
 	// to this happen for the zero tarsum.
-	if dgst == digest.DigestTarSumV1EmptyTar {
+	if dgst == digest.DigestSha256EmptyTar {
 		return luc.driver.PutContent(blobPath, []byte{})
 	}
 
```