diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 3a4f8736..23a0e786 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -86,7 +86,7 @@ }, { "ImportPath": "github.com/stevvooe/resumable", - "Rev": "cf61dd331ceba0ab845444fdb626b9a465704e49" + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" }, { "ImportPath": "github.com/yvasiyarov/go-metrics", diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/resume.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/resume.go index 514bff26..426d78ad 100644 --- a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/resume.go +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/resume.go @@ -3,6 +3,9 @@ package sha256 import ( "bytes" "encoding/gob" + + // import to ensure that our init function runs after the standard package + _ "crypto/sha256" ) // Len returns the number of bytes which have been written to the digest. diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/resume.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/resume.go index 165f6e20..55b433e7 100644 --- a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/resume.go +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/resume.go @@ -3,6 +3,9 @@ package sha512 import ( "bytes" "encoding/gob" + + // import to ensure that our init function runs after the standard package + _ "crypto/sha512" ) // Len returns the number of bytes which have been written to the digest. 
diff --git a/digest/digest.go b/digest/digest.go index 42a1fbf2..68991685 100644 --- a/digest/digest.go +++ b/digest/digest.go @@ -2,7 +2,6 @@ package digest import ( "bytes" - "crypto" "fmt" "hash" "io" @@ -19,9 +18,6 @@ const ( // DigestSha256EmptyTar is the canonical sha256 digest of empty data DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - - CanonicalAlgorithm = "sha256" - CanonicalHash = crypto.SHA256 // main digest algorithm used through distribution ) // Digest allows simple protection of hex formatted digest strings, prefixed @@ -43,7 +39,7 @@ const ( type Digest string // NewDigest returns a Digest from alg and a hash.Hash object. -func NewDigest(alg string, h hash.Hash) Digest { +func NewDigest(alg Algorithm, h hash.Hash) Digest { return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil))) } @@ -76,7 +72,7 @@ func ParseDigest(s string) (Digest, error) { // FromReader returns the most valid digest for the underlying content. func FromReader(rd io.Reader) (Digest, error) { - digester := NewCanonicalDigester() + digester := Canonical.New() if _, err := io.Copy(digester.Hash(), rd); err != nil { return "", err @@ -135,8 +131,8 @@ func (d Digest) Validate() error { return ErrDigestInvalidFormat } - switch s[:i] { - case "sha256", "sha384", "sha512": + switch Algorithm(s[:i]) { + case SHA256, SHA384, SHA512: break default: return ErrDigestUnsupported @@ -147,8 +143,8 @@ func (d Digest) Validate() error { // Algorithm returns the algorithm portion of the digest. This will panic if // the underlying digest is not in a valid format. -func (d Digest) Algorithm() string { - return string(d[:d.sepIndex()]) +func (d Digest) Algorithm() Algorithm { + return Algorithm(d[:d.sepIndex()]) } // Hex returns the hex digest portion of the digest. 
This will panic if the diff --git a/digest/digest_test.go b/digest/digest_test.go index 9e9ae356..41c8bee8 100644 --- a/digest/digest_test.go +++ b/digest/digest_test.go @@ -10,7 +10,7 @@ func TestParseDigest(t *testing.T) { for _, testcase := range []struct { input string err error - algorithm string + algorithm Algorithm hex string }{ { diff --git a/digest/digester.go b/digest/digester.go index 68d0b60b..556dd93a 100644 --- a/digest/digester.go +++ b/digest/digester.go @@ -1,6 +1,76 @@ package digest -import "hash" +import ( + "crypto" + "hash" +) + +// Algorithm identifies an implementation of a digester by an identifier. +// Note that this defines both the hash algorithm used and the string +// encoding. +type Algorithm string + +// supported digest types +const ( + SHA256 Algorithm = "sha256" // sha256 with hex encoding + SHA384 Algorithm = "sha384" // sha384 with hex encoding + SHA512 Algorithm = "sha512" // sha512 with hex encoding + TarsumV1SHA256 Algorithm = "tarsum+v1+sha256" // supported tarsum version, verification only + + // Canonical is the primary digest algorithm used with the distribution + // project. Other digests may be used but this one is the primary storage + // digest. + Canonical = SHA256 +) + +var ( + // TODO(stevvooe): Follow the pattern of the standard crypto package for + // registration of digests. Effectively, we are a registerable set and + // common symbol access. + + // algorithms maps values to hash.Hash implementations. Other algorithms + // may be available but they cannot be calculated by the digest package. + algorithms = map[Algorithm]crypto.Hash{ + SHA256: crypto.SHA256, + SHA384: crypto.SHA384, + SHA512: crypto.SHA512, + } +) + +// Available returns true if the digest type is available for use. If this +// returns false, New and Hash will return nil.
+func (a Algorithm) Available() bool { + h, ok := algorithms[a] + if !ok { + return false + } + + // check availability of the hash, as well + return h.Available() +} + +// New returns a new digester for the specified algorithm. If the algorithm +// does not have a digester implementation, nil will be returned. This can be +// checked by calling Available before calling New. +func (a Algorithm) New() Digester { + return &digester{ + alg: a, + hash: a.Hash(), + } +} + +// Hash returns a new hash as used by the algorithm. If not available, nil is +// returned. Make sure to check Available before calling. +func (a Algorithm) Hash() hash.Hash { + if !a.Available() { + return nil + } + + return algorithms[a].New() +} + +// TODO(stevvooe): Allow resolution of verifiers using the digest type and +// this registration system. // Digester calculates the digest of written data. Writes should go directly // to the return value of Hash, while calling Digest will return the current @@ -10,24 +80,9 @@ type Digester interface { Digest() Digest } -// NewDigester create a new Digester with the given hashing algorithm and -// instance of that algo's hasher. -func NewDigester(alg string, h hash.Hash) Digester { - return &digester{ - alg: alg, - hash: h, - } -} - -// NewCanonicalDigester is a convenience function to create a new Digester with -// our default settings. -func NewCanonicalDigester() Digester { - return NewDigester(CanonicalAlgorithm, CanonicalHash.New()) -} - // digester provides a simple digester definition that embeds a hasher. 
type digester struct { - alg string + alg Algorithm hash hash.Hash } @@ -36,5 +91,5 @@ func (d *digester) Hash() hash.Hash { } func (d *digester) Digest() Digest { - return NewDigest(d.alg, d.Hash()) + return NewDigest(d.alg, d.hash) } diff --git a/digest/digester_resumable_test.go b/digest/digester_resumable_test.go index 114268aa..6ba21c80 100644 --- a/digest/digester_resumable_test.go +++ b/digest/digester_resumable_test.go @@ -13,9 +13,9 @@ import ( // is exposed through the digester type, which is just a hash plus a Digest // method. func TestResumableDetection(t *testing.T) { - d := NewCanonicalDigester() + d := Canonical.New() if _, ok := d.Hash().(resumable.Hash); !ok { - t.Fatalf("expected digester to implement resumable: %#v, %v", d, d.Hash()) + t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash()) } } diff --git a/digest/set.go b/digest/set.go index 7a9a0380..271d35db 100644 --- a/digest/set.go +++ b/digest/set.go @@ -42,17 +42,17 @@ func NewSet() *Set { // values or short values. This function does not test equality, // rather whether the second value could match against the first // value. 
-func checkShortMatch(alg, hex, shortAlg, shortHex string) bool { +func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { if len(hex) == len(shortHex) { if hex != shortHex { return false } - if len(shortAlg) > 0 && alg != shortAlg { + if len(shortAlg) > 0 && string(alg) != shortAlg { return false } } else if !strings.HasPrefix(hex, shortHex) { return false - } else if len(shortAlg) > 0 && alg != shortAlg { + } else if len(shortAlg) > 0 && string(alg) != shortAlg { return false } return true @@ -68,7 +68,7 @@ func (dst *Set) Lookup(d string) (Digest, error) { } var ( searchFunc func(int) bool - alg string + alg Algorithm hex string ) dgst, err := ParseDigest(d) @@ -88,13 +88,13 @@ func (dst *Set) Lookup(d string) (Digest, error) { } } idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, alg, hex) { + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { return "", ErrDigestNotFound } if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { return dst.entries[idx].digest, nil } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, alg, hex) { + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { return "", ErrDigestAmbiguous } @@ -172,7 +172,7 @@ func ShortCodeTable(dst *Set, length int) map[Digest]string { } type digestEntry struct { - alg string + alg Algorithm val string digest Digest } diff --git a/digest/verifiers.go b/digest/verifiers.go index 11d9d7ae..f8c75b53 100644 --- a/digest/verifiers.go +++ b/digest/verifiers.go @@ -1,8 +1,6 @@ package digest import ( - "crypto/sha256" - "crypto/sha512" "hash" "io" "io/ioutil" @@ -33,7 +31,7 @@ func NewDigestVerifier(d Digest) (Verifier, error) { switch alg { case "sha256", "sha384", "sha512": return hashVerifier{ - hash: newHash(alg), + hash: 
alg.Hash(), digest: d, }, nil default: @@ -95,19 +93,6 @@ func (lv *lengthVerifier) Verified() bool { return lv.expected == lv.len } -func newHash(name string) hash.Hash { - switch name { - case "sha256": - return sha256.New() - case "sha384": - return sha512.New384() - case "sha512": - return sha512.New() - default: - panic("unsupport algorithm: " + name) - } -} - type hashVerifier struct { digest Digest hash hash.Hash diff --git a/registry/client/repository.go b/registry/client/repository.go index cd93cd1a..a806aea4 100644 --- a/registry/client/repository.go +++ b/registry/client/repository.go @@ -321,7 +321,7 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut if err != nil { return distribution.Descriptor{}, err } - dgstr := digest.NewCanonicalDigester() + dgstr := digest.Canonical.New() n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) if err != nil { return distribution.Descriptor{}, err diff --git a/registry/handlers/api_test.go b/registry/handlers/api_test.go index 1a41cfb8..5132f72e 100644 --- a/registry/handlers/api_test.go +++ b/registry/handlers/api_test.go @@ -213,7 +213,7 @@ func TestBlobAPI(t *testing.T) { // Now, push just a chunk layerFile.Seek(0, 0) - canonicalDigester := digest.NewCanonicalDigester() + canonicalDigester := digest.Canonical.New() if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { t.Fatalf("error copying to digest: %v", err) } @@ -637,7 +637,7 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Diges // pushLayer pushes the layer content returning the url on success. 
func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string { - digester := digest.NewCanonicalDigester() + digester := digest.Canonical.New() resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) if err != nil { @@ -702,7 +702,7 @@ func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Resp uploadURL := u.String() - digester := digest.NewCanonicalDigester() + digester := digest.Canonical.New() req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash())) if err != nil { diff --git a/registry/storage/blobwriter.go b/registry/storage/blobwriter.go index 40841a5e..6a37e81d 100644 --- a/registry/storage/blobwriter.go +++ b/registry/storage/blobwriter.go @@ -1,6 +1,7 @@ package storage import ( + "errors" "fmt" "io" "time" @@ -13,7 +14,7 @@ import ( ) var ( - errResumableDigestNotAvailable = fmt.Errorf("resumable digest not available") + errResumableDigestNotAvailable = errors.New("resumable digest not available") ) // layerWriter is used to control the various aspects of resumable @@ -197,7 +198,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri // the same, we don't need to read the data from the backend. This is // because we've written the entire file in the lifecycle of the // current instance. - if bw.written == bw.size && digest.CanonicalAlgorithm == desc.Digest.Algorithm() { + if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() { canonical = bw.digester.Digest() verified = desc.Digest == canonical } @@ -206,7 +207,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri // paths. We may be able to make the size-based check a stronger // guarantee, so this may be defensive. 
if !verified { - digester := digest.NewCanonicalDigester() + digester := digest.Canonical.New() digestVerifier, err := digest.NewDigestVerifier(desc.Digest) if err != nil { diff --git a/registry/storage/linkedblobstore.go b/registry/storage/linkedblobstore.go index ceb53fa8..cb06e354 100644 --- a/registry/storage/linkedblobstore.go +++ b/registry/storage/linkedblobstore.go @@ -164,7 +164,7 @@ func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string blobStore: lbs, id: uuid, startedAt: startedAt, - digester: digest.NewCanonicalDigester(), + digester: digest.Canonical.New(), bufferedFileWriter: *fw, } diff --git a/registry/storage/paths.go b/registry/storage/paths.go index 9e150d3b..35debddf 100644 --- a/registry/storage/paths.go +++ b/registry/storage/paths.go @@ -262,7 +262,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { if v.list { offset = "" // Limit to the prefix for listing offsets. } - return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", v.alg, offset)...), nil + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil case repositoriesRootPathSpec: return path.Join(repoPrefix...), nil default: @@ -447,7 +447,7 @@ func (uploadStartedAtPathSpec) pathSpec() {} type uploadHashStatePathSpec struct { name string id string - alg string + alg digest.Algorithm offset int64 list bool } @@ -479,7 +479,7 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) return nil, err } - algorithm := blobAlgorithmReplacer.Replace(dgst.Algorithm()) + algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) hex := dgst.Hex() prefix := []string{algorithm}