Merge pull request #364 from ncdc/resumable-digest-optional

Use a build flag to disable resumable digests
Stephen Day committed 2015-04-15 23:22:25 -07:00
commit 75983a4a7f
6 changed files with 133 additions and 61 deletions


@@ -2,12 +2,7 @@ package digest
 
 import (
     "crypto/sha256"
-    "fmt"
     "hash"
-
-    "github.com/jlhawn/go-crypto"          // For ResumableHash
-    _ "github.com/jlhawn/go-crypto/sha256" // For Resumable SHA256
-    _ "github.com/jlhawn/go-crypto/sha512" // For Resumable SHA384, SHA512
 )
 
 // Digester calculates the digest of written data. It is functionally
@@ -38,43 +33,22 @@ func (d *Digester) Digest() Digest {
     return NewDigest(d.alg, d.Hash)
 }
 
+// ResumableHash is the common interface implemented by all resumable hash
+// functions.
+type ResumableHash interface {
+    // ResumableHash is a superset of hash.Hash
+    hash.Hash
+    // Len returns the number of bytes written to the Hash so far.
+    Len() uint64
+    // State returns a snapshot of the state of the Hash.
+    State() ([]byte, error)
+    // Restore resets the Hash to the given state.
+    Restore(state []byte) error
+}
+
 // ResumableDigester is a digester that can export its internal state and be
 // restored from saved state.
-type ResumableDigester struct {
-    alg string
-    crypto.ResumableHash
-}
-
-var resumableHashAlgs = map[string]crypto.Hash{
-    "sha256": crypto.SHA256,
-    "sha384": crypto.SHA384,
-    "sha512": crypto.SHA512,
-}
-
-// NewResumableDigester creates a new ResumableDigester with the given hashing
-// algorithm.
-func NewResumableDigester(alg string) (ResumableDigester, error) {
-    hash, supported := resumableHashAlgs[alg]
-    if !supported {
-        return ResumableDigester{}, fmt.Errorf("unsupported resumable hash algorithm: %s", alg)
-    }
-
-    return ResumableDigester{
-        alg:           alg,
-        ResumableHash: hash.New(),
-    }, nil
-}
-
-// NewCanonicalResumableDigester creates a ResumableDigester using the default
-// digest algorithm.
-func NewCanonicalResumableDigester() ResumableDigester {
-    return ResumableDigester{
-        alg:           "sha256",
-        ResumableHash: crypto.SHA256.New(),
-    }
-}
-
-// Digest returns the current digest for this resumable digester.
-func (d ResumableDigester) Digest() Digest {
-    return NewDigest(d.alg, d.ResumableHash)
-}
+type ResumableDigester interface {
+    ResumableHash
+    Digest() Digest
+}


@@ -0,0 +1,52 @@
+// +build !noresumabledigest
+
+package digest
+
+import (
+    "fmt"
+
+    "github.com/jlhawn/go-crypto"          // For ResumableHash
+    _ "github.com/jlhawn/go-crypto/sha256" // For Resumable SHA256
+    _ "github.com/jlhawn/go-crypto/sha512" // For Resumable SHA384, SHA512
+)
+
+// resumableDigester implements ResumableDigester.
+type resumableDigester struct {
+    alg string
+    crypto.ResumableHash
+}
+
+var resumableHashAlgs = map[string]crypto.Hash{
+    "sha256": crypto.SHA256,
+    "sha384": crypto.SHA384,
+    "sha512": crypto.SHA512,
+}
+
+// NewResumableDigester creates a new ResumableDigester with the given hashing
+// algorithm.
+func NewResumableDigester(alg string) (ResumableDigester, error) {
+    hash, supported := resumableHashAlgs[alg]
+    if !supported {
+        return resumableDigester{}, fmt.Errorf("unsupported resumable hash algorithm: %s", alg)
+    }
+
+    return resumableDigester{
+        alg:           alg,
+        ResumableHash: hash.New(),
+    }, nil
+}
+
+// NewCanonicalResumableDigester creates a ResumableDigester using the default
+// digest algorithm.
+func NewCanonicalResumableDigester() ResumableDigester {
+    return resumableDigester{
+        alg:           "sha256",
+        ResumableHash: crypto.SHA256.New(),
+    }
+}
+
+// Digest returns the current digest for this resumable digester.
+func (d resumableDigester) Digest() Digest {
+    return NewDigest(d.alg, d.ResumableHash)
+}
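This whole file sits behind the !noresumabledigest build constraint, so building with "go build -tags noresumabledigest" drops it (and the jlhawn/go-crypto dependency) from the binary entirely. In a default build, the constructor behaves roughly as sketched below; an illustration, not code from the PR:

package main

import (
    "fmt"

    "github.com/docker/distribution/digest"
)

func main() {
    // Only the algorithms registered in resumableHashAlgs are accepted.
    if _, err := digest.NewResumableDigester("md5"); err != nil {
        fmt.Println(err) // unsupported resumable hash algorithm: md5
    }

    // The canonical constructor is fixed to SHA-256.
    d := digest.NewCanonicalResumableDigester()
    d.Write([]byte("hello"))
    fmt.Println(d.Digest()) // sha256:...
}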


@@ -138,13 +138,16 @@ func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (di
         return nil, err
     }
 
-    return &layerWriter{
+    lw := &layerWriter{
         layerStore:         ls,
         uuid:               uuid,
         startedAt:          startedAt,
-        resumableDigester:  digest.NewCanonicalResumableDigester(),
         bufferedFileWriter: *fw,
-    }, nil
+    }
+
+    lw.setupResumableDigester()
+
+    return lw, nil
 }
 
 func (ls *layerStore) path(dgst digest.Digest) (string, error) {


@@ -87,6 +87,10 @@ func (lw *layerWriter) Cancel() error {
 }
 
 func (lw *layerWriter) Write(p []byte) (int, error) {
+    if lw.resumableDigester == nil {
+        return lw.bufferedFileWriter.Write(p)
+    }
+
     // Ensure that the current write offset matches how many bytes have been
     // written to the digester. If not, we need to update the digest state to
     // match the current write position.
@@ -98,6 +102,10 @@ func (lw *layerWriter) Write(p []byte) (int, error) {
 }
 
 func (lw *layerWriter) ReadFrom(r io.Reader) (n int64, err error) {
+    if lw.resumableDigester == nil {
+        return lw.bufferedFileWriter.ReadFrom(r)
+    }
+
     // Ensure that the current write offset matches how many bytes have been
     // written to the digester. If not, we need to update the digest state to
     // match the current write position.
@@ -113,9 +121,11 @@ func (lw *layerWriter) Close() error {
         return lw.err
     }
 
+    if lw.resumableDigester != nil {
         if err := lw.storeHashState(); err != nil {
             return err
         }
+    }
 
     return lw.bufferedFileWriter.Close()
 }
@@ -261,13 +271,18 @@ func (lw *layerWriter) storeHashState() error {
 // validateLayer checks the layer data against the digest, returning an error
 // if it does not match. The canonical digest is returned.
 func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) {
+    var (
+        verified, fullHash bool
+        canonical          digest.Digest
+    )
+
+    if lw.resumableDigester != nil {
         // Restore the hasher state to the end of the upload.
         if err := lw.resumeHashAt(lw.size); err != nil {
             return "", err
         }
 
-    var verified bool
-    canonical := lw.resumableDigester.Digest()
+        canonical = lw.resumableDigester.Digest()
 
         if canonical.Algorithm() == dgst.Algorithm() {
             // Common case: client and server prefer the same canonical digest
@@ -277,6 +292,16 @@ func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error)
             // The client wants to use a different digest algorithm. They'll just
             // have to be patient and wait for us to download and re-hash the
             // uploaded content using that digest algorithm.
+            fullHash = true
+        }
+    } else {
+        // Not using resumable digests, so we need to hash the entire layer.
+        fullHash = true
+    }
+
+    if fullHash {
+        digester := digest.NewCanonicalDigester()
+
         digestVerifier, err := digest.NewDigestVerifier(dgst)
         if err != nil {
             return "", err
@@ -288,10 +313,13 @@ func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error)
             return "", err
         }
 
-        if _, err = io.Copy(digestVerifier, fr); err != nil {
+        tr := io.TeeReader(fr, digester)
+
+        if _, err = io.Copy(digestVerifier, tr); err != nil {
             return "", err
         }
 
+        canonical = digester.Digest()
         verified = digestVerifier.Verified()
     }
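The new fullHash branch re-reads the stored upload exactly once: io.TeeReader feeds a fresh canonical digester while io.Copy drives the verifier for the client-supplied digest, so a single pass yields both the canonical digest and the verification result. A self-contained sketch of that pattern against an in-memory reader (the stand-ins are noted in comments; this is not the registry's storage code):

package main

import (
    "bytes"
    "fmt"
    "io"

    "github.com/docker/distribution/digest"
)

func main() {
    content := []byte("uploaded layer contents")

    // Stand-in for the digest the client supplied with the upload.
    claim := digest.NewCanonicalDigester()
    claim.Write(content)
    claimed := claim.Digest()

    // fr plays the role of the file reader over the stored upload.
    fr := bytes.NewReader(content)

    digester := digest.NewCanonicalDigester()
    digestVerifier, err := digest.NewDigestVerifier(claimed)
    if err != nil {
        panic(err)
    }

    // Tee every byte into the canonical digester while the verifier consumes it.
    tr := io.TeeReader(fr, digester)
    if _, err := io.Copy(digestVerifier, tr); err != nil {
        panic(err)
    }

    fmt.Println("canonical:", digester.Digest())
    fmt.Println("verified: ", digestVerifier.Verified())
}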


@@ -0,0 +1,6 @@
+// +build noresumabledigest
+
+package storage
+
+func (lw *layerWriter) setupResumableDigester() {
+}


@@ -0,0 +1,9 @@
+// +build !noresumabledigest
+
+package storage
+
+import "github.com/docker/distribution/digest"
+
+func (lw *layerWriter) setupResumableDigester() {
+    lw.resumableDigester = digest.NewCanonicalResumableDigester()
+}
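Putting the two files together: a stock "go build" satisfies the !noresumabledigest constraint and compiles the assignment above, while "go build -tags noresumabledigest" compiles the no-op variant instead, leaving lw.resumableDigester nil, which is exactly what the new nil checks in Write, ReadFrom, Close and validateLayer branch on. A toy, self-contained illustration of that nil-guard style (all types and names here are hypothetical, not part of the registry):

package main

import (
    "crypto/sha256"
    "fmt"
    "hash"
    "io"
    "os"
)

// uploadWriter mimics the layerWriter shape: an always-present destination
// plus an optional running hash that a build-tagged setup step may leave nil.
type uploadWriter struct {
    dst       io.Writer
    resumable hash.Hash // nil when the resumable path is compiled out
}

func (w *uploadWriter) Write(p []byte) (int, error) {
    if w.resumable == nil {
        // Feature disabled: pass the bytes straight through.
        return w.dst.Write(p)
    }
    // Feature enabled: keep the running hash in sync with what is written.
    return io.MultiWriter(w.dst, w.resumable).Write(p)
}

func main() {
    enabled := &uploadWriter{dst: os.Stdout, resumable: sha256.New()}
    disabled := &uploadWriter{dst: os.Stdout}

    enabled.Write([]byte("chunk\n"))
    disabled.Write([]byte("chunk\n"))

    fmt.Printf("running digest so far: %x\n", enabled.resumable.Sum(nil))
}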