Uses new digest package instead of string digests

Also drops extraneous test package and uses testutil instead
Brian Bland 2014-11-19 18:52:09 -08:00
parent 1336ced030
commit 64c8bd29cc
6 changed files with 54 additions and 48 deletions

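Taken together, the diffs below swap bare string digests for the typed digest.Digest across the client interface, the in-memory object store, and the manifest types. Here is a minimal sketch of what that means for calling code, assuming (as the dgst.String() calls and the map[digest.Digest]Layer store in the diffs suggest) that digest.Digest is a comparable, string-backed type; the digest value and variable names are illustrative only and not taken from this commit.

package main

import (
	"fmt"

	"github.com/docker/docker-registry/digest"
)

func main() {
	// Hypothetical digest value (the SHA-256 of the empty string); assumes
	// digest.Digest is a string-backed type so this conversion is legal.
	dgst := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")

	// Callers that previously passed raw strings, e.g.
	//     client.BlobLength(name, "sha256:...")
	// now pass the typed value:
	//     client.BlobLength(name, dgst)

	// Where the wire format still needs a string (upload query parameters,
	// V1Compatibility fields), the diffs convert explicitly via String().
	fmt.Println(dgst.String())

	// The typed digest is comparable, so it can key maps such as the
	// object store's map[digest.Digest]Layer.
	seen := map[digest.Digest]bool{dgst: true}
	fmt.Println(len(seen))
}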

@@ -11,6 +11,7 @@ import (
 	"strconv"
 	"github.com/docker/docker-registry"
+	"github.com/docker/docker-registry/digest"
 )
 // Client implements the client interface to the registry http api
@@ -33,13 +34,13 @@ type Client interface {
 	// BlobLength returns the length of the blob stored at the given name,
 	// digest pair.
 	// Returns a length value of -1 on error or if the blob does not exist.
-	BlobLength(name, digest string) (int, error)
+	BlobLength(name string, dgst digest.Digest) (int, error)
 	// GetBlob returns the blob stored at the given name, digest pair in the
 	// form of an io.ReadCloser with the length of this blob.
 	// A nonzero byteOffset can be provided to receive a partial blob beginning
 	// at the given offset.
-	GetBlob(name, digest string, byteOffset int) (io.ReadCloser, int, error)
+	GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error)
 	// InitiateBlobUpload starts a blob upload in the given repository namespace
 	// and returns a unique location url to use for other blob upload methods.
@@ -50,7 +51,7 @@ type Client interface {
 	GetBlobUploadStatus(location string) (int, int, error)
 	// UploadBlob uploads a full blob to the registry.
-	UploadBlob(location string, blob io.ReadCloser, length int, digest string) error
+	UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error
 	// UploadBlobChunk uploads a blob chunk with a given length and startByte to
 	// the registry.
@@ -59,7 +60,7 @@ type Client interface {
 	// FinishChunkedBlobUpload completes a chunked blob upload at a given
 	// location.
-	FinishChunkedBlobUpload(location string, length int, digest string) error
+	FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error
 	// CancelBlobUpload deletes all content at the unfinished blob upload
 	// location and invalidates any future calls to this blob upload.
@@ -222,8 +223,8 @@ func (r *clientImpl) ListImageTags(name string) ([]string, error) {
 	return tags.Tags, nil
 }
-func (r *clientImpl) BlobLength(name, digest string) (int, error) {
-	response, err := http.Head(fmt.Sprintf("%s/v2/%s/blob/%s", r.Endpoint, name, digest))
+func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) {
+	response, err := http.Head(fmt.Sprintf("%s/v2/%s/blob/%s", r.Endpoint, name, dgst))
 	if err != nil {
 		return -1, err
 	}
@@ -254,9 +255,9 @@ func (r *clientImpl) BlobLength(name, digest string) (int, error) {
 	}
 }
-func (r *clientImpl) GetBlob(name, digest string, byteOffset int) (io.ReadCloser, int, error) {
+func (r *clientImpl) GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error) {
 	getRequest, err := http.NewRequest("GET",
-		fmt.Sprintf("%s/v2/%s/blob/%s", r.Endpoint, name, digest), nil)
+		fmt.Sprintf("%s/v2/%s/blob/%s", r.Endpoint, name, dgst), nil)
 	if err != nil {
 		return nil, 0, err
 	}
@@ -278,7 +279,7 @@ func (r *clientImpl) GetBlob(name, digest string, byteOffset int) (io.ReadCloser
 		return response.Body, int(length), nil
 	case response.StatusCode == http.StatusNotFound:
 		response.Body.Close()
-		return nil, 0, &registry.BlobNotFoundError{Name: name, Digest: digest}
+		return nil, 0, &registry.BlobNotFoundError{Name: name, Digest: dgst}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
 		errors := new(registry.Errors)
 		decoder := json.NewDecoder(response.Body)
@@ -351,7 +352,7 @@ func (r *clientImpl) GetBlobUploadStatus(location string) (int, int, error) {
 	}
 }
-func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, digest string) error {
+func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error {
 	defer blob.Close()
 	putRequest, err := http.NewRequest("PUT",
@@ -362,7 +363,7 @@ func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int,
 	queryValues := url.Values{}
 	queryValues.Set("length", fmt.Sprint(length))
-	queryValues.Set("digest", digest)
+	queryValues.Set("digest", dgst.String())
 	putRequest.URL.RawQuery = queryValues.Encode()
 	putRequest.Header.Set("Content-Type", "application/octet-stream")
@@ -444,7 +445,7 @@ func (r *clientImpl) UploadBlobChunk(location string, blobChunk io.ReadCloser, l
 	}
 }
-func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, digest string) error {
+func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error {
 	putRequest, err := http.NewRequest("PUT",
 		fmt.Sprintf("%s%s", r.Endpoint, location), nil)
 	if err != nil {
@@ -453,7 +454,7 @@ func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, digest
 	queryValues := new(url.Values)
 	queryValues.Set("length", fmt.Sprint(length))
-	queryValues.Set("digest", digest)
+	queryValues.Set("digest", dgst.String())
 	putRequest.URL.RawQuery = queryValues.Encode()
 	putRequest.Header.Set("Content-Type", "application/octet-stream")


@@ -10,11 +10,12 @@ import (
 	"testing"
 	"github.com/docker/docker-registry"
-	"github.com/docker/docker-registry/test"
+	"github.com/docker/docker-registry/common/testutil"
+	"github.com/docker/docker-registry/digest"
 )
 type testBlob struct {
-	digest   string
+	digest   digest.Digest
 	contents []byte
 }
@@ -42,7 +43,7 @@ func TestPush(t *testing.T) {
 		// to change at some point.
 		uploadLocations[i] = fmt.Sprintf("/v2/%s/blob/test-uuid", name)
 		blobs[i] = registry.FSLayer{BlobSum: blob.digest}
-		history[i] = registry.ManifestHistory{V1Compatibility: blob.digest}
+		history[i] = registry.ManifestHistory{V1Compatibility: blob.digest.String()}
 	}
 	manifest := &registry.ImageManifest{
@@ -55,44 +56,44 @@ func TestPush(t *testing.T) {
 	}
 	manifestBytes, err := json.Marshal(manifest)
-	blobRequestResponseMappings := make([]test.RequestResponseMapping, 2*len(testBlobs))
+	blobRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs))
 	for i, blob := range testBlobs {
-		blobRequestResponseMappings[2*i] = test.RequestResponseMapping{
-			Request: test.Request{
+		blobRequestResponseMappings[2*i] = testutil.RequestResponseMapping{
+			Request: testutil.Request{
 				Method: "POST",
 				Route:  "/v2/" + name + "/blob/upload/",
 			},
-			Response: test.Response{
+			Response: testutil.Response{
 				StatusCode: http.StatusAccepted,
 				Headers: http.Header(map[string][]string{
 					"Location": {uploadLocations[i]},
 				}),
 			},
 		}
-		blobRequestResponseMappings[2*i+1] = test.RequestResponseMapping{
-			Request: test.Request{
+		blobRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{
+			Request: testutil.Request{
 				Method: "PUT",
 				Route:  uploadLocations[i],
 				QueryParams: map[string][]string{
 					"length": {fmt.Sprint(len(blob.contents))},
-					"digest": {blob.digest},
+					"digest": {blob.digest.String()},
 				},
 				Body: blob.contents,
 			},
-			Response: test.Response{
+			Response: testutil.Response{
 				StatusCode: http.StatusCreated,
 			},
 		}
 	}
-	handler := test.NewHandler(append(blobRequestResponseMappings, test.RequestResponseMap{
-		test.RequestResponseMapping{
-			Request: test.Request{
+	handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMap{
+		testutil.RequestResponseMapping{
+			Request: testutil.Request{
 				Method: "PUT",
 				Route:  "/v2/" + name + "/manifest/" + tag,
 				Body:   manifestBytes,
 			},
-			Response: test.Response{
+			Response: testutil.Response{
 				StatusCode: http.StatusOK,
 			},
 		},
@@ -102,7 +103,7 @@ func TestPush(t *testing.T) {
 	objectStore := &memoryObjectStore{
 		mutex:           new(sync.Mutex),
 		manifestStorage: make(map[string]*registry.ImageManifest),
-		layerStorage:    make(map[string]Layer),
+		layerStorage:    make(map[digest.Digest]Layer),
 	}
 	for _, blob := range testBlobs {
@@ -146,7 +147,7 @@ func TestPull(t *testing.T) {
 	for i, blob := range testBlobs {
 		blobs[i] = registry.FSLayer{BlobSum: blob.digest}
-		history[i] = registry.ManifestHistory{V1Compatibility: blob.digest}
+		history[i] = registry.ManifestHistory{V1Compatibility: blob.digest.String()}
 	}
 	manifest := &registry.ImageManifest{
@@ -159,27 +160,27 @@ func TestPull(t *testing.T) {
 	}
 	manifestBytes, err := json.Marshal(manifest)
-	blobRequestResponseMappings := make([]test.RequestResponseMapping, len(testBlobs))
+	blobRequestResponseMappings := make([]testutil.RequestResponseMapping, len(testBlobs))
 	for i, blob := range testBlobs {
-		blobRequestResponseMappings[i] = test.RequestResponseMapping{
-			Request: test.Request{
+		blobRequestResponseMappings[i] = testutil.RequestResponseMapping{
+			Request: testutil.Request{
 				Method: "GET",
-				Route:  "/v2/" + name + "/blob/" + blob.digest,
+				Route:  "/v2/" + name + "/blob/" + blob.digest.String(),
 			},
-			Response: test.Response{
+			Response: testutil.Response{
 				StatusCode: http.StatusOK,
 				Body:       blob.contents,
 			},
 		}
 	}
-	handler := test.NewHandler(append(blobRequestResponseMappings, test.RequestResponseMap{
-		test.RequestResponseMapping{
-			Request: test.Request{
+	handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMap{
+		testutil.RequestResponseMapping{
+			Request: testutil.Request{
 				Method: "GET",
 				Route:  "/v2/" + name + "/manifest/" + tag,
 			},
-			Response: test.Response{
+			Response: testutil.Response{
 				StatusCode: http.StatusOK,
 				Body:       manifestBytes,
 			},
@@ -190,7 +191,7 @@ func TestPull(t *testing.T) {
 	objectStore := &memoryObjectStore{
 		mutex:           new(sync.Mutex),
 		manifestStorage: make(map[string]*registry.ImageManifest),
-		layerStorage:    make(map[string]Layer),
+		layerStorage:    make(map[digest.Digest]Layer),
 	}
 	err = Pull(client, objectStore, name, tag)
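The tests above exercise the client against canned HTTP exchanges from the new common/testutil package rather than the old test package, as the commit message notes. A self-contained sketch of that fixture pattern follows, using only testutil identifiers that appear in the diff; the route, the response body, and the assumption that testutil.NewHandler returns an http.Handler suitable for httptest are illustrative and not confirmed by this commit.

package client

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/docker/docker-registry/common/testutil"
)

func TestCannedRegistrySketch(t *testing.T) {
	// One canned request/response pair. Method, Route, StatusCode, and Body
	// are fields shown in the diff above; the route and payload are made up.
	handler := testutil.NewHandler(testutil.RequestResponseMap{
		testutil.RequestResponseMapping{
			Request: testutil.Request{
				Method: "GET",
				Route:  "/v2/hello-world/tags/list",
			},
			Response: testutil.Response{
				StatusCode: http.StatusOK,
				Body:       []byte(`{"name": "hello-world", "tags": ["latest"]}`),
			},
		},
	})

	// Assumption: NewHandler returns an http.Handler, so it can back a
	// throwaway server; a client pointed at server.URL would then receive
	// the canned response for the route above.
	server := httptest.NewServer(handler)
	defer server.Close()
	_ = server.URL
}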


@@ -9,6 +9,7 @@ import (
 	"sync"
 	"github.com/docker/docker-registry"
+	"github.com/docker/docker-registry/digest"
 )
 var (
@@ -34,7 +35,7 @@ type ObjectStore interface {
 	WriteManifest(name, tag string, manifest *registry.ImageManifest) error
 	// Layer returns a handle to a layer for reading and writing
-	Layer(blobSum string) (Layer, error)
+	Layer(dgst digest.Digest) (Layer, error)
 }
 // Layer is a generic image layer interface.
@@ -56,7 +57,7 @@ type Layer interface {
 type memoryObjectStore struct {
 	mutex           *sync.Mutex
 	manifestStorage map[string]*registry.ImageManifest
-	layerStorage    map[string]Layer
+	layerStorage    map[digest.Digest]Layer
 }
 func (objStore *memoryObjectStore) Manifest(name, tag string) (*registry.ImageManifest, error) {
@@ -78,14 +79,14 @@ func (objStore *memoryObjectStore) WriteManifest(name, tag string, manifest *reg
 	return nil
 }
-func (objStore *memoryObjectStore) Layer(blobSum string) (Layer, error) {
+func (objStore *memoryObjectStore) Layer(dgst digest.Digest) (Layer, error) {
 	objStore.mutex.Lock()
 	defer objStore.mutex.Unlock()
-	layer, ok := objStore.layerStorage[blobSum]
+	layer, ok := objStore.layerStorage[dgst]
 	if !ok {
 		layer = &memoryLayer{cond: sync.NewCond(new(sync.Mutex))}
-		objStore.layerStorage[blobSum] = layer
+		objStore.layerStorage[dgst] = layer
 	}
 	return layer, nil


@@ -1,4 +1,4 @@
-package test
+package testutil
 import (
 	"bytes"


@@ -3,6 +3,8 @@ package registry
 import (
 	"fmt"
 	"strings"
+	"github.com/docker/docker-registry/digest"
 )
 // ErrorCode represents the error type. The errors are serialized via strings
@@ -228,7 +230,7 @@ func (e *ImageManifestNotFoundError) Error() string {
 // layer that does not exist in the registry.
 type BlobNotFoundError struct {
 	Name   string
-	Digest string
+	Digest digest.Digest
 }
 func (e *BlobNotFoundError) Error() string {


@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"net/http"
+	"github.com/docker/docker-registry/digest"
 	"github.com/gorilla/handlers"
 )
@@ -52,7 +53,7 @@ func (m *ImageManifest) UnmarshalJSON(b []byte) error {
 // FSLayer is a container struct for BlobSums defined in an image manifest
 type FSLayer struct {
 	// BlobSum is the tarsum of the referenced filesystem image layer
-	BlobSum string `json:"blobSum"`
+	BlobSum digest.Digest `json:"blobSum"`
 }
 // ManifestHistory stores unstructured v1 compatibility information