package storage

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"testing"

	"github.com/docker/distribution/common/testutil"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/storagedriver"
	"github.com/docker/distribution/storagedriver/inmemory"
)

// TestSimpleLayerUpload covers the layer upload process, exercising common
// error paths that might be seen during an upload.
func TestSimpleLayerUpload(t *testing.T) {
	randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random reader: %v", err)
	}

	dgst := digest.Digest(tarSumStr)

	uploadStore, err := newTemporaryLocalFSLayerUploadStore()
	if err != nil {
		t.Fatalf("error allocating upload store: %v", err)
	}

	imageName := "foo/bar"
	driver := inmemory.New()

	ls := &layerStore{
		driver: driver,
		pathMapper: &pathMapper{
			root:    "/storage/testing",
			version: storagePathVersion,
		},
		uploadStore: uploadStore,
	}

	// Tee the upload data through a sha256 hash so the stored content can be
	// verified against it after the upload completes.
	h := sha256.New()
	rd := io.TeeReader(randomDataReader, h)

	layerUpload, err := ls.Upload(imageName)
	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Cancel the upload then restart it
	if err := layerUpload.Cancel(); err != nil {
		t.Fatalf("unexpected error during upload cancellation: %v", err)
	}

	// Do a resume, get unknown upload
	layerUpload, err = ls.Resume(layerUpload.UUID())
	if err != ErrLayerUploadUnknown {
		t.Fatalf("unexpected error resuming upload, should be unknown: %v", err)
	}

	// Restart!
	layerUpload, err = ls.Upload(imageName)
	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Get the size of our random tarfile
	randomDataSize, err := seekerSize(randomDataReader)
	if err != nil {
		t.Fatalf("error getting seeker size of random data: %v", err)
	}

	nn, err := io.Copy(layerUpload, rd)
	if err != nil {
		t.Fatalf("unexpected error uploading layer data: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("layer data write incomplete")
	}

	if layerUpload.Offset() != nn {
		t.Fatalf("layerUpload not updated with correct offset: %v != %v", layerUpload.Offset(), nn)
	}
	layerUpload.Close()

	// Do a resume, for good fun
	layerUpload, err = ls.Resume(layerUpload.UUID())
	if err != nil {
		t.Fatalf("unexpected error resuming upload: %v", err)
	}

	sha256Digest := digest.NewDigest("sha256", h)
	layer, err := layerUpload.Finish(randomDataSize, dgst)
	if err != nil {
		t.Fatalf("unexpected error finishing layer upload: %v", err)
	}

	// After finishing an upload, it should no longer exist.
	if _, err := ls.Resume(layerUpload.UUID()); err != ErrLayerUploadUnknown {
		t.Fatalf("expected layer upload to be unknown, got %v", err)
	}

	// Test for existence.
	exists, err := ls.Exists(layer.Name(), layer.Digest())
	if err != nil {
		t.Fatalf("unexpected error checking for existence: %v", err)
	}

	if !exists {
		t.Fatalf("layer should now exist")
	}

	// Read the layer back and verify its sha256 digest against the one
	// computed while uploading.
	h.Reset()
	nn, err = io.Copy(h, layer)
	if err != nil {
		t.Fatalf("error reading layer: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("incorrect read length")
	}

	if digest.NewDigest("sha256", h) != sha256Digest {
		t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest)
	}
}

// TestSimpleLayerRead just creates a simple layer file and ensures that basic
// open, read, seek, read works. More specific edge cases should be covered in
// other tests.
func TestSimpleLayerRead(t *testing.T) {
	imageName := "foo/bar"
	driver := inmemory.New()
	ls := &layerStore{
		driver: driver,
		pathMapper: &pathMapper{
			root:    "/storage/testing",
			version: storagePathVersion,
		},
	}

	randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random data: %v", err)
	}

	dgst := digest.Digest(tarSumStr)

	// Test for existence.
	exists, err := ls.Exists(imageName, dgst)
	if err != nil {
		t.Fatalf("unexpected error checking for existence: %v", err)
	}

	if exists {
		t.Fatalf("layer should not exist")
	}

	// Try to get the layer and make sure we get a not found error
	layer, err := ls.Fetch(imageName, dgst)
	if err == nil {
		t.Fatalf("error expected fetching unknown layer")
	}

	switch err.(type) {
	case ErrUnknownLayer:
		err = nil
	default:
		t.Fatalf("unexpected error fetching non-existent layer: %v", err)
	}

	randomLayerDigest, err := writeTestLayer(driver, ls.pathMapper, imageName, dgst, randomLayerReader)
	if err != nil {
		t.Fatalf("unexpected error writing test layer: %v", err)
	}

	randomLayerSize, err := seekerSize(randomLayerReader)
	if err != nil {
		t.Fatalf("error getting seeker size for random layer: %v", err)
	}

	layer, err = ls.Fetch(imageName, dgst)
	if err != nil {
		t.Fatal(err)
	}
	defer layer.Close()

	// Now check the sha digest and ensure it's the same
	h := sha256.New()
	nn, err := io.Copy(h, layer)
	if err != nil && err != io.EOF {
		t.Fatalf("unexpected error copying to hash: %v", err)
	}

	if nn != randomLayerSize {
		t.Fatalf("stored incorrect number of bytes in layer: %d != %d", nn, randomLayerSize)
	}

	sha256Digest := digest.NewDigest("sha256", h)
	if sha256Digest != randomLayerDigest {
		t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, randomLayerDigest)
	}

	// Now seek back the layer, read the whole thing and check against randomLayerData
	offset, err := layer.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error seeking layer: %v", err)
	}

	if offset != 0 {
		t.Fatalf("seek failed: expected 0 offset, got %d", offset)
	}

	p, err := ioutil.ReadAll(layer)
	if err != nil {
		t.Fatalf("error reading all of layer: %v", err)
	}

	if len(p) != int(randomLayerSize) {
		t.Fatalf("layer data read has different length: %v != %v", len(p), randomLayerSize)
	}

	// Reset the randomLayerReader and read back the buffer
	_, err = randomLayerReader.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error resetting layer reader: %v", err)
	}

	randomLayerData, err := ioutil.ReadAll(randomLayerReader)
	if err != nil {
		t.Fatalf("random layer read failed: %v", err)
	}

	if !bytes.Equal(p, randomLayerData) {
		t.Fatalf("layer data not equal")
	}
}

// writeRandomLayer creates a random layer under name and tarSum using driver
// and pathMapper. An io.ReadSeeker with the data is returned, along with the
// sha256 hex digest.
func writeRandomLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string) (rs io.ReadSeeker, tarSum digest.Digest, sha256digest digest.Digest, err error) {
	reader, tarSumStr, err := testutil.CreateRandomTarFile()
	if err != nil {
		return nil, "", "", err
	}

	tarSum = digest.Digest(tarSumStr)

	// Now, actually create the layer.
	randomLayerDigest, err := writeTestLayer(driver, pathMapper, name, tarSum, ioutil.NopCloser(reader))
	if err != nil {
		return nil, "", "", err
	}

	if _, err := reader.Seek(0, os.SEEK_SET); err != nil {
		return nil, "", "", err
	}

	return reader, tarSum, randomLayerDigest, err
}

// seekerSize seeks to the end of seeker, checks the size and returns it to
// the original state, returning the size. The state of the seeker should be
// treated as unknown if an error is returned.
func seekerSize(seeker io.ReadSeeker) (int64, error) {
	current, err := seeker.Seek(0, os.SEEK_CUR)
	if err != nil {
		return 0, err
	}

	end, err := seeker.Seek(0, os.SEEK_END)
	if err != nil {
		return 0, err
	}

	resumed, err := seeker.Seek(current, os.SEEK_SET)
	if err != nil {
		return 0, err
	}

	if resumed != current {
		return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location")
	}

	return end, nil
}
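
// A minimal usage sketch (an illustration, mirroring how the tests above call
// seekerSize): because the read position is restored, it can be called before
// the reader is consumed, and the subsequent copy should report the same
// number of bytes:
//
//	size, err := seekerSize(randomDataReader)
//	// handle err ...
//	nn, err := io.Copy(layerUpload, randomDataReader)
//	// nn should equal size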

// writeTestLayer creates a simple test layer in the provided driver under
// tarsum dgst, returning the sha256 digest location. This is implemented
// piecemeal and should probably be replaced by the uploader when it's ready.
func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) {
	h := sha256.New()
	rd := io.TeeReader(content, h)

	p, err := ioutil.ReadAll(rd)
	if err != nil {
		return "", err
	}

	blobDigestSHA := digest.NewDigest("sha256", h)

	blobPath, err := pathMapper.path(blobPathSpec{
		digest: dgst,
	})
	if err != nil {
		return "", err
	}

	if err := driver.PutContent(blobPath, p); err != nil {
		return "", err
	}

	layerLinkPath, err := pathMapper.path(layerLinkPathSpec{
		name:   name,
		digest: dgst,
	})
	if err != nil {
		return "", err
	}

	// The layer link stores the digest string, pointing the repository name
	// at the blob content written above.
	if err := driver.PutContent(layerLinkPath, []byte(dgst)); err != nil {
		return "", err
	}

	return blobDigestSHA, err
|
2014-11-18 00:29:42 +00:00
|
|
|
}
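
// Note on the layout written by writeTestLayer (a summary of the code above,
// not a description of the full path scheme): the blob content p is stored at
// the path derived from blobPathSpec{digest: dgst}, and a layer link at
// layerLinkPathSpec{name, digest} holds the digest string, tying the
// repository name to that blob.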