forked from TrueCloudLab/distribution

Compare commits: 63 commits (main ... release/2.)
Commits (SHA1):
e57c13f3cb, ca16795e85, b3685f4ea1, 1341222284, 874e1fdf3c, 6fc30199fe,
b5b69a053b, 9ad3ab1d4f, b824f2ac39, d18399f0bf, ab492bb962, 05a9847f53,
7bfb2c1e19, edda3edb26, 1a181e8887, fb499fd607, 71b07878ef, a1fc110891,
03a529171b, 4a3e107c92, ccef5cc0a6, a588f49425, 7cae65efd0, 4d232aaa4f,
e8675e69ad, e38fa8bff8, 61ce5f91ba, c0db47e76e, e0b3f40994, e04c70235a,
269286192d, f132ff7702, a72fb20b85, 84559affdc, 46e1d28070, 1c51db293d,
92ee0fa837, b6def3be1a, 81c465cef0, b645555422, f3443f8f64, af0c2625e0,
bccca791ad, 3020aa0fe8, 6cb2104945, 56b18134fa, 4686b3c0f4, 96f1e85396,
c06c6ba3bf, dd0effe29a, 7ad5bf7912, 454a57ad42, ef74e93987, d58bf9ed47,
ecf9af9ec3, a1801b0ebe, 560a10e6ac, e5bba7fef0, ec0f53a8f6, 06405082cb,
d180626e0d, a61ba68bda, b46949acc3
68 changed files with 1828 additions and 411 deletions
@@ -49,7 +49,7 @@ Then immediately submit this new file as a pull-request, in order to get early f

 Eventually, you will have to update your proposal to accommodate the feedback you received.

-Usually, it's not advisable to start working too much on the implementation itself before the proposal receives sufficient feedback, since it can significantly altered (or rejected).
+Usually, it's not advisable to start working too much on the implementation itself before the proposal receives sufficient feedback, since it can be significantly altered (or rejected).

 Your implementation should then be submitted as a separate PR, that will be reviewed as well.
Godeps/Godeps.json (6 changes; generated)
@@ -12,15 +12,15 @@
 },
 {
     "ImportPath": "github.com/AdRoll/goamz/aws",
-    "Rev": "d3664b76d90508cdda5a6c92042f26eab5db3103"
+    "Rev": "cc210f45dcb9889c2769a274522be2bf70edfb99"
 },
 {
     "ImportPath": "github.com/AdRoll/goamz/cloudfront",
-    "Rev": "d3664b76d90508cdda5a6c92042f26eab5db3103"
+    "Rev": "cc210f45dcb9889c2769a274522be2bf70edfb99"
 },
 {
     "ImportPath": "github.com/AdRoll/goamz/s3",
-    "Rev": "d3664b76d90508cdda5a6c92042f26eab5db3103"
+    "Rev": "cc210f45dcb9889c2769a274522be2bf70edfb99"
 },
 {
     "ImportPath": "github.com/MSOpenTech/azure-sdk-for-go/storage",
Godeps/_workspace/src/github.com/AdRoll/goamz/aws/aws.go (7 changes; generated, vendored)
@@ -62,6 +62,7 @@ type Region struct {
     SESEndpoint string
     IAMEndpoint string
     ELBEndpoint string
+    KMSEndpoint string
     DynamoDBEndpoint string
     CloudWatchServicepoint ServiceInfo
     AutoScalingEndpoint string

@@ -83,6 +84,7 @@ var Regions = map[string]Region{
     USWest2.Name: USWest2,
     USGovWest.Name: USGovWest,
     SAEast.Name: SAEast,
+    CNNorth1.Name: CNNorth1,
 }

 // Designates a signer interface suitable for signing AWS requests, params

@@ -208,7 +210,10 @@ func (a *Auth) Token() string {
         return ""
     }
     if time.Since(a.expiration) >= -30*time.Second { //in an ideal world this should be zero assuming the instance is synching it's clock
-        *a, _ = GetAuth("", "", "", time.Time{})
+        auth, err := GetAuth("", "", "", time.Time{})
+        if err == nil {
+            *a = auth
+        }
     }
     return a.token
 }
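The `Token()` change above replaces a blind overwrite of the cached credentials with a guarded one: a failed metadata refresh no longer clobbers a still-usable near-expiry token. A minimal, self-contained sketch of that pattern; the `Credentials` and `fetchCredentials` names here are illustrative stand-ins, not goamz API:

```go
package main

import (
	"fmt"
	"time"
)

// Credentials is a stand-in for goamz's aws.Auth.
type Credentials struct {
	token      string
	expiration time.Time
}

// fetchCredentials is a stand-in for aws.GetAuth; assume it may fail.
func fetchCredentials() (Credentials, error) {
	return Credentials{token: "fresh", expiration: time.Now().Add(time.Hour)}, nil
}

// Token refreshes shortly before expiry, but keeps the old credentials
// when the refresh fails instead of zeroing them out.
func (c *Credentials) Token() string {
	if time.Since(c.expiration) >= -30*time.Second {
		if fresh, err := fetchCredentials(); err == nil {
			*c = fresh // only replace on success
		}
	}
	return c.token
}

func main() {
	c := Credentials{token: "stale", expiration: time.Now()}
	fmt.Println(c.Token())
}
```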
Godeps/_workspace/src/github.com/AdRoll/goamz/aws/regions.go (34 changes; generated, vendored)
@@ -13,6 +13,7 @@ var USGovWest = Region{
     "",
     "https://iam.us-gov.amazonaws.com",
     "https://elasticloadbalancing.us-gov-west-1.amazonaws.com",
+    "",
     "https://dynamodb.us-gov-west-1.amazonaws.com",
     ServiceInfo{"https://monitoring.us-gov-west-1.amazonaws.com", V2Signature},
     "https://autoscaling.us-gov-west-1.amazonaws.com",

@@ -36,6 +37,7 @@ var USEast = Region{
     "https://email.us-east-1.amazonaws.com",
     "https://iam.amazonaws.com",
    "https://elasticloadbalancing.us-east-1.amazonaws.com",
+    "https://kms.us-east-1.amazonaws.com",
     "https://dynamodb.us-east-1.amazonaws.com",
     ServiceInfo{"https://monitoring.us-east-1.amazonaws.com", V2Signature},
     "https://autoscaling.us-east-1.amazonaws.com",

@@ -59,6 +61,7 @@ var USWest = Region{
     "",
     "https://iam.amazonaws.com",
     "https://elasticloadbalancing.us-west-1.amazonaws.com",
+    "https://kms.us-west-1.amazonaws.com",
     "https://dynamodb.us-west-1.amazonaws.com",
     ServiceInfo{"https://monitoring.us-west-1.amazonaws.com", V2Signature},
     "https://autoscaling.us-west-1.amazonaws.com",

@@ -82,6 +85,7 @@ var USWest2 = Region{
     "https://email.us-west-2.amazonaws.com",
     "https://iam.amazonaws.com",
     "https://elasticloadbalancing.us-west-2.amazonaws.com",
+    "https://kms.us-west-2.amazonaws.com",
     "https://dynamodb.us-west-2.amazonaws.com",
     ServiceInfo{"https://monitoring.us-west-2.amazonaws.com", V2Signature},
     "https://autoscaling.us-west-2.amazonaws.com",

@@ -105,6 +109,7 @@ var EUWest = Region{
     "https://email.eu-west-1.amazonaws.com",
     "https://iam.amazonaws.com",
     "https://elasticloadbalancing.eu-west-1.amazonaws.com",
+    "https://kms.eu-west-1.amazonaws.com",
     "https://dynamodb.eu-west-1.amazonaws.com",
     ServiceInfo{"https://monitoring.eu-west-1.amazonaws.com", V2Signature},
     "https://autoscaling.eu-west-1.amazonaws.com",

@@ -128,6 +133,7 @@ var EUCentral = Region{
     "",
     "https://iam.amazonaws.com",
     "https://elasticloadbalancing.eu-central-1.amazonaws.com",
+    "https://kms.eu-central-1.amazonaws.com",
     "https://dynamodb.eu-central-1.amazonaws.com",
     ServiceInfo{"https://monitoring.eu-central-1.amazonaws.com", V2Signature},
     "https://autoscaling.eu-central-1.amazonaws.com",

@@ -151,6 +157,7 @@ var APSoutheast = Region{
     "",
     "https://iam.amazonaws.com",
     "https://elasticloadbalancing.ap-southeast-1.amazonaws.com",
+    "https://kms.ap-southeast-1.amazonaws.com",
     "https://dynamodb.ap-southeast-1.amazonaws.com",
     ServiceInfo{"https://monitoring.ap-southeast-1.amazonaws.com", V2Signature},
     "https://autoscaling.ap-southeast-1.amazonaws.com",

@@ -174,6 +181,7 @@ var APSoutheast2 = Region{
     "",
     "https://iam.amazonaws.com",
     "https://elasticloadbalancing.ap-southeast-2.amazonaws.com",
+    "https://kms.ap-southeast-2.amazonaws.com",
     "https://dynamodb.ap-southeast-2.amazonaws.com",
     ServiceInfo{"https://monitoring.ap-southeast-2.amazonaws.com", V2Signature},
     "https://autoscaling.ap-southeast-2.amazonaws.com",

@@ -197,6 +205,7 @@ var APNortheast = Region{
     "",
     "https://iam.amazonaws.com",
     "https://elasticloadbalancing.ap-northeast-1.amazonaws.com",
+    "https://kms.ap-northeast-1.amazonaws.com",
     "https://dynamodb.ap-northeast-1.amazonaws.com",
     ServiceInfo{"https://monitoring.ap-northeast-1.amazonaws.com", V2Signature},
     "https://autoscaling.ap-northeast-1.amazonaws.com",

@@ -220,6 +229,7 @@ var SAEast = Region{
     "",
     "https://iam.amazonaws.com",
     "https://elasticloadbalancing.sa-east-1.amazonaws.com",
+    "https://kms.sa-east-1.amazonaws.com",
     "https://dynamodb.sa-east-1.amazonaws.com",
     ServiceInfo{"https://monitoring.sa-east-1.amazonaws.com", V2Signature},
     "https://autoscaling.sa-east-1.amazonaws.com",

@@ -229,3 +239,27 @@
     "https://cloudformation.sa-east-1.amazonaws.com",
     "https://elasticache.sa-east-1.amazonaws.com",
 }
+
+var CNNorth1 = Region{
+    "cn-north-1",
+    "https://ec2.cn-north-1.amazonaws.com.cn",
+    "https://s3.cn-north-1.amazonaws.com.cn",
+    "",
+    true,
+    true,
+    "",
+    "https://sns.cn-north-1.amazonaws.com.cn",
+    "https://sqs.cn-north-1.amazonaws.com.cn",
+    "",
+    "https://iam.cn-north-1.amazonaws.com.cn",
+    "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn",
+    "",
+    "https://dynamodb.cn-north-1.amazonaws.com.cn",
+    ServiceInfo{"https://monitoring.cn-north-1.amazonaws.com.cn", V4Signature},
+    "https://autoscaling.cn-north-1.amazonaws.com.cn",
+    ServiceInfo{"https://rds.cn-north-1.amazonaws.com.cn", V4Signature},
+    "",
+    "https://sts.cn-north-1.amazonaws.com.cn",
+    "",
+    "",
+}
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3.go (25 changes; generated, vendored)
@@ -25,6 +25,7 @@ import (
     "net/http"
     "net/http/httputil"
     "net/url"
+    "path"
     "strconv"
     "strings"
     "time"

@@ -70,9 +71,8 @@ type Options struct {
     ContentMD5 string
     ContentDisposition string
     Range string
+    StorageClass StorageClass
     // What else?
-    //// The following become headers so they are []strings rather than strings... I think
-    // x-amz-storage-class []string
 }

 type CopyOptions struct {

@@ -96,7 +96,7 @@ var attempts = aws.AttemptStrategy{

 // New creates a new S3.
 func New(auth aws.Auth, region aws.Region) *S3 {
-    return &S3{auth, region, 0, 0, 0, aws.V2Signature}
+    return &S3{auth, region, 0, 0, aws.V2Signature, 0}
 }

 // Bucket returns a Bucket with the given name.

@@ -164,6 +164,13 @@ const (
     BucketOwnerFull = ACL("bucket-owner-full-control")
 )

+type StorageClass string
+
+const (
+    ReducedRedundancy = StorageClass("REDUCED_REDUNDANCY")
+    StandardStorage   = StorageClass("STANDARD")
+)
+
 // PutBucket creates a new bucket.
 //
 // See http://goo.gl/ndjnR for details.

@@ -401,6 +408,10 @@ func (o Options) addHeaders(headers map[string][]string) {
     if len(o.ContentDisposition) != 0 {
         headers["Content-Disposition"] = []string{o.ContentDisposition}
     }
+    if len(o.StorageClass) != 0 {
+        headers["x-amz-storage-class"] = []string{string(o.StorageClass)}
+
+    }
     for k, v := range o.Meta {
         headers["x-amz-meta-"+k] = v
     }

@@ -816,8 +827,8 @@ func (b *Bucket) SignedURLWithMethod(method, path string, expires time.Time, par
 // UploadSignedURL returns a signed URL that allows anyone holding the URL
 // to upload the object at path. The signature is valid until expires.
 // contenttype is a string like image/png
-// path is the resource name in s3 terminalogy like images/ali.png [obviously exclusing the bucket name itself]
-func (b *Bucket) UploadSignedURL(path, method, content_type string, expires time.Time) string {
+// name is the resource name in s3 terminology like images/ali.png [obviously excluding the bucket name itself]
+func (b *Bucket) UploadSignedURL(name, method, content_type string, expires time.Time) string {
     expire_date := expires.Unix()
     if method != "POST" {
         method = "PUT"

@@ -830,7 +841,7 @@ func (b *Bucket) UploadSignedURL(path, method, content_type string, expires time
         tokenData = "x-amz-security-token:" + a.Token() + "\n"
     }

-    stringToSign := method + "\n\n" + content_type + "\n" + strconv.FormatInt(expire_date, 10) + "\n" + tokenData + "/" + b.Name + "/" + path
+    stringToSign := method + "\n\n" + content_type + "\n" + strconv.FormatInt(expire_date, 10) + "\n" + tokenData + "/" + path.Join(b.Name, name)
     secretKey := a.SecretKey
     accessId := a.AccessKey
     mac := hmac.New(sha1.New, []byte(secretKey))

@@ -844,7 +855,7 @@ func (b *Bucket) UploadSignedURL(path, method, content_type string, expires time
         log.Println("ERROR sining url for S3 upload", err)
         return ""
     }
-    signedurl.Path += path
+    signedurl.Path = name
     params := url.Values{}
     params.Add("AWSAccessKeyId", accessId)
     params.Add("Expires", strconv.FormatInt(expire_date, 10))
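The new `Options.StorageClass` field surfaces as an `x-amz-storage-class` header on `PUT`; the test added in the next file exercises exactly this call. A minimal usage sketch (bucket name and credentials are placeholders, not values from this change):

```go
package main

import (
	"log"

	"github.com/AdRoll/goamz/aws"
	"github.com/AdRoll/goamz/s3"
)

func main() {
	// Placeholder credentials; aws.GetAuth can also pull these from the environment.
	auth := aws.Auth{AccessKey: "ACCESS", SecretKey: "SECRET"}
	b := s3.New(auth, aws.USEast).Bucket("my-bucket") // hypothetical bucket

	// Store the object under the REDUCED_REDUNDANCY storage class.
	err := b.Put("greeting.txt", []byte("hello"), "text/plain", s3.Private,
		s3.Options{StorageClass: s3.ReducedRedundancy})
	if err != nil {
		log.Fatal(err)
	}
}
```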
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3_test.go (16 changes; generated, vendored)
@@ -230,6 +230,22 @@ func (s *S) TestPutObject(c *check.C) {
     c.Assert(req.Header["X-Amz-Acl"], check.DeepEquals, []string{"private"})
 }

+func (s *S) TestPutObjectReducedRedundancy(c *check.C) {
+    testServer.Response(200, nil, "")
+
+    b := s.s3.Bucket("bucket")
+    err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{StorageClass: s3.ReducedRedundancy})
+    c.Assert(err, check.IsNil)
+
+    req := testServer.WaitRequest()
+    c.Assert(req.Method, check.Equals, "PUT")
+    c.Assert(req.URL.Path, check.Equals, "/bucket/name")
+    c.Assert(req.Header["Date"], check.Not(check.DeepEquals), []string{""})
+    c.Assert(req.Header["Content-Type"], check.DeepEquals, []string{"content-type"})
+    c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"7"})
+    c.Assert(req.Header["X-Amz-Storage-Class"], check.DeepEquals, []string{"REDUCED_REDUNDANCY"})
+}
+
 // PutCopy docs: http://goo.gl/mhEHtA
 func (s *S) TestPutCopy(c *check.C) {
     testServer.Response(200, nil, PutCopyResultDump)
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3test/server.go (230 changes; generated, vendored)
@@ -11,6 +11,7 @@ import (
     "io"
     "io/ioutil"
     "log"
+    "math/rand"
     "net"
     "net/http"
     "net/url"

@@ -51,6 +52,10 @@ type Config struct {
     // all other regions.
     // http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html
     Send409Conflict bool
+
+    // Address on which to listen. By default, a random port is assigned by the
+    // operating system and the server listens on localhost.
+    ListenAddress string
 }

 func (c *Config) send409Conflict() bool {

@@ -72,10 +77,11 @@ type Server struct {
 }

 type bucket struct {
-    name string
-    acl s3.ACL
-    ctime time.Time
-    objects map[string]*object
+    name             string
+    acl              s3.ACL
+    ctime            time.Time
+    objects          map[string]*object
+    multipartUploads map[string][]*multipartUploadPart
 }

 type object struct {

@@ -86,6 +92,12 @@ type object struct {
     data []byte
 }

+type multipartUploadPart struct {
+    data         []byte
+    etag         string
+    lastModified time.Time
+}
+
 // A resource encapsulates the subject of an HTTP request.
 // The resource referred to may or may not exist
 // when the request is made.

@@ -97,7 +109,13 @@ type resource interface {
 }

 func NewServer(config *Config) (*Server, error) {
-    l, err := net.Listen("tcp", "localhost:0")
+    listenAddress := "localhost:0"
+
+    if config != nil && config.ListenAddress != "" {
+        listenAddress = config.ListenAddress
+    }
+
+    l, err := net.Listen("tcp", listenAddress)
     if err != nil {
         return nil, fmt.Errorf("cannot listen on localhost: %v", err)
     }

@@ -217,10 +235,8 @@ var unimplementedBucketResourceNames = map[string]bool{
 }

 var unimplementedObjectResourceNames = map[string]bool{
-    "uploadId": true,
-    "acl": true,
-    "torrent": true,
-    "uploads": true,
+    "acl":     true,
+    "torrent": true,
 }

 var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?")

@@ -420,7 +436,8 @@ func (r bucketResource) put(a *action) interface{} {
         r.bucket = &bucket{
             name: r.name,
             // TODO default acl
-            objects: make(map[string]*object),
+            objects:          make(map[string]*object),
+            multipartUploads: make(map[string][]*multipartUploadPart),
         }
         a.srv.buckets[r.name] = r.bucket
         created = true

@@ -615,12 +632,29 @@ func (objr objectResource) put(a *action) interface{} {
     // TODO x-amz-server-side-encryption
     // TODO x-amz-storage-class

-    // TODO is this correct, or should we erase all previous metadata?
-    obj := objr.object
-    if obj == nil {
-        obj = &object{
-            name: objr.name,
-            meta: make(http.Header),
-        }
-    }
+    uploadId := a.req.URL.Query().Get("uploadId")
+
+    // Check that the upload ID is valid if this is a multipart upload
+    if uploadId != "" {
+        if _, ok := objr.bucket.multipartUploads[uploadId]; !ok {
+            fatalf(404, "NoSuchUpload", "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.")
+        }
+
+        partNumberStr := a.req.URL.Query().Get("partNumber")
+
+        if partNumberStr == "" {
+            fatalf(400, "InvalidRequest", "Missing partNumber parameter")
+        }
+
+        partNumber, err := strconv.ParseUint(partNumberStr, 10, 32)
+
+        if err != nil {
+            fatalf(400, "InvalidRequest", "partNumber is not a number")
+        }
+
+        // Parts are 1-indexed for multipart uploads
+        if uint(partNumber)-1 != uint(len(objr.bucket.multipartUploads[uploadId])) {
+            fatalf(400, "InvalidRequest", "Invalid part number")
+        }
+    }

@@ -646,26 +680,170 @@ func (objr objectResource) put(a *action) interface{} {
         fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header")
     }

-    // PUT request has been successful - save data and metadata
-    for key, values := range a.req.Header {
-        key = http.CanonicalHeaderKey(key)
-        if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") {
-            obj.meta[key] = values
-        }
-    }
-    obj.data = data
-    obj.checksum = gotHash
-    obj.mtime = time.Now()
-    objr.bucket.objects[objr.name] = obj
+    etag := fmt.Sprintf("\"%x\"", gotHash)
+
+    a.w.Header().Add("ETag", etag)
+
+    if uploadId == "" {
+        // For traditional uploads
+
+        // TODO is this correct, or should we erase all previous metadata?
+        obj := objr.object
+        if obj == nil {
+            obj = &object{
+                name: objr.name,
+                meta: make(http.Header),
+            }
+        }
+
+        // PUT request has been successful - save data and metadata
+        for key, values := range a.req.Header {
+            key = http.CanonicalHeaderKey(key)
+            if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") {
+                obj.meta[key] = values
+            }
+        }
+        obj.data = data
+        obj.checksum = gotHash
+        obj.mtime = time.Now()
+        objr.bucket.objects[objr.name] = obj
+    } else {
+        // For multipart commit
+
+        parts := objr.bucket.multipartUploads[uploadId]
+        part := &multipartUploadPart{
+            data,
+            etag,
+            time.Now(),
+        }
+
+        objr.bucket.multipartUploads[uploadId] = append(parts, part)
+    }

     return nil
 }

 func (objr objectResource) delete(a *action) interface{} {
-    delete(objr.bucket.objects, objr.name)
+    uploadId := a.req.URL.Query().Get("uploadId")
+
+    if uploadId == "" {
+        // Traditional object delete
+        delete(objr.bucket.objects, objr.name)
+    } else {
+        // Multipart commit abort
+        _, ok := objr.bucket.multipartUploads[uploadId]
+
+        if !ok {
+            fatalf(404, "NoSuchUpload", "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.")
+        }
+
+        delete(objr.bucket.multipartUploads, uploadId)
+    }
     return nil
 }
+
+func (objr objectResource) post(a *action) interface{} {
+    // Check if we're initializing a multipart upload
+    if _, ok := a.req.URL.Query()["uploads"]; ok {
+        type multipartInitResponse struct {
+            XMLName  struct{} `xml:"InitiateMultipartUploadResult"`
+            Bucket   string
+            Key      string
+            UploadId string
+        }
+
+        uploadId := strconv.FormatInt(rand.Int63(), 16)
+
+        objr.bucket.multipartUploads[uploadId] = []*multipartUploadPart{}
+
+        return &multipartInitResponse{
+            Bucket:   objr.bucket.name,
+            Key:      objr.name,
+            UploadId: uploadId,
+        }
+    }
+
+    // Check if we're completing a multipart upload
+    if uploadId := a.req.URL.Query().Get("uploadId"); uploadId != "" {
+        type multipartCompleteRequestPart struct {
+            XMLName    struct{} `xml:"Part"`
+            PartNumber uint
+            ETag       string
+        }
+
+        type multipartCompleteRequest struct {
+            XMLName struct{} `xml:"CompleteMultipartUpload"`
+            Part    []multipartCompleteRequestPart
+        }
+
+        type multipartCompleteResponse struct {
+            XMLName  struct{} `xml:"CompleteMultipartUploadResult"`
+            Location string
+            Bucket   string
+            Key      string
+            ETag     string
+        }
+
+        parts, ok := objr.bucket.multipartUploads[uploadId]
+
+        if !ok {
+            fatalf(404, "NoSuchUpload", "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.")
+        }
+
+        req := &multipartCompleteRequest{}
+
+        if err := xml.NewDecoder(a.req.Body).Decode(req); err != nil {
+            fatalf(400, "InvalidRequest", err.Error())
+        }
+
+        if len(req.Part) != len(parts) {
+            fatalf(400, "InvalidRequest", fmt.Sprintf("Number of parts does not match: expected %d, received %d", len(parts), len(req.Part)))
+        }
+
+        sum := md5.New()
+        data := &bytes.Buffer{}
+        w := io.MultiWriter(sum, data)
+
+        for i, p := range parts {
+            reqPart := req.Part[i]
+
+            if reqPart.PartNumber != uint(1+i) {
+                fatalf(400, "InvalidRequest", "Bad part number")
+            }
+
+            if reqPart.ETag != p.etag {
+                fatalf(400, "InvalidRequest", fmt.Sprintf("Invalid etag for part %d", reqPart.PartNumber))
+            }
+
+            w.Write(p.data)
+        }
+
+        delete(objr.bucket.multipartUploads, uploadId)
+
+        obj := objr.object
+
+        if obj == nil {
+            obj = &object{
+                name: objr.name,
+                meta: make(http.Header),
+            }
+        }
+
+        obj.data = data.Bytes()
+        obj.checksum = sum.Sum(nil)
+        obj.mtime = time.Now()
+        objr.bucket.objects[objr.name] = obj
+
+        objectLocation := fmt.Sprintf("http://%s/%s/%s", a.srv.listener.Addr().String(), objr.bucket.name, objr.name)
+
+        return &multipartCompleteResponse{
+            Location: objectLocation,
+            Bucket:   objr.bucket.name,
+            Key:      objr.name,
+            ETag:     uploadId,
+        }
+    }
+
+    fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
+    return nil
+}
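Taken together, the stub now models S3's three-step multipart protocol: a `POST /bucket/key?uploads` to initiate, one `PUT /bucket/key?uploadId=...&partNumber=N` per part (1-indexed), and a closing `POST` carrying a `CompleteMultipartUpload` XML body. A rough client-side sketch of that sequence; it assumes an `s3test.Server` is already listening at the base URL with the bucket created, and the address is a placeholder:

```go
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"net/http"
)

type completedPart struct {
	PartNumber uint
	ETag       string
}

type completeUpload struct {
	XMLName xml.Name `xml:"CompleteMultipartUpload"`
	Part    []completedPart
}

func main() {
	base := "http://localhost:4444/bucket/object" // hypothetical stub address

	// 1. Initiate: POST ?uploads returns an InitiateMultipartUploadResult with an UploadId.
	resp, err := http.Post(base+"?uploads", "text/plain", nil)
	if err != nil {
		panic(err)
	}
	var init struct{ UploadId string }
	xml.NewDecoder(resp.Body).Decode(&init)
	resp.Body.Close()

	// 2. Upload parts: PUT ?uploadId=...&partNumber=N. The stub answers with the
	// part's ETag header, which the final commit must echo back.
	var commit completeUpload
	for i, part := range []string{"hello ", "world"} {
		url := fmt.Sprintf("%s?uploadId=%s&partNumber=%d", base, init.UploadId, i+1)
		req, _ := http.NewRequest("PUT", url, bytes.NewBufferString(part))
		r, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		commit.Part = append(commit.Part, completedPart{uint(i + 1), r.Header.Get("ETag")})
		r.Body.Close()
	}

	// 3. Complete: POST the CompleteMultipartUpload document to stitch the parts together.
	body, _ := xml.Marshal(commit)
	resp, err = http.Post(base+"?uploadId="+init.UploadId, "text/xml", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}
```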
@@ -2,7 +2,7 @@

 The Docker toolset to pack, ship, store, and deliver content.

-This repository's main product is the Docker Registry Service 2.0 implementation
+This repository's main product is the Docker Registry 2.0 implementation
 for storing and distributing Docker images. It supersedes the [docker/docker-
 registry](https://github.com/docker/docker-registry) project with a new API
 design, focused around security and performance.

@@ -15,7 +15,7 @@ This repository contains the following components:
 | **libraries** | A rich set of libraries for interacting with,distribution components. Please see [godoc](http://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
 | **dist** | An _experimental_ tool to provide distribution, oriented functionality without the `docker` daemon. |
 | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
-| **documentation** | Docker's full documentation set is available at [docs.docker.com](http://docs.docker.com). This repository [contains the subset](docs/overview.md) related just to the registry. |
+| **documentation** | Docker's full documentation set is available at [docs.docker.com](http://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. |

 ### How does this integrate with Docker engine?

@@ -69,7 +69,9 @@ may be the better choice.

 ## Contribute

-Please see [CONTRIBUTING.md](CONTRIBUTING.md).
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project. If you are contributing code, see
+the instructions for [building a development environment](docs/building.md).

 ## Support
circle.yml (29 changes)
@@ -6,8 +6,8 @@ machine:

   post:
   # Install many go versions
-    - gvm install go1.3.3 -B --name=old
-    - gvm install go1.4 -B --name=stable
+    # - gvm install go1.3.3 -B --name=old
+    - gvm install go1.4.2 -B --name=stable
     # - gvm install tip --name=bleed

   environment:

@@ -28,10 +28,10 @@ machine:
 dependencies:
   pre:
   # Copy the code to the gopath of all go versions
-    - >
-      gvm use old &&
-      mkdir -p "$(dirname $BASE_OLD)" &&
-      cp -R "$CHECKOUT" "$BASE_OLD"
+    # - >
+    #   gvm use old &&
+    #   mkdir -p "$(dirname $BASE_OLD)" &&
+    #   cp -R "$CHECKOUT" "$BASE_OLD"

     - >
       gvm use stable &&

@@ -45,8 +45,8 @@ dependencies:

   override:
   # Install dependencies for every copied clone/go version
-    - gvm use old && go get github.com/tools/godep:
-        pwd: $BASE_OLD
+    # - gvm use old && go get github.com/tools/godep:
+    #     pwd: $BASE_OLD

     - gvm use stable && go get github.com/tools/godep:
         pwd: $BASE_STABLE

@@ -63,7 +63,7 @@ dependencies:
 test:
   pre:
   # Output the go versions we are going to test
-    - gvm use old && go version
+    # - gvm use old && go version
     - gvm use stable && go version
     # - gvm use bleed && go version

@@ -81,9 +81,9 @@ test:

   override:
   # Test every version we have (but stable)
-    - gvm use old; godep go test -test.v -test.short ./...:
-        timeout: 600
-        pwd: $BASE_OLD
+    # - gvm use old; godep go test -test.v -test.short ./...:
+    #     timeout: 600
+    #     pwd: $BASE_OLD

     # - gvm use bleed; go test -test.v -test.short ./...:
     #     timeout: 600

@@ -103,10 +103,11 @@ test:
     # Aggregate and report to coveralls
     - gvm use stable; go list ./... | xargs -L 1 -I{} cat "$GOPATH/src/{}/coverage.out" | grep -v "$CIRCLE_PAIN" >> ~/goverage.report:
        pwd: $BASE_STABLE
-    - gvm use stable; goveralls -service circleci -coverprofile=/home/ubuntu/goverage.report -repotoken $COVERALLS_TOKEN:
-        pwd: $BASE_STABLE
+    # - gvm use stable; goveralls -service circleci -coverprofile=/home/ubuntu/goverage.report -repotoken $COVERALLS_TOKEN:
+    #     pwd: $BASE_STABLE

   ## Notes
+  # Disabled coveralls reporting: build breaking sending coverage data to coveralls
   # Disabled the -race detector due to massive memory usage.
   # Do we want these as well?
   # - go get code.google.com/p/go.tools/cmd/goimports
@@ -9,6 +9,9 @@ storage:
         layerinfo: inmemory
     filesystem:
         rootdirectory: /tmp/registry-dev
+    maintenance:
+        uploadpurging:
+            enabled: false
 http:
     addr: :5000
     secret: asecretforlocaldevelopment

@@ -39,3 +42,4 @@ notifications:
                 threshold: 10
                 backoff: 1s
+                disabled: true
@@ -1,125 +0,0 @@
Docker-Registry Configuration
=============================

This document describes the registry configuration model and how to specify a custom configuration with a configuration file and/or environment variables.

Semantic-ish Versioning
-----------------------

The configuration file is designed with versioning in mind, such that most upgrades will not require a change in configuration files, and such that configuration files can be "upgraded" from one version to another.

The version is specified as a string of the form `MajorVersion.MinorVersion`, where MajorVersion and MinorVersion are both non-negative integer values. Much like [semantic versioning](http://semver.org/), minor version increases denote inherently backwards-compatible changes, such as the addition of optional fields, whereas major version increases denote a restructuring, such as renaming fields or adding required fields. Because of the explicit version definition in the configuration file, it should be possible to parse old configuration files and port them to the current configuration version, although this is not guaranteed for all future versions.

File Structure (as of Version 0.1)
------------------------------------

The configuration structure is defined by the `Configuration` struct in `configuration.go`, and is best described by the following two examples:

```yaml
version: 0.1
loglevel: info
storage:
  s3:
    region: us-east-1
    bucket: my-bucket
    rootdirectory: /registry
    encrypt: true
    secure: false
    accesskey: SAMPLEACCESSKEY
    secretkey: SUPERSECRET
    host: ~
    port: ~
auth:
  silly:
    realm: test-realm
    service: my-service
reporting:
  bugsnag:
    apikey: mybugsnagapikey
    releasestage: development
  newrelic:
    licensekey: mynewreliclicensekey
    name: docker-distribution
http:
  addr: 0.0.0.0:5000
  secret: mytokensecret
```

```yaml
version: 0.1
loglevel: debug
storage: inmemory
```

### version
The version is expected to remain a top-level field, as to allow for a consistent version check before parsing the remainder of the configuration file.

### loglevel
This specifies the log level of the registry.

Supported values:
* `error`
* `warn`
* `info`
* `debug`

### storage
This specifies the storage driver, and may be provided either as a string (only the driver type) or as a driver name with a parameters map, as seen in the first example above.

The parameters map will be passed into the factory constructor of the given storage driver type.

### auth
This specifies the authorization method the registry will use, and is provided as an auth type with a parameters map.

The parameters map will be passed into the factory constructor of the given auth type.

### reporting
This specifies metrics/error reporting systems which the registry will forward information about stats/errors to. There are currently two supported systems, which are documented below.

#### bugsnag
Reports http errors and panics to [bugsnag](https://bugsnag.com).

##### apikey
(Required for bugsnag use) Specifies the bugsnag API Key for authenticating to your account.

##### releasestage
(Optional) Tracks the stage at which the registry is deployed. For example: "production", "staging", "development".

##### endpoint
(Optional) Used for specifying an enterprise bugsnag endpoint other than https://bugsnag.com.

#### newrelic
Reports heap, goroutine, and http stats to [NewRelic](https://newrelic.com).

##### licensekey
(Required for newrelic use) Specifies the NewRelic License Key for authenticating to your account.

##### name
(Optional) Specifies the component name that is displayed in the NewRelic panel.

### http
This is used for HTTP transport-specific configuration options.

#### addr
Specifies the bind address for the registry instance. Example: 0.0.0.0:5000

#### secret
Specifies the secret key with which query-string HMAC tokens are generated.

### Notes

All keys in the configuration file **must** be provided as a string of lowercase letters and numbers only, and values must be string-like (booleans and numerical values are fine to parse as strings).

Environment Variables
---------------------

To support the workflow of running a docker registry from a standard container without having to modify configuration files, the registry configuration also supports environment variables for overriding fields.

Any configuration field other than version can be replaced by providing an environment variable of the following form: `REGISTRY_<uppercase key>[_<uppercase key>]...`.

For example, to change the loglevel to `error`, one can provide `REGISTRY_LOGLEVEL=error`, and to change the s3 storage driver's region parameter to `us-west-1`, one can provide `REGISTRY_STORAGE_S3_REGION=us-west-1`.

### Notes
If an environment variable changes a map value into a string, such as replacing the storage driver type with `REGISTRY_STORAGE=filesystem`, then all sub-fields will be erased. As such, specifying the storage type in the environment will remove all parameters related to the old storage configuration.

By restricting all keys in the configuration file to lowercase letters and numbers, we can avoid any potential environment variable mapping ambiguity.
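The override scheme this (now removed) document describes maps an underscore-separated environment variable onto a path of nested YAML keys, and is unambiguous precisely because keys may contain only lowercase letters and numbers. A minimal sketch of that resolution over a parsed config; the function and its behavior are illustrative, not the registry's actual implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// applyOverride walks the nested config map along the underscore-separated
// path of a variable such as REGISTRY_STORAGE_S3_REGION and sets the leaf.
// Splitting on "_" is safe only because config keys contain no underscores.
func applyOverride(config map[string]interface{}, envKey, value string) {
	path := strings.Split(strings.TrimPrefix(strings.ToLower(envKey), "registry_"), "_")
	m := config
	for _, key := range path[:len(path)-1] {
		child, ok := m[key].(map[string]interface{})
		if !ok {
			child = map[string]interface{}{}
			m[key] = child // replacing a scalar erases its previous sub-fields
		}
		m = child
	}
	m[path[len(path)-1]] = value
}

func main() {
	config := map[string]interface{}{
		"storage": map[string]interface{}{
			"s3": map[string]interface{}{"region": "us-east-1"},
		},
	}
	applyOverride(config, "REGISTRY_STORAGE_S3_REGION", "us-west-1")
	fmt.Println(config) // map[storage:map[s3:map[region:us-west-1]]]
}
```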
@@ -188,6 +188,8 @@ func (storage Storage) Type() string {
     // Return only key in this map
     for k := range storage {
         switch k {
         case "maintenance":
             // allow configuration of maintenance
+        case "cache":
+            // allow configuration of caching
         default:

@@ -217,6 +219,8 @@ func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
     types := make([]string, 0, len(storageMap))
     for k := range storageMap {
         switch k {
         case "maintenance":
             // allow for configuration of maintenance
+        case "cache":
+            // allow configuration of caching
         default:
@@ -302,7 +302,7 @@ func (irw *instrumentedResponseWriter) Flush() {
 func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
     if keyStr, ok := key.(string); ok {
         if keyStr == "http.response" {
-            return irw.ResponseWriter
+            return irw
         }

         if !strings.HasPrefix(keyStr, "http.response.") {

@@ -322,9 +322,7 @@ func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
         case "written":
             return irw.written
         case "status":
-            if irw.status != 0 {
-                return irw.status
-            }
+            return irw.status
         case "contenttype":
             contentType := irw.Header().Get("Content-Type")
             if contentType != "" {
@@ -132,8 +132,21 @@ func TestWithResponseWriter(t *testing.T) {
     trw := testResponseWriter{}
     ctx, rw := WithResponseWriter(Background(), &trw)

-    if ctx.Value("http.response") != &trw {
-        t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), &trw)
+    if ctx.Value("http.response") != rw {
+        t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw)
+    }
+
+    grw, err := GetResponseWriter(ctx)
+    if err != nil {
+        t.Fatalf("error getting response writer: %v", err)
+    }
+
+    if grw != rw {
+        t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw)
     }

     if ctx.Value("http.response.status") != 0 {
         t.Fatalf("response status should always be a number and should be zero here: %v != 0", ctx.Value("http.response.status"))
     }

     if n, err := rw.Write(make([]byte, 1024)); err != nil {
contrib/apache/README.MD (36 additions; new file)
@@ -0,0 +1,36 @@
# Apache HTTPd sample for Registry v1, v2 and mirror

3 containers involved

* Docker Registry v1 (registry 0.9.1)
* Docker Registry v2 (registry 2.0.0)
* Docker Registry v1 in mirror mode

HTTP for mirror and HTTPS for v1 & v2

* http://registry.example.com proxies Docker Registry 1.0 in Mirror mode
* https://registry.example.com proxies Docker Registry 1.0 or 2.0 in Hosting mode

## 3 Docker containers should be started

* Docker Registry 1.0 in Mirror mode : port 5001
* Docker Registry 1.0 in Hosting mode : port 5000
* Docker Registry 2.0 in Hosting mode : port 5002

### Registry v1

    docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/docker-registry/storage/hosting-v1:/tmp -p 5000:5000 registry:0.9.1

### Mirror

    docker run -d -e SETTINGS_FLAVOR=dev -e STANDALONE=false -e MIRROR_SOURCE=https://registry-1.docker.io -e MIRROR_SOURCE_INDEX=https://index.docker.io \
      -e MIRROR_TAGS_CACHE_TTL=172800 -v /var/lib/docker-registry/storage/mirror:/tmp -p 5001:5000 registry:0.9.1

### Registry v2

    docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/axway/docker-registry/storage/hosting2-v2:/tmp -p 5002:5000 registry:2.0

# For Hosting mode access

* users should have an account (valid-user) to be able to fetch images
* only users using account docker-deployer will be allowed to push images
contrib/apache/apache.conf (127 additions; new file)
@@ -0,0 +1,127 @@
#
# Sample Apache 2.x configuration where :
#

<VirtualHost *:80>

  ServerName registry.example.com
  ServerAlias www.registry.example.com

  ProxyRequests off
  ProxyPreserveHost on

  # no proxy for /error/ (Apache HTTPd errors messages)
  ProxyPass /error/ !

  ProxyPass /_ping http://localhost:5001/_ping
  ProxyPassReverse /_ping http://localhost:5001/_ping

  ProxyPass /v1 http://localhost:5001/v1
  ProxyPassReverse /v1 http://localhost:5001/v1

  # Logs
  ErrorLog ${APACHE_LOG_DIR}/mirror_error_log
  CustomLog ${APACHE_LOG_DIR}/mirror_access_log combined env=!dontlog

</VirtualHost>


<VirtualHost *:443>

  ServerName registry.example.com
  ServerAlias www.registry.example.com

  SSLEngine on
  SSLCertificateFile /etc/apache2/ssl/registry.example.com.crt
  SSLCertificateKeyFile /etc/apache2/ssl/registry.example.com.key

  # Higher Strength SSL Ciphers
  SSLProtocol all -SSLv2 -SSLv3 -TLSv1
  SSLCipherSuite RC4-SHA:HIGH
  SSLHonorCipherOrder on

  # Logs
  ErrorLog ${APACHE_LOG_DIR}/registry_error_ssl_log
  CustomLog ${APACHE_LOG_DIR}/registry_access_ssl_log combined env=!dontlog

  Header always set "Docker-Distribution-Api-Version" "registry/2.0"
  Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0"
  RequestHeader set X-Forwarded-Proto "https"

  ProxyRequests off
  ProxyPreserveHost on

  # no proxy for /error/ (Apache HTTPd errors messages)
  ProxyPass /error/ !

  #
  # Registry v1
  #

  ProxyPass /v1 http://localhost:5000/v1
  ProxyPassReverse /v1 http://localhost:5000/v1

  ProxyPass /_ping http://localhost:5000/_ping
  ProxyPassReverse /_ping http://localhost:5000/_ping

  # Authentication require for push
  <Location /v1>
    Order deny,allow
    Allow from all
    AuthName "Registry Authentication"
    AuthType basic
    AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd"

    # Read access to authentified users
    <Limit GET HEAD>
      Require valid-user
    </Limit>

    # Write access to docker-deployer account only
    <Limit POST PUT DELETE>
      Require user docker-deployer
    </Limit>

  </Location>

  # Allow ping to run unauthenticated.
  <Location /v1/_ping>
    Satisfy any
    Allow from all
  </Location>

  # Allow ping to run unauthenticated.
  <Location /_ping>
    Satisfy any
    Allow from all
  </Location>

  #
  # Registry v2
  #

  ProxyPass /v2 http://localhost:5002/v2
  ProxyPassReverse /v2 http://localhost:5002/v2

  <Location /v2>
    Order deny,allow
    Allow from all
    AuthName "Registry Authentication"
    AuthType basic
    AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd"

    # Read access to authentified users
    <Limit GET HEAD>
      Require valid-user
    </Limit>

    # Write access to docker-deployer only
    <Limit POST PUT DELETE>
      Require user docker-deployer
    </Limit>

  </Location>


</VirtualHost>
@@ -26,6 +26,9 @@ server {
     return 404;
   }

+  # The docker client expects this header from the /v2/ endpoint.
+  more_set_headers 'Docker-Distribution-Api-Version: registry/2.0';
+
   include docker-registry-v2.conf;
 }
@@ -6,10 +6,10 @@ import (
     "regexp"
 )

-// TarSumRegexp defines a reguler expression to match tarsum identifiers.
+// TarSumRegexp defines a regular expression to match tarsum identifiers.
 var TarsumRegexp = regexp.MustCompile("tarsum(?:.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+")

-// TarsumRegexpCapturing defines a reguler expression to match tarsum identifiers with
+// TarsumRegexpCapturing defines a regular expression to match tarsum identifiers with
 // capture groups corresponding to each component.
 var TarsumRegexpCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)")
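A quick demonstration of the capture groups, using the capturing expression exactly as defined above; the digest value is made up for illustration:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as TarsumRegexpCapturing above.
var tarsumCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)")

func main() {
	m := tarsumCapturing.FindStringSubmatch("tarsum.v1+sha256:220aabc")
	// m[1] = "tarsum", m[3] = "v1" (version), m[4] = "sha256" (algorithm),
	// m[5] = "220aabc" (hex digest)
	fmt.Printf("%q\n", m)
}
```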
doc.go (2 changes)
@@ -2,6 +2,6 @@
 // docker distribution. The goal is to allow users to reliably package, ship
 // and store content related to docker images.
 //
-// This is currently a work in progress. More details are availalbe in the
+// This is currently a work in progress. More details are available in the
 // README.md.
 package distribution
@@ -7,18 +7,31 @@ COPY . /src
 # Reset the /docs dir so we can replace the theme meta with the new repo's git info
 RUN git reset --hard

-RUN grep "VERSION =" /src/version/version.go | sed 's/.*"\(.*\)".*/\1/' > /docs/VERSION
+
+#
+# RUN git describe --match 'v[0-9]*' --dirty='.m' --always > /docs/VERSION
+# The above line causes a floating point error in our tools
+#
+RUN grep "VERSION =" /src/version/version.go | sed 's/.*"\(.*\)".*/\1/' > /docs/VERSION
-COPY docs/* /docs/sources/distribution/
-COPY docs/images/* /docs/sources/distribution/images/
-COPY docs/spec/* /docs/sources/distribution/spec/
-COPY docs/spec/auth/* /docs/sources/distribution/spec/auth/
-COPY docs/storage-drivers/* /docs/sources/distribution/storage-drivers/
+COPY docs/* /docs/sources/registry/
+COPY docs/images/* /docs/sources/registry/images/
+COPY docs/spec/* /docs/sources/registry/spec/
+COPY docs/spec/auth/* /docs/sources/registry/spec/auth/
+COPY docs/storage-drivers/* /docs/sources/registry/storage-drivers/
 COPY docs/mkdocs.yml /docs/mkdocs-distribution.yml

+RUN sed -i.old '1s;^;no_version_dropdown: true;' \
+    /docs/sources/registry/*.md \
+    /docs/sources/registry/spec/*.md \
+    /docs/sources/registry/spec/auth/*.md \
+    /docs/sources/registry/storage-drivers/*.md
+
+RUN sed -i.old -e '/^<!--GITHUB/g' -e '/^IGNORES-->/g'\
+    /docs/sources/registry/*.md \
+    /docs/sources/registry/spec/*.md \
+    /docs/sources/registry/spec/auth/*.md \
+    /docs/sources/registry/storage-drivers/*.md
+
 # Then build everything together, ready for mkdocs
 RUN /docs/build.sh
@@ -1,6 +1,8 @@
+<!--GITHUB
 page_title: Configure a Registry
-page_description: Explains how to deploy a registry service
+page_description: Explains how to deploy a registry
 page_keywords: registry, service, images, repository
+IGNORES-->


 # Registry Configuration Reference

@@ -9,6 +11,18 @@ You configure a registry server using a YAML file. This page explains the
 configuration options and the values they can take. You'll also find examples of
 middleware and development environment configurations.

+## Overriding configuration options
+Environment variables may be used to override configuration parameters other than
+version. To override a configuration option, create an environment variable named
+REGISTRY\_variable_ where *variable* is the name of the configuration option.
+
+e.g
+```
+REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/tmp/registry/test
+```
+
+will set the storage root directory to `/tmp/registry/test`
+
 ## List of configuration options

 This section lists all the registry configuration options. Some options in

@@ -43,6 +57,12 @@ storage:
     rootdirectory: /s3/object/name/prefix
   cache:
     layerinfo: inmemory
+  maintenance:
+    uploadpurging:
+      enabled: true
+      age: 168h
+      interval: 24h
+      dryrun: false
 auth:
   silly:
     realm: silly-realm

@@ -116,6 +136,40 @@ options marked as **required**. This indicates that you can omit the parent with
 all its children. However, if the parent is included, you must also include all
 the children marked **required**.

+## Override configuration options
+
+You can use environment variables to override most configuration parameters. The
+exception is the `version` variable which cannot be overridden. You can set
+environment variables on the command line using the `-e` flag on `docker run` or
+from within a Dockerfile using the `ENV` instruction.
+
+To override a configuration option, create an environment variable named
+`REGISTRY\variable_` where *`variable`* is the name of the configuration option
+and the `_` (underscore) represents indention levels. For example, you can
+configure the `rootdirectory` of the `filesystem` storage backend:
+
+```
+storage:
+  filesystem:
+    rootdirectory: /tmp/registry
+```
+
+To override this value, set an environment variable like this:
+
+```
+REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/tmp/registry/test
+```
+
+This variable overrides the `/tmp/registry` value to the `/tmp/registry/test`
+directory.
+
+>**Note**: If an environment variable changes a map value into a string, such
+>as replacing the storage driver type with `REGISTRY_STORAGE=filesystem`, then
+>all sub-fields will be erased. As such, specifying the storage type in the
+>environment will remove all parameters related to the old storage
+>configuration.
+
+
 ## version

 ```yaml

@@ -221,6 +275,12 @@ storage:
     rootdirectory: /s3/object/name/prefix
   cache:
     layerinfo: inmemory
+  maintenance:
+    uploadpurging:
+      enabled: true
+      age: 168h
+      interval: 24h
+      dryrun: false
 ```

 The storage option is **required** and defines which storage backend is in use.

@@ -410,6 +470,27 @@ This storage backend uses Amazon's Simple Storage Service (S3).
   </tr>
 </table>

+### Maintenance
+
+Currently the registry can perform one maintenance function: upload purging. This and future
+maintenance functions which are related to storage can be configured under the maintenance section.
+
+### Upload Purging
+
+Upload purging is a background process that periodically removes orphaned files from the upload
+directories of the registry. Upload purging is enabled by default. To
+configure upload directory purging, the following parameters
+must be set.
+
+
+| Parameter | Required | Description
+  --------- | -------- | -----------
+`enabled` | yes | Set to true to enable upload purging. Default=true. |
+`age` | yes | Upload directories which are older than this age will be deleted. Default=168h (1 week)
+`interval` | yes | The interval between upload directory purging. Default=24h.
+`dryrun` | yes | dryrun can be set to true to obtain a summary of what directories will be deleted. Default=false.
+
+Note: `age` and `interval` are strings containing a number with optional fraction and a unit suffix: e.g. 45m, 2h10m, 168h (1 week).
+
 ## auth

@@ -1140,6 +1221,7 @@ Configure the behavior of the Redis connection pool.
   </tr>
 </table>

+
 ## Example: Development configuration

 The following is a simple example you can use for local development:

@@ -1164,8 +1246,8 @@ The above configures the registry instance to run on port `5000`, binding to
 verbose.

 A similar simple configuration is available at
-[config.yml](https://github.com/docker/distribution/blob/master/cmd/registry/
-config.yml). Both are generally useful for local development.
+[config.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config.yml).
+Both are generally useful for local development.


 ## Example: Middleware configuration

@@ -1221,6 +1303,6 @@ middleware:


 >**Note**: Cloudfront keys exist separately to other AWS keys. See
->[the documentation on AWS credentials](http://docs.aws.amazon.com/AWSSecurityCredentials/1.0/
->AboutAWSCredentials.html#KeyPairs) for more information.
+>[the documentation on AWS credentials](http://docs.aws.amazon.com/AWSSecurityCredentials/1.0/AboutAWSCredentials.html#KeyPairs)
+>for more information.
@@ -1,10 +1,13 @@
-page_title: Deploying a registry service
-page_description: Explains how to deploy a registry service
+<!--GITHUB
+page_title: Deploying a registry server
+page_description: Explains how to deploy a registry server
 page_keywords: registry, service, images, repository
+IGNORES-->

-# Deploying a registry service
+# Deploying a registry server

-This section explains how to deploy a Docker Registry Service either privately
+This section explains how to deploy a Docker Registry either privately
 for your own company or publicly for other users. For example, your company may
 require a private registry to support your continuous integration (CI) system as
 it builds new releases or test servers. Alternatively, your company may have a

@@ -37,7 +40,7 @@ a local registry.
    The `run` command automatically pulls a `hello-world` image from Docker's
    official images.

-3. Start a registry service on your localhost.
+3. Start a registry on your localhost.

        $ docker run -p 5000:5000 registry:2.0

@@ -82,7 +85,7 @@ a local registry.
        511136ea3c5a: Image already exists
        Digest: sha256:a1b13bc01783882434593119198938b9b9ef2bd32a0a246f16ac99b01383ef7a

-7. Use the `curl` command and the Docker Registry Service API v2 to list your
+7. Use the `curl` command and the Docker Registry API v2 to list your
    image in the registry:

        $ curl -v -X GET http://localhost:5000/v2/hello-mine/tags/list

@@ -104,7 +107,7 @@ a local registry.
        * Connection #0 to host localhost left intact

    You can also get this information by entering the
-   `http://52.10.125.146:5000/v2/hello-mine/tags/list` address in your browser.
+   `http://localhost:5000/v2/hello-mine/tags/list` address in your browser.

8. Remove all the unused images from your local environment:

@@ -205,6 +208,9 @@ storage:
         layerinfo: inmemory
     filesystem:
         rootdirectory: /tmp/registry-dev
+    maintenance:
+        uploadpurging:
+            enabled: false
 http:
     addr: :5000
     secret: asecretforlocaldevelopment

@@ -328,7 +334,7 @@ support.

 2. Run your new image.

-       $ docker run -p 5000:5000 registry_local:latest
+       $ docker run -p 5000:5000 secure_registry:latest
        time="2015-04-12T03:06:18.616502588Z" level=info msg="endpoint local-8082 disabled, skipping" environment=development instance.id=bf33c9dc-2564-406b-97c3-6ee69dc20ec6 service=registry
        time="2015-04-12T03:06:18.617012948Z" level=info msg="endpoint local-8083 disabled, skipping" environment=development instance.id=bf33c9dc-2564-406b-97c3-6ee69dc20ec6 service=registry
        time="2015-04-12T03:06:18.617190113Z" level=info msg="using inmemory layerinfo cache" environment=development instance.id=bf33c9dc-2564-406b-97c3-6ee69dc20ec6 service=registry

@@ -542,11 +548,11 @@ procedure. The directory includes an example `compose` configuration.

 4. Use `curl` to list the image in the registry.

-       $ curl -v -X GET http://localhost:32777/v2/registry1/tags/list
+       $ curl -v -X GET http://localhost:32777/v2/registry_one/tags/list
        * Hostname was NOT found in DNS cache
        *   Trying 127.0.0.1...
        * Connected to localhost (127.0.0.1) port 32777 (#0)
-       > GET /v2/registry1/tags/list HTTP/1.1
+       > GET /v2/registry_one/tags/list HTTP/1.1
        > User-Agent: curl/7.36.0
        > Host: localhost:32777
        > Accept: */*
@@ -1,11 +1,11 @@
 # Project

 ## Contents
-- [Docker Registry Service 2.0](overview.md)
+- [Docker Registry 2.0](index.md)
 - [Architecture](architecture.md)
 - [Build the development environment](building.md)
 - [Configure a registry](configuration.md)
-- [Deploying a registry service](deploying.md)
+- [Deploying a registry server](deploying.md)
 - [Microsoft Azure storage driver](storage-drivers/azure.md)
 - [Filesystem storage driver](storage-drivers/filesystem.md)
 - [In-memory storage driver](storage-drivers/inmemory.md)
@@ -1,6 +1,8 @@
+<!--GITHUB
 page_title: Docker Registry 2.0
 page_description: Introduces the Docker Registry
 page_keywords: registry, images, repository
+IGNORES-->

 # Docker Registry 2.0

@@ -22,7 +24,7 @@ is collection of images. Users interact with the registry by pushing images to
 or pulling images from the registry. The Docker Registry includes several
 optional features that you can configure according to your needs.

-![](../images/registry.png)
+![](images/registry.png)

 The architecture supports a configurable storage backend. You can store images
 on a file system or on a service such as Amazon S3 or Microsoft Azure. The
@@ -10,7 +10,7 @@ One can migrate images from one version to the other by pulling images from the

 -----

-The Docker Registry Service 2.0 is backward compatible with images created by the earlier specification. If you are migrating a private registry to version 2.0, you should use the following process:
+The Docker Registry 2.0 is backward compatible with images created by the earlier specification. If you are migrating a private registry to version 2.0, you should use the following process:

 1. Configure and test a 2.0 registry image in a sandbox environment.
@ -1,9 +1,13 @@
|
|||
- ['distribution/overview.md', 'Reference', 'Docker Registry Service 2.0']
|
||||
- ['distribution/deploying.md', 'Reference', '-- Deploy a registry' ]
|
||||
- ['distribution/configuration.md', 'Reference', '-- Configure a registry' ]
|
||||
- ['distribution/storagedrivers.md', 'Reference', '-- Storage driver model' ]
|
||||
- ['distribution/notifications.md', 'Reference', '-- Work with notifications' ]
|
||||
- ['distribution/spec/api.md', 'Reference', '-- Registry Service API v2' ]
|
||||
- ['distribution/spec/json.md', 'Reference', '-- JSON format' ]
|
||||
- ['distribution/spec/auth/token.md', 'Reference', '-- Authenticate via central service' ]
|
||||
- ['registry/index.md', 'Reference', 'Docker Registry 2.0']
|
||||
- ['registry/deploying.md', 'Reference', ' ▪ Deploy a registry' ]
|
||||
- ['registry/configuration.md', 'Reference', ' ▪ Configure a registry' ]
|
||||
- ['registry/storagedrivers.md', 'Reference', ' ▪ Storage driver model' ]
|
||||
- ['registry/notifications.md', 'Reference', ' ▪ Work with notifications' ]
|
||||
- ['registry/spec/api.md', 'Reference', ' ▪ Registry Service API v2' ]
|
||||
- ['registry/spec/json.md', 'Reference', ' ▪ JSON format' ]
|
||||
- ['registry/spec/auth/token.md', 'Reference', ' ▪ Authenticate via central service' ]
|
||||
|
||||
- ['registry/storage-drivers/azure.md', '**HIDDEN**' ]
|
||||
- ['registry/storage-drivers/filesystem.md', '**HIDDEN**' ]
|
||||
- ['registry/storage-drivers/inmemory.md', '**HIDDEN**' ]
|
||||
- ['registry/storage-drivers/s3.md','**HIDDEN**' ]
|
||||
|
|
|
@ -1,6 +1,9 @@
|
|||
<!--GITHUB
|
||||
page_title: Work with Notifications
|
||||
page_description: Explains how to deploy a registry service
|
||||
page_description: Explains how to deploy a registry server
|
||||
page_keywords: registry, service, images, repository
|
||||
IGNORES-->
|
||||
|
||||
|
||||
# Notifications
|
||||
|
||||
|
|
docs/osx-setup-guide.md (new file, 56 lines)
@@ -0,0 +1,56 @@
+# OS X Setup Guide
+
+This guide will walk you through running the new Go based [Docker registry](https://github.com/docker/distribution) on your local OS X machine.
+
+## Checkout the Docker Distribution source tree
+
+```
+mkdir -p $GOPATH/src/github.com/docker
+git clone https://github.com/docker/distribution.git $GOPATH/src/github.com/docker/distribution
+cd $GOPATH/src/github.com/docker/distribution
+```
+
+## Build the registry binary
+
+```
+GOPATH=$(PWD)/Godeps/_workspace:$GOPATH make binaries
+sudo cp bin/registry /usr/local/libexec/registry
+```
+
+## Setup
+
+Copy the registry configuration file in place:
+
+```
+mkdir /Users/Shared/Registry
+cp docs/osx/config.yml /Users/Shared/Registry/config.yml
+```
+
+## Running the Docker Registry under launchd
+
+Copy the Docker registry plist into place:
+
+```
+plutil -lint docs/osx/com.docker.registry.plist
+cp docs/osx/com.docker.registry.plist ~/Library/LaunchAgents/
+chmod 644 ~/Library/LaunchAgents/com.docker.registry.plist
+```
+
+Start the Docker registry:
+
+```
+launchctl load ~/Library/LaunchAgents/com.docker.registry.plist
+```
+
+### Restarting the docker registry service
+
+```
+launchctl stop com.docker.registry
+launchctl start com.docker.registry
+```
+
+### Unloading the docker registry service
+
+```
+launchctl unload ~/Library/LaunchAgents/com.docker.registry.plist
+```
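A quick way to confirm the launchd-managed registry above is actually serving the V2 API is to probe the base endpoint. Per the API spec changes later in this diff, a conforming registry sets the `Docker-Distribution-API-Version` header to `registry/2.0` on a `200 OK` or `401 Unauthorized` response. A minimal sketch, assuming the port 5000 configured in the plist below:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// GET /v2/ is the API version-check endpoint described in the spec.
	resp, err := http.Get("http://localhost:5000/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println("status:", resp.StatusCode)
	fmt.Println("api version:", resp.Header.Get("Docker-Distribution-API-Version"))
}
```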
docs/osx/com.docker.registry.plist (new file, 42 lines)
@@ -0,0 +1,42 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>Label</key>
+    <string>com.docker.registry</string>
+    <key>KeepAlive</key>
+    <true/>
+    <key>StandardErrorPath</key>
+    <string>/Users/Shared/Registry/registry.log</string>
+    <key>StandardOutPath</key>
+    <string>/Users/Shared/Registry/registry.log</string>
+    <key>Program</key>
+    <string>/usr/local/libexec/registry</string>
+    <key>ProgramArguments</key>
+    <array>
+        <string>/usr/local/libexec/registry</string>
+        <string>/Users/Shared/Registry/config.yml</string>
+    </array>
+    <key>Sockets</key>
+    <dict>
+        <key>http-listen-address</key>
+        <dict>
+            <key>SockServiceName</key>
+            <string>5000</string>
+            <key>SockType</key>
+            <string>dgram</string>
+            <key>SockFamily</key>
+            <string>IPv4</string>
+        </dict>
+        <key>http-debug-address</key>
+        <dict>
+            <key>SockServiceName</key>
+            <string>5001</string>
+            <key>SockType</key>
+            <string>dgram</string>
+            <key>SockFamily</key>
+            <string>IPv4</string>
+        </dict>
+    </dict>
+</dict>
+</plist>

docs/osx/config.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
+version: 0.1
+log:
+    level: info
+    fields:
+        service: registry
+        environment: macbook-air
+storage:
+    cache:
+        layerinfo: inmemory
+    filesystem:
+        rootdirectory: /Users/Shared/Registry
+http:
+    addr: 0.0.0.0:5000
+    secret: mytokensecret
+    debug:
+        addr: localhost:5001

docs/spec/api.md (218 changes)
@@ -1,3 +1,9 @@
+<!--GITHUB
+page_title: Docker Registry HTTP API V2
+page_description: Explains how to use registry API
+page_keywords: registry, service, driver, images, storage, api
+IGNORES-->
+
 # Docker Registry HTTP API V2

 ## Introduction

@@ -111,12 +117,24 @@ specification to correspond with the versions enumerated here.

 <dl>
+    <dt>2.0.1</dt>
+    <dd>
+        <ul>
+            <li>Added capability of doing streaming upload to PATCH blob upload.</li>
+            <li>Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.</li>
+            <li>Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.</li>
+        </ul>
+    </dd>
+
     <dt>2.0.0</dt>
     <dd>
         <ul>
            <li>Added support for immutable manifest references in manifest endpoints.</li>
            <li>Deleting a manifest by tag has been deprecated.</li>
            <li>Specified `Docker-Content-Digest` header for appropriate entities.</li>
            <li>Added error code for unsupported operations.</li>
+           <li>Added capability of doing streaming upload to PATCH blob upload.</li>
+           <li>Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.</li>
+           <li>Removed 416 return code from PUT blob upload.</li>
         </ul>
     </dd>

@@ -218,6 +236,11 @@ If `404 Not Found` response status, or other unexpected status, is returned,
 the client should proceed with the assumption that the registry does not
 implement V2 of the API.

+When a `200 OK` or `401 Unauthorized` response is returned, the
+"Docker-Distribution-API-Version" header should be set to "registry/2.0".
+Clients may require this header value to determine if the endpoint serves this
+API. When this header is omitted, clients may fallback to an older API version.
+
 ### Pulling An Image

 An "image" is a combination of a JSON manifest and individual layer files. The

@@ -989,7 +1012,7 @@ Content-Type: application/json; charset=utf-8
    "tag": <tag>,
    "fsLayers": [
       {
-         "blobSum": <tarsum>
+         "blobSum": "<digest>"
       },
       ...
    ]

@@ -1120,7 +1143,7 @@ Content-Type: application/json; charset=utf-8
    "tag": <tag>,
    "fsLayers": [
       {
-         "blobSum": <tarsum>
+         "blobSum": "<digest>"
       },
       ...
    ]

@@ -1242,7 +1265,7 @@ Content-Type: application/json; charset=utf-8
             "code": "BLOB_UNKNOWN",
             "message": "blob unknown to registry",
             "detail": {
-                "digest": <tarsum>
+                "digest": "<digest>"
             }
         },
         ...

@@ -1446,7 +1469,7 @@ The error codes that may be included in the response body are enumerated below:

 ### Blob

-Fetch the blob identified by `name` and `digest`. Used to fetch layers by tarsum digest.
+Fetch the blob identified by `name` and `digest`. Used to fetch layers by digest.



@@ -1794,7 +1817,7 @@ Initiate a resumable blob upload. If successful, an upload location will be prov
 ##### Initiate Monolithic Blob Upload

 ```
-POST /v2/<name>/blobs/uploads/?digest=<tarsum>
+POST /v2/<name>/blobs/uploads/?digest=<digest>
 Host: <registry host>
 Authorization: <scheme> <token>
 Content-Length: <length of blob>

@@ -2169,6 +2192,158 @@ The error codes that may be included in the response body are enumerated below:
 Upload a chunk of data for the specified upload.

+
+##### Stream upload
+
+```
+PATCH /v2/<name>/blobs/uploads/<uuid>
+Host: <registry host>
+Authorization: <scheme> <token>
+Content-Type: application/octet-stream
+
+<binary data>
+```
+
+Upload a stream of data to upload without completing the upload.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`name`|path|Name of the target repository.|
+|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.|
+
+###### On Success: Data Accepted
+
+```
+204 No Content
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: 0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.|
+|`Range`|Range indicating the current progress of the upload.|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
+
+###### On Failure: Bad Request
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors:" [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+There was an error processing the upload and it must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+-------|----|------|------------
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
+
+###### On Failure: Unauthorized
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ..."
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors:" [
+        {
+            "code": "UNAUTHORIZED",
+            "message": "access to the requested resource is not authorized",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have access to push to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON error response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+-------|----|------|------------
+| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors:" [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The upload is unknown to the registry. The upload must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+-------|----|------|------------
+| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
+
+
 ##### Chunked upload

 ```
 PATCH /v2/<name>/blobs/uploads/<uuid>

@@ -2181,7 +2356,7 @@ Content-Type: application/octet-stream
 <binary chunk>
 ```

-Upload a chunk of data to specified upload without completing the upload.
+Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range.

 The following parameters should be specified on the request:

@@ -2341,17 +2516,16 @@ Complete the upload specified by `uuid`, optionally appending the body as the fi
 ```
-PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>
+PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
 Host: <registry host>
 Authorization: <scheme> <token>
-Content-Range: <start of range>-<end of range, inclusive>
-Content-Length: <length of chunk>
+Content-Length: <length of data>
 Content-Type: application/octet-stream

-<binary chunk>
+<binary data>
 ```

-Complete the upload, providing the _final_ chunk of data, if necessary. This method may take a body with all the data. If the `Content-Range` header is specified, it may include the final chunk. A request without a body will just complete the upload with previously uploaded content.
+Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.

 The following parameters should be specified on the request:

@@ -2360,8 +2534,7 @@ The following parameters should be specified on the request:
 |----|----|-----------|
 |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
 |`Authorization`|header|An RFC7235 compliant authorization header.|
-|`Content-Range`|header|Range of bytes identifying the block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header. May be omitted if no data is provided.|
-|`Content-Length`|header|Length of the chunk being uploaded, corresponding to the length of the request body. May be zero if no data is provided.|
+|`Content-Length`|header|Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.|
 |`name`|path|Name of the target repository.|
 |`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.|
 |`digest`|query|Digest of uploaded blob.|

@@ -2494,25 +2667,6 @@ The error codes that may be included in the response body are enumerated below:


-###### On Failure: Requested Range Not Satisfiable
-
-```
-416 Requested Range Not Satisfiable
-Location: /v2/<name>/blobs/uploads/<uuid>
-Range: 0-<offset>
-```
-
-The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. The contents of the `Range` header may be used to resolve the condition.
-
-The following headers will be returned on the response:
-
-|Name|Description|
-|----|-----------|
-|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.|
-|`Range`|Range indicating the current progress of the upload.|
-
-
 #### DELETE Blob Upload

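To make the revised upload flow concrete: the 2.0.1 changes above replace the final-chunk PUT with a streaming PATCH followed by a PUT that carries either the entire data or no data, with the digest passed as a query parameter. The sketch below follows that sequence under stated assumptions: `uploadURL` is the `Location` returned by an earlier `POST /v2/<name>/blobs/uploads/` (assumed absolute here), `dgst` is the blob's digest string, and error handling is kept minimal.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"
)

// streamBlob PATCHes the whole blob to the upload location, then completes
// the upload with a bodyless PUT carrying the digest, per the spec above.
func streamBlob(uploadURL string, blob []byte, dgst string) error {
	req, err := http.NewRequest("PATCH", uploadURL, bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("stream upload: unexpected status %d", resp.StatusCode)
	}

	// Use the returned Location verbatim, adding the digest parameter.
	loc, err := url.Parse(resp.Header.Get("Location"))
	if err != nil {
		return err
	}
	q := loc.Query()
	q.Set("digest", dgst)
	loc.RawQuery = q.Encode()

	// No body: complete the upload with the previously uploaded content.
	put, err := http.NewRequest("PUT", loc.String(), nil)
	if err != nil {
		return err
	}
	resp, err = http.DefaultClient.Do(put)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("complete upload: unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// Hypothetical values, for illustration only.
	_ = streamBlob("http://localhost:5000/v2/hello-mine/blobs/uploads/some-uuid",
		[]byte("layer data"), "sha256:...")
}
```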
@@ -1,3 +1,9 @@
+<!--GITHUB
+page_title: Docker Registry HTTP API V2
+page_description: Explains how to use registry API
+page_keywords: registry, service, driver, images, storage, api
+IGNORES-->
+
 # Docker Registry HTTP API V2

 ## Introduction

@@ -111,12 +117,24 @@ specification to correspond with the versions enumerated here.

 <dl>
+    <dt>2.0.1</dt>
+    <dd>
+        <ul>
+            <li>Added capability of doing streaming upload to PATCH blob upload.</li>
+            <li>Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.</li>
+            <li>Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.</li>
+        </ul>
+    </dd>
+
     <dt>2.0.0</dt>
     <dd>
         <ul>
            <li>Added support for immutable manifest references in manifest endpoints.</li>
            <li>Deleting a manifest by tag has been deprecated.</li>
            <li>Specified `Docker-Content-Digest` header for appropriate entities.</li>
            <li>Added error code for unsupported operations.</li>
+           <li>Added capability of doing streaming upload to PATCH blob upload.</li>
+           <li>Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.</li>
+           <li>Removed 416 return code from PUT blob upload.</li>
         </ul>
     </dd>

@@ -218,6 +236,11 @@ If `404 Not Found` response status, or other unexpected status, is returned,
 the client should proceed with the assumption that the registry does not
 implement V2 of the API.

+When a `200 OK` or `401 Unauthorized` response is returned, the
+"Docker-Distribution-API-Version" header should be set to "registry/2.0".
+Clients may require this header value to determine if the endpoint serves this
+API. When this header is omitted, clients may fallback to an older API version.
+
 ### Pulling An Image

 An "image" is a combination of a JSON manifest and individual layer files. The

@@ -1,3 +1,10 @@
+<!--GITHUB
+page_title: Docker Registry v2 Authentication
+page_description: Introduces the Docker Registry v2 authentication
+page_keywords: registry, images, repository, v2, authentication
+IGNORES-->
+
+
 # Docker Registry v2 authentication via central service

 Today a Docker Registry can run in standalone mode in which there are no

docs/spec/implementations.md (new file, 26 lines)
@@ -0,0 +1,26 @@
+# Distribution API Implementations
+
+This is a list of known implementations of the Distribution API spec.
+
+## [Docker Distribution Registry](https://github.com/docker/distribution)
+
+Docker distribution is the reference implementation of the distribution API
+specification. It aims to fully implement the entire specification.
+
+### Releases
+#### 2.0.1 (_in development_)
+Implements API 2.0.1
+
+_Known Issues_
+ - No resumable push support
+ - Content ranges ignored
+ - Blob upload status will always return a starting range of 0
+
+#### 2.0.0
+Implements API 2.0.0
+
+_Known Issues_
+ - No resumable push support
+ - No PATCH implementation for blob upload
+ - Content ranges ignored

@@ -1,3 +1,10 @@
+<!--GITHUB
+page_title: Docker Distribution JSON Canonicalization
+page_description: Explains registry JSON objects
+page_keywords: registry, service, images, repository, json
+IGNORES-->
+
+
 # Docker Distribution JSON Canonicalization

 To provide consistent content hashing of JSON objects throughout Docker

@@ -121,7 +121,7 @@ by *libtrust*. A signature consists of the following fields:
       },
       {
          "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
-      },
+      }
    ],
    "schemaVersion": 1,
    "signatures": [

@@ -1,5 +1,10 @@
-# Microsoft Azure storage driver
+<!--GITHUB
+page_title: Microsoft Azure storage driver
+page_description: Explains how to use the Azure storage drivers
+page_keywords: registry, service, driver, images, storage, azure
+IGNORES-->
+
+# Microsoft Azure storage driver

 An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage][azure-blob-storage] for object storage.

@@ -1,3 +1,9 @@
+<!--GITHUB
+page_title: Filesystem storage driver
+page_description: Explains how to use the filesystem storage drivers
+page_keywords: registry, service, driver, images, storage, filesystem
+IGNORES-->
+
 # Filesystem storage driver

 An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem.

@@ -1,3 +1,9 @@
+<!--GITHUB
+page_title: In-memory storage driver
+page_description: Explains how to use the in-memory storage drivers
+page_keywords: registry, service, driver, images, storage, in-memory
+IGNORES-->
+
 # In-memory storage driver

 An implementation of the `storagedriver.StorageDriver` interface which uses local memory for object storage.

@@ -1,3 +1,9 @@
+<!--GITHUB
+page_title: S3 storage driver
+page_description: Explains how to use the S3 storage drivers
+page_keywords: registry, service, driver, images, storage, S3
+IGNORES-->
+
 # S3 storage driver

 An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 for object storage.

@@ -1,20 +1,23 @@
-Docker-Registry Storage Driver
-==============================
+<!--GITHUB
+page_title: Docker Registry Storage Driver
+page_description: Explains how to use the storage drivers
+page_keywords: registry, service, driver, images, storage
+IGNORES-->
+
+# Docker Registry Storage Driver

 This document describes the registry storage driver model, implementation, and explains how to contribute new storage drivers.

-Provided Drivers
-================
+## Provided Drivers

 This storage driver package comes bundled with several drivers:

-- [inmemory](storage-drivers/inmemory): A temporary storage driver using a local inmemory map. This exists solely for reference and testing.
-- [filesystem](storage-drivers/filesystem): A local storage driver configured to use a directory tree in the local filesystem.
-- [s3](storage-drivers/s3): A driver storing objects in an Amazon Simple Storage Solution (S3) bucket.
-- [azure](storage-drivers/azure): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/).
+- [inmemory](storage-drivers/inmemory.md): A temporary storage driver using a local inmemory map. This exists solely for reference and testing.
+- [filesystem](storage-drivers/filesystem.md): A local storage driver configured to use a directory tree in the local filesystem.
+- [s3](storage-drivers/s3.md): A driver storing objects in an Amazon Simple Storage Solution (S3) bucket.
+- [azure](storage-drivers/azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/).

-Storage Driver API
-==================
+## Storage Driver API

 The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems.

@@ -22,23 +25,21 @@ Storage drivers are required to implement the `storagedriver.StorageDriver` inte

 Storage drivers are intended (but not required) to be written in go, providing compile-time validation of the `storagedriver.StorageDriver` interface, although an IPC driver wrapper means that it is not required for drivers to be included in the compiled registry. The `storagedriver/ipc` package provides a client/server protocol for running storage drivers provided in external executables as a managed child server process.

-Driver Selection and Configuration
-==================================
+## Driver Selection and Configuration

 The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based off of the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package.

 Storage driver factories may be registered by name using the `factory.Register` method, and then later invoked by calling `factory.Create` with a driver name and parameters map. If no driver is registered with the given name, this factory will attempt to find an executable storage driver with the executable name "registry-storage-\<driver name\>" and return an IPC storage driver wrapper managing the driver subprocess. If no such storage driver can be found, `factory.Create` will return an `InvalidStorageDriverError`.

-Driver Contribution
-===================
+## Driver Contribution

-## Writing new storage drivers
+### Writing new storage drivers
 To create a valid storage driver, one must implement the `storagedriver.StorageDriver` interface and make sure to expose this driver via the factory system and as a distributable IPC server executable.

-### In-process drivers
+#### In-process drivers
 Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase.

-### Out-of-process drivers
+#### Out-of-process drivers
 As many users will run the registry as a pre-constructed docker container, storage drivers should also be distributable as IPC server executables. Drivers written in go should model the main method provided in `storagedriver/filesystem/registry-storage-filesystem/filesystem.go`. Parameters to IPC drivers will be provided as a JSON-serialized map in the first argument to the process. These parameters should be validated and then a blocking call to `ipc.StorageDriverServer` should be made with a new storage driver.

 Out-of-process drivers must also implement the `ipc.IPCStorageDriver` interface, which exposes a `Version` check for the storage driver. This is used to validate storage driver api compatibility at driver load-time.

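The register/create factory model described in the storage-drivers document above is easy to restate in miniature. This is a self-contained sketch of the pattern, not the actual `storagedriver/factory` package; the names `Driver`, `Factory`, and the error text are illustrative stand-ins, and the `InvalidStorageDriverError` behavior is approximated with a plain error.

```go
package main

import "fmt"

// Driver stands in for storagedriver.StorageDriver in this sketch.
type Driver interface {
	Name() string
}

// Factory constructs a driver from a parameters map, mirroring the
// StorageDriverFactory interface described above.
type Factory interface {
	Create(parameters map[string]interface{}) (Driver, error)
}

var factories = map[string]Factory{}

// Register records a factory under a driver name, typically from an init().
func Register(name string, f Factory) { factories[name] = f }

// Create looks up a registered factory by name and invokes it.
func Create(name string, parameters map[string]interface{}) (Driver, error) {
	f, ok := factories[name]
	if !ok {
		return nil, fmt.Errorf("invalid storage driver: %q", name)
	}
	return f.Create(parameters)
}

type inmemoryDriver struct{}

func (inmemoryDriver) Name() string { return "inmemory" }

type inmemoryFactory struct{}

func (inmemoryFactory) Create(map[string]interface{}) (Driver, error) {
	return inmemoryDriver{}, nil
}

func main() {
	Register("inmemory", inmemoryFactory{})
	d, err := Create("inmemory", nil)
	fmt.Println(d, err)
}
```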
errors.go (10 changes)
@@ -74,6 +74,16 @@ func (ErrManifestUnverified) Error() string {
 	return fmt.Sprintf("unverified manifest")
 }

+// ErrManifestValidation is returned during manifest verification if a common
+// validation error is encountered.
+type ErrManifestValidation struct {
+	Reason string
+}
+
+func (err ErrManifestValidation) Error() string {
+	return fmt.Sprintf("invalid manifest: %s", err.Reason)
+}
+
 // ErrManifestVerification provides a type to collect errors encountered
 // during manifest verification. Currently, it accepts errors of all types,
 // but it may be narrowed to those involving manifest verification.

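For callers, the value of the new error type is that the failure reason can be recovered with a type assertion. A hedged sketch: `errors.go` sits in the repository's root `distribution` package per this diff, so the type is assumed to be exported as `distribution.ErrManifestValidation`, and the `verify` function here is purely hypothetical.

```go
package main

import (
	"fmt"

	"github.com/docker/distribution"
)

// verify is a hypothetical stand-in for manifest verification that fails
// with the validation error type added in errors.go above.
func verify() error {
	return distribution.ErrManifestValidation{Reason: "extra parent in history"}
}

func main() {
	if err := verify(); err != nil {
		if verr, ok := err.(distribution.ErrManifestValidation); ok {
			fmt.Println("validation failed:", verr.Reason)
		}
	}
}
```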
@@ -53,6 +53,7 @@ type httpStatusListener interface {
 func (hs *httpSink) Write(events ...Event) error {
 	hs.mu.Lock()
 	defer hs.mu.Unlock()
+	defer hs.client.Transport.(*headerRoundTripper).CloseIdleConnections()

 	if hs.closed {
 		return ErrSinkClosed

@@ -83,6 +84,7 @@ func (hs *httpSink) Write(events ...Event) error {

 		return fmt.Errorf("%v: error posting: %v", hs, err)
 	}
+	defer resp.Body.Close()

 	// The notifier will treat any 2xx or 3xx response as accepted by the
 	// endpoint.

@@ -1,10 +1,12 @@
 package notifications

 import (
+	"encoding/json"
 	"io"
 	"reflect"
 	"testing"

+	"code.google.com/p/go-uuid/uuid"
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest"

@@ -132,6 +134,8 @@ func checkExerciseRepository(t *testing.T, repository distribution.Repository) {
 		}
 	}

+	m.History = generateHistory(t, len(m.FSLayers))
+
 	pk, err := libtrust.GenerateECP256PrivateKey()
 	if err != nil {
 		t.Fatalf("unexpected error generating key: %v", err)

@@ -176,3 +180,37 @@ func checkExerciseRepository(t *testing.T, repository distribution.Repository) {
 		t.Fatalf("retrieved unexpected manifest: %v", err)
 	}
 }
+
+// generateHistory creates a valid history entry of length n.
+func generateHistory(t *testing.T, n int) []manifest.History {
+	var images []map[string]interface{}
+
+	// first pass: create images entries.
+	for i := 0; i < n; i++ {
+		// simulate correct id -> parent links in v1Compatibility, using uuids.
+		image := map[string]interface{}{
+			"id": uuid.New(),
+		}
+
+		images = append(images, image)
+	}
+
+	var history []manifest.History
+
+	for i, image := range images {
+		if i+1 < len(images) {
+			image["parent"] = images[i+1]["id"]
+		}
+
+		p, err := json.Marshal(image)
+		if err != nil {
+			t.Fatalf("error generating image json: %v", err)
+		}
+
+		history = append(history, manifest.History{
+			V1Compatibility: string(p),
+		})
+	}
+
+	return history
+}

@@ -220,7 +220,7 @@ type retryingSink struct {
 	sink   Sink
 	closed bool

-	// circuit breaker hueristics
+	// circuit breaker heuristics
 	failures struct {
 		threshold int
 		recent    int

@@ -317,7 +317,7 @@ func (rs *retryingSink) wait(backoff time.Duration) {
 	time.Sleep(backoff)
 }

-// reset marks a succesful call.
+// reset marks a successful call.
 func (rs *retryingSink) reset() {
 	rs.failures.recent = 0
 	rs.failures.last = time.Time{}

@@ -330,7 +330,7 @@ func (rs *retryingSink) failure() {
 }

 // proceed returns true if the call should proceed based on circuit breaker
-// hueristics.
+// heuristics.
 func (rs *retryingSink) proceed() bool {
 	return rs.failures.recent < rs.failures.threshold ||
 		time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff))

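The typo fixes above touch the comments around the sink's circuit breaker; the heuristic itself is compact enough to restate. A self-contained sketch of the same check, with parameter names paraphrased from the fields shown in the diff:

```go
package main

import (
	"fmt"
	"time"
)

// proceed allows a call while recent failures are under the threshold, or
// once the backoff window since the last failure has elapsed — the same
// check retryingSink.proceed applies above.
func proceed(recent, threshold int, last time.Time, backoff time.Duration) bool {
	return recent < threshold || time.Now().UTC().After(last.Add(backoff))
}

func main() {
	stale := time.Now().UTC().Add(-2 * time.Minute)
	fmt.Println(proceed(3, 5, stale, time.Minute))              // under threshold: true
	fmt.Println(proceed(7, 5, stale, time.Minute))              // over threshold, backoff elapsed: true
	fmt.Println(proceed(7, 5, time.Now().UTC(), time.Minute))   // over threshold, within backoff: false
}
```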
@@ -135,7 +135,7 @@ const (
    "tag": <tag>,
    "fsLayers": [
       {
-         "blobSum": <tarsum>
+         "blobSum": "<digest>"
       },
       ...
    ]

@@ -606,7 +606,7 @@ var routeDescriptors = []RouteDescriptor{
             "code": "BLOB_UNKNOWN",
             "message": "blob unknown to registry",
             "detail": {
-                "digest": <tarsum>
+                "digest": "<digest>"
             }
         },
         ...

@@ -712,7 +712,7 @@ var routeDescriptors = []RouteDescriptor{
 			Name:        RouteNameBlob,
 			Path:        "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}",
 			Entity:      "Blob",
-			Description: "Fetch the blob identified by `name` and `digest`. Used to fetch layers by tarsum digest.",
+			Description: "Fetch the blob identified by `name` and `digest`. Used to fetch layers by digest.",
 			Methods: []MethodDescriptor{

 				{

@@ -898,7 +898,7 @@ var routeDescriptors = []RouteDescriptor{
 						{
 							Name:        "digest",
 							Type:        "query",
-							Format:      "<tarsum>",
+							Format:      "<digest>",
 							Regexp:      digest.DigestRegexp,
 							Description: `Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`,
 						},

@@ -1055,7 +1055,74 @@ var routeDescriptors = []RouteDescriptor{
 			Description: "Upload a chunk of data for the specified upload.",
 			Requests: []RequestDescriptor{
 				{
-					Description: "Upload a chunk of data to specified upload without completing the upload.",
+					Name:        "Stream upload",
+					Description: "Upload a stream of data to upload without completing the upload.",
 					PathParameters: []ParameterDescriptor{
 						nameParameterDescriptor,
 						uuidParameterDescriptor,
+					},
+					Headers: []ParameterDescriptor{
+						hostHeader,
+						authHeader,
+					},
+					Body: BodyDescriptor{
+						ContentType: "application/octet-stream",
+						Format:      "<binary data>",
+					},
+					Successes: []ResponseDescriptor{
+						{
+							Name:        "Data Accepted",
+							Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+							StatusCode:  http.StatusNoContent,
+							Headers: []ParameterDescriptor{
+								{
+									Name:        "Location",
+									Type:        "url",
+									Format:      "/v2/<name>/blobs/uploads/<uuid>",
+									Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+								},
+								{
+									Name:        "Range",
+									Type:        "header",
+									Format:      "0-<offset>",
+									Description: "Range indicating the current progress of the upload.",
+								},
+								contentLengthZeroHeader,
+								dockerUploadUUIDHeader,
+							},
+						},
+					},
+					Failures: []ResponseDescriptor{
+						{
+							Description: "There was an error processing the upload and it must be restarted.",
+							StatusCode:  http.StatusBadRequest,
+							ErrorCodes: []ErrorCode{
+								ErrorCodeDigestInvalid,
+								ErrorCodeNameInvalid,
+								ErrorCodeBlobUploadInvalid,
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format:      errorsBody,
+							},
+						},
+						unauthorizedResponsePush,
+						{
+							Description: "The upload is unknown to the registry. The upload must be restarted.",
+							StatusCode:  http.StatusNotFound,
+							ErrorCodes: []ErrorCode{
+								ErrorCodeBlobUploadUnknown,
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format:      errorsBody,
+							},
+						},
+					},
+				},
+				{
+					Name:        "Chunked upload",
+					Description: "Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range.",
+					PathParameters: []ParameterDescriptor{
+						nameParameterDescriptor,
+						uuidParameterDescriptor,

@@ -1143,26 +1210,15 @@ var routeDescriptors = []RouteDescriptor{
 			Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.",
 			Requests: []RequestDescriptor{
 				{
-					// TODO(stevvooe): Break this down into three separate requests:
-					// 	1. Complete an upload where all data has already been sent.
-					// 	2. Complete an upload where the entire body is in the PUT.
-					// 	3. Complete an upload where the final, partial chunk is the body.
-
-					Description: "Complete the upload, providing the _final_ chunk of data, if necessary. This method may take a body with all the data. If the `Content-Range` header is specified, it may include the final chunk. A request without a body will just complete the upload with previously uploaded content.",
+					Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.",
 					Headers: []ParameterDescriptor{
 						hostHeader,
 						authHeader,
-						{
-							Name:        "Content-Range",
-							Type:        "header",
-							Format:      "<start of range>-<end of range, inclusive>",
-							Description: "Range of bytes identifying the block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header. May be omitted if no data is provided.",
-						},
 						{
 							Name:        "Content-Length",
 							Type:        "integer",
-							Format:      "<length of chunk>",
-							Description: "Length of the chunk being uploaded, corresponding to the length of the request body. May be zero if no data is provided.",
+							Format:      "<length of data>",
+							Description: "Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.",
 						},
 					},
 					PathParameters: []ParameterDescriptor{

@@ -1173,7 +1229,7 @@ var routeDescriptors = []RouteDescriptor{
 						{
 							Name:     "digest",
 							Type:     "string",
-							Format:   "<tarsum>",
+							Format:   "<digest>",
 							Regexp:   digest.DigestRegexp,
 							Required: true,
 							Description: `Digest of uploaded blob.`,

@@ -1181,7 +1237,7 @@ var routeDescriptors = []RouteDescriptor{
 					},
 					Body: BodyDescriptor{
 						ContentType: "application/octet-stream",
-						Format:      "<binary chunk>",
+						Format:      "<binary data>",
 					},
 					Successes: []ResponseDescriptor{
 						{

@@ -1232,24 +1288,6 @@ var routeDescriptors = []RouteDescriptor{
 								Format:      errorsBody,
 							},
 						},
-						{
-							Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. The contents of the `Range` header may be used to resolve the condition.",
-							StatusCode:  http.StatusRequestedRangeNotSatisfiable,
-							Headers: []ParameterDescriptor{
-								{
-									Name:        "Location",
-									Type:        "url",
-									Format:      "/v2/<name>/blobs/uploads/<uuid>",
-									Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
-								},
-								{
-									Name:        "Range",
-									Type:        "header",
-									Format:      "0-<offset>",
-									Description: "Range indicating the current progress of the upload.",
-								},
-							},
-						},
 					},
 				},
 			},

|
@ -62,7 +62,12 @@ func NewURLBuilderFromRequest(r *http.Request) *URLBuilder {
|
|||
host := r.Host
|
||||
forwardedHost := r.Header.Get("X-Forwarded-Host")
|
||||
if len(forwardedHost) > 0 {
|
||||
host = forwardedHost
|
||||
// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
|
||||
// comma-separated list of hosts, to which each proxy appends the
|
||||
// requested host. We want to grab the first from this comma-separated
|
||||
// list.
|
||||
hosts := strings.SplitN(forwardedHost, ",", 2)
|
||||
host = strings.TrimSpace(hosts[0])
|
||||
}
|
||||
|
||||
basePath := routeDescriptorsMap[RouteNameBase].Path
|
||||
|
|
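The behavior of this fix is easy to demonstrate in isolation: `SplitN` with a limit of 2 keeps everything before the first comma, and `TrimSpace` handles the `host, proxy` spacing exercised by the test cases in the next hunk.

```go
package main

import (
	"fmt"
	"strings"
)

// firstForwardedHost takes the first entry of a comma-separated
// X-Forwarded-Host value, the same approach as the fix above.
func firstForwardedHost(forwarded string) string {
	hosts := strings.SplitN(forwarded, ",", 2)
	return strings.TrimSpace(hosts[0])
}

func main() {
	fmt.Println(firstForwardedHost("first.example.com"))                     // first.example.com
	fmt.Println(firstForwardedHost("first.example.com, proxy1.example.com")) // first.example.com
}
```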
|
@ -151,6 +151,12 @@ func TestBuilderFromRequest(t *testing.T) {
|
|||
forwardedProtoHeader := make(http.Header, 1)
|
||||
forwardedProtoHeader.Set("X-Forwarded-Proto", "https")
|
||||
|
||||
forwardedHostHeader1 := make(http.Header, 1)
|
||||
forwardedHostHeader1.Set("X-Forwarded-Host", "first.example.com")
|
||||
|
||||
forwardedHostHeader2 := make(http.Header, 1)
|
||||
forwardedHostHeader2.Set("X-Forwarded-Host", "first.example.com, proxy1.example.com")
|
||||
|
||||
testRequests := []struct {
|
||||
request *http.Request
|
||||
base string
|
||||
|
@ -163,6 +169,14 @@ func TestBuilderFromRequest(t *testing.T) {
|
|||
request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
|
||||
base: "https://example.com",
|
||||
},
|
||||
{
|
||||
request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader1},
|
||||
base: "http://first.example.com",
|
||||
},
|
||||
{
|
||||
request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2},
|
||||
base: "http://first.example.com",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tr := range testRequests {
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
// An access controller has a simple interface with a single `Authorized`
|
||||
// method which checks that a given request is authorized to perform one or
|
||||
// more actions on one or more resources. This method should return a non-nil
|
||||
// error if the requset is not authorized.
|
||||
// error if the request is not authorized.
|
||||
//
|
||||
// An implementation registers its access controller by name with a constructor
|
||||
// which accepts an options map for configuring the access controller.
|
||||
|
@ -50,7 +50,7 @@ type Resource struct {
|
|||
}
|
||||
|
||||
// Access describes a specific action that is
|
||||
// requested or allowed for a given recource.
|
||||
// requested or allowed for a given resource.
|
||||
type Access struct {
|
||||
Resource
|
||||
Action string
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
|
|||
)
|
||||
|
||||
// joseBase64UrlEncode encodes the given data using the standard base64 url
|
||||
// encoding format but with all trailing '=' characters ommitted in accordance
|
||||
// encoding format but with all trailing '=' characters omitted in accordance
|
||||
// with the jose specification.
|
||||
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
|
||||
func joseBase64UrlEncode(b []byte) string {
|
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"code.google.com/p/go-uuid/uuid"
|
||||
"github.com/docker/distribution/configuration"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
|
@ -93,7 +94,7 @@ func TestURLPrefix(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
// TestLayerAPI conducts a full of the of the layer api.
|
||||
// TestLayerAPI conducts a full test of the of the layer api.
|
||||
func TestLayerAPI(t *testing.T) {
|
||||
// TODO(stevvooe): This test code is complete junk but it should cover the
|
||||
// complete flow. This must be broken down and checked against the
|
||||
|
@ -209,6 +210,13 @@ func TestLayerAPI(t *testing.T) {
|
|||
uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName)
|
||||
pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)
|
||||
|
||||
// ------------------------------------------
|
||||
// Now, push just a chunk
|
||||
layerFile.Seek(0, 0)
|
||||
|
||||
uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName)
|
||||
uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength)
|
||||
finishUpload(t, env.builder, imageName, uploadURLBase, dgst)
|
||||
// ------------------------
|
||||
// Use a head request to see if the layer exists.
|
||||
resp, err = http.Head(layerURL)
|
||||
|
@ -246,6 +254,16 @@ func TestLayerAPI(t *testing.T) {
|
|||
t.Fatalf("response body did not pass verification")
|
||||
}
|
||||
|
||||
// ----------------
|
||||
// Fetch the layer with an invalid digest
|
||||
badURL := strings.Replace(layerURL, "tarsum", "trsum", 1)
|
||||
resp, err = http.Get(badURL)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error fetching layer: %v", err)
|
||||
}
|
||||
|
||||
checkResponse(t, "fetching layer bad digest", resp, http.StatusBadRequest)
|
||||
|
||||
// Missing tests:
|
||||
// - Upload the same tarsum file under and different repository and
|
||||
// ensure the content remains uncorrupted.
|
||||
|
@ -306,10 +324,12 @@ func TestManifestAPI(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
unsignedManifest.History = generateHistory(t, len(unsignedManifest.FSLayers), false)
|
||||
|
||||
resp = putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest)
|
||||
defer resp.Body.Close()
|
||||
checkResponse(t, "posting unsigned manifest", resp, http.StatusBadRequest)
|
||||
_, p, counts := checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp,
|
||||
_, p, counts := checkBodyHasErrorCodes(t, "putting unsigned manifest", resp,
|
||||
v2.ErrorCodeManifestUnverified, v2.ErrorCodeBlobUnknown, v2.ErrorCodeDigestInvalid)
|
||||
|
||||
expectedCounts := map[v2.ErrorCode]int{
|
||||
|
@ -344,8 +364,10 @@ func TestManifestAPI(t *testing.T) {
|
|||
pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs)
|
||||
}
|
||||
|
||||
// -------------------
|
||||
// Push the signed manifest with all layers pushed.
|
||||
// -----------------------
|
||||
// mostly valid, but we have an extra parent point in history.
|
||||
unsignedManifest.History = generateHistory(t, len(unsignedManifest.FSLayers), true)
|
||||
|
||||
signedManifest, err := manifest.Sign(unsignedManifest, env.pk)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error signing manifest: %v", err)
|
||||
|
@ -360,6 +382,36 @@ func TestManifestAPI(t *testing.T) {
|
|||
manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String())
|
||||
checkErr(t, err, "building manifest url")
|
||||
|
||||
resp = putManifest(t, "putting signed manifest with bad parent", manifestURL, signedManifest)
|
||||
checkResponse(t, "putting signed manifest with bad parent", resp, http.StatusBadRequest)
|
||||
_, p, counts = checkBodyHasErrorCodes(t, "putting unsigned manifest with bad parent", resp, v2.ErrorCodeManifestInvalid)
|
||||
|
||||
expectedCounts = map[v2.ErrorCode]int{
|
||||
v2.ErrorCodeManifestInvalid: 1,
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(counts, expectedCounts) {
|
||||
t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p))
|
||||
}
|
||||
|
||||
// -------------------
|
||||
// Push the signed manifest with all layers pushed.
|
||||
unsignedManifest.History = generateHistory(t, len(unsignedManifest.FSLayers), false)
|
||||
|
||||
signedManifest, err = manifest.Sign(unsignedManifest, env.pk)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error signing manifest: %v", err)
|
||||
}
|
||||
|
||||
payload, err = signedManifest.Payload()
|
||||
checkErr(t, err, "getting manifest payload")
|
||||
|
||||
dgst, err = digest.FromBytes(payload)
|
||||
checkErr(t, err, "digesting manifest")
|
||||
|
||||
manifestDigestURL, err = env.builder.BuildManifestURL(imageName, dgst.String())
|
||||
checkErr(t, err, "building manifest url")
|
||||
|
||||
resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest)
|
||||
checkResponse(t, "putting signed manifest", resp, http.StatusAccepted)
|
||||
checkHeaders(t, resp, http.Header{
|
||||
|
@ -606,6 +658,75 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest,
|
|||
return resp.Header.Get("Location")
|
||||
}
|
||||
|
||||
func finishUpload(t *testing.T, ub *v2.URLBuilder, name string, uploadURLBase string, dgst digest.Digest) string {
|
||||
resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error doing push layer request: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated)
|
||||
|
||||
expectedLayerURL, err := ub.BuildBlobURL(name, dgst)
|
||||
if err != nil {
|
||||
t.Fatalf("error building expected layer url: %v", err)
|
||||
}
|
||||
|
||||
checkHeaders(t, resp, http.Header{
|
||||
"Location": []string{expectedLayerURL},
|
||||
"Content-Length": []string{"0"},
|
||||
"Docker-Content-Digest": []string{dgst.String()},
|
||||
})
|
||||
|
||||
return resp.Header.Get("Location")
|
||||
}
|
||||
|
||||
func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Response, digest.Digest, error) {
|
||||
u, err := url.Parse(uploadURLBase)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error parsing pushLayer url: %v", err)
|
||||
}
|
||||
|
||||
u.RawQuery = url.Values{
|
||||
"_state": u.Query()["_state"],
|
||||
}.Encode()
|
||||
|
||||
uploadURL := u.String()
|
||||
|
||||
digester := digest.NewCanonicalDigester()
|
||||
|
||||
req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error creating new request: %v", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/octet-stream")
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
|
||||
return resp, digester.Digest(), err
|
||||
}
|
||||
|
||||
func pushChunk(t *testing.T, ub *v2.URLBuilder, name string, uploadURLBase string, body io.Reader, length int64) (string, digest.Digest) {
|
||||
resp, dgst, err := doPushChunk(t, uploadURLBase, body)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error doing push layer request: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
checkResponse(t, "putting chunk", resp, http.StatusAccepted)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("error generating sha256 digest of body")
|
||||
}
|
||||
|
||||
checkHeaders(t, resp, http.Header{
|
||||
"Range": []string{fmt.Sprintf("0-%d", length-1)},
|
||||
"Content-Length": []string{"0"},
|
||||
})
|
||||
|
||||
return resp.Header.Get("Location"), dgst
|
||||
}
|
||||
|
||||
func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) {
|
||||
if resp.StatusCode != expectedStatus {
|
||||
t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus)
|
||||
|
@ -705,3 +826,41 @@ func checkErr(t *testing.T, err error, msg string) {
|
|||
t.Fatalf("unexpected error %s: %v", msg, err)
|
||||
}
|
||||
}
|
||||
|
||||
// generateHistory creates a valid history entry of length n.
|
||||
func generateHistory(t *testing.T, n int, extraParent bool) []manifest.History {
|
||||
var images []map[string]interface{}
|
||||
|
||||
// first pass: create images entries.
|
||||
for i := 0; i < n; i++ {
|
||||
// simulate correct id -> parent links in v1Compatibility, using uuids.
|
||||
image := map[string]interface{}{
|
||||
"id": uuid.New(),
|
||||
}
|
||||
|
||||
images = append(images, image)
|
||||
}
|
||||
|
||||
var history []manifest.History
|
||||
|
||||
for i, image := range images {
|
||||
if i+1 < len(images) {
|
||||
image["parent"] = images[i+1]["id"]
|
||||
}
|
||||
|
||||
if extraParent && i == len(images)-1 {
|
||||
image["parent"] = uuid.New()
|
||||
}
|
||||
|
||||
p, err := json.Marshal(image)
|
||||
if err != nil {
|
||||
t.Fatalf("error generating image json: %v", err)
|
||||
}
|
||||
|
||||
history = append(history, manifest.History{
|
||||
V1Compatibility: string(p),
|
||||
})
|
||||
}
|
||||
|
||||
return history
|
||||
}
|
||||
|
|
|
@ -81,7 +81,18 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App
|
|||
panic(err)
|
||||
}
|
||||
|
||||
startUploadPurger(app.driver, ctxu.GetLogger(app))
|
||||
purgeConfig := uploadPurgeDefaultConfig()
|
||||
if mc, ok := configuration.Storage["maintenance"]; ok {
|
||||
for k, v := range mc {
|
||||
switch k {
|
||||
case "uploadpurging":
|
||||
purgeConfig = v.(map[interface{}]interface{})
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
startUploadPurger(app.driver, ctxu.GetLogger(app), purgeConfig)
|
||||
|
||||
app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"])
|
||||
if err != nil {
|
||||
|
@ -365,11 +376,25 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
|
|||
// future refactoring.
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
}
|
||||
app.logError(context, context.Errors)
|
||||
serveJSON(w, context.Errors)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (app *App) logError(context context.Context, errors v2.Errors) {
|
||||
for _, e := range errors.Errors {
|
||||
c := ctxu.WithValue(context, "err.code", e.Code)
|
||||
c = ctxu.WithValue(c, "err.message", e.Message)
|
||||
c = ctxu.WithValue(c, "err.detail", e.Detail)
|
||||
c = ctxu.WithLogger(c, ctxu.GetLogger(c,
|
||||
"err.code",
|
||||
"err.message",
|
||||
"err.detail"))
|
||||
ctxu.GetLogger(c).Errorf("An error occured")
|
||||
}
|
||||
}
|
||||
|
||||
// context constructs the context object for the application. This only be
|
||||
// called once per request.
|
||||
func (app *App) context(w http.ResponseWriter, r *http.Request) *Context {
|
||||
|
@ -554,26 +579,82 @@ func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []co
|
|||
return driver, nil
|
||||
}
|
||||
|
||||
// uploadPurgeDefaultConfig provides a default configuration for upload
|
||||
// purging to be used in the absence of configuration in the
|
||||
// confifuration file
|
||||
func uploadPurgeDefaultConfig() map[interface{}]interface{} {
|
||||
config := map[interface{}]interface{}{}
|
||||
config["enabled"] = true
|
||||
config["age"] = "168h"
|
||||
config["interval"] = "24h"
|
||||
config["dryrun"] = false
|
||||
return config
|
||||
}
|
||||
|
||||
func badPurgeUploadConfig(reason string) {
|
||||
panic(fmt.Sprintf("Unable to parse upload purge configuration: %s", reason))
|
||||
}
|
||||
|
||||
// startUploadPurger schedules a goroutine which will periodically
|
||||
// check upload directories for old files and delete them
|
||||
func startUploadPurger(storageDriver storagedriver.StorageDriver, log ctxu.Logger) {
|
||||
rand.Seed(time.Now().Unix())
|
||||
jitter := time.Duration(rand.Int()%60) * time.Minute
|
||||
func startUploadPurger(storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) {
|
||||
if config["enabled"] == false {
|
||||
return
|
||||
}
|
||||
|
||||
// Start with reasonable defaults
|
||||
// TODO:(richardscothern) make configurable
|
||||
purgeAge := time.Duration(7 * 24 * time.Hour)
|
||||
timeBetweenPurges := time.Duration(1 * 24 * time.Hour)
|
||||
var purgeAgeDuration time.Duration
|
||||
var err error
|
||||
purgeAge, ok := config["age"]
|
||||
if ok {
|
||||
ageStr, ok := purgeAge.(string)
|
||||
if !ok {
|
||||
badPurgeUploadConfig("age is not a string")
|
||||
}
|
||||
purgeAgeDuration, err = time.ParseDuration(ageStr)
|
||||
if err != nil {
|
||||
badPurgeUploadConfig(fmt.Sprintf("Cannot parse duration: %s", err.Error()))
|
||||
}
|
||||
} else {
|
||||
badPurgeUploadConfig("age missing")
|
||||
}
|
||||
|
||||
var intervalDuration time.Duration
|
||||
interval, ok := config["interval"]
|
||||
if ok {
|
||||
intervalStr, ok := interval.(string)
|
||||
if !ok {
|
||||
badPurgeUploadConfig("interval is not a string")
|
||||
}
|
||||
|
||||
intervalDuration, err = time.ParseDuration(intervalStr)
|
||||
if err != nil {
|
||||
badPurgeUploadConfig(fmt.Sprintf("Cannot parse interval: %s", err.Error()))
|
||||
}
|
||||
} else {
|
||||
badPurgeUploadConfig("interval missing")
|
||||
}
|
||||
|
||||
var dryRunBool bool
|
||||
dryRun, ok := config["dryrun"]
|
||||
if ok {
|
||||
dryRunBool, ok = dryRun.(bool)
|
||||
if !ok {
|
||||
badPurgeUploadConfig("cannot parse dryrun")
|
||||
}
|
||||
} else {
|
||||
badPurgeUploadConfig("dryrun missing")
|
||||
}
|
||||
|
||||
go func() {
|
||||
rand.Seed(time.Now().Unix())
|
||||
jitter := time.Duration(rand.Int()%60) * time.Minute
|
||||
log.Infof("Starting upload purge in %s", jitter)
|
||||
time.Sleep(jitter)
|
||||
|
||||
for {
|
||||
storage.PurgeUploads(storageDriver, time.Now().Add(-purgeAge), true)
|
||||
log.Infof("Starting upload purge in %s", timeBetweenPurges)
|
||||
time.Sleep(timeBetweenPurges)
|
||||
storage.PurgeUploads(storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool)
|
||||
log.Infof("Starting upload purge in %s", intervalDuration)
|
||||
time.Sleep(intervalDuration)
|
||||
}
|
||||
}()
|
||||
|
||||
}
|
||||
|
|
|
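To see what startUploadPurger receives, here is a hedged standalone sketch (the values are the defaults from uploadPurgeDefaultConfig above; an operator's maintenance/uploadpurging config section would override them) of the config map and how the age string becomes a duration:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Sketch: the map shape startUploadPurger receives; values here
        // mirror uploadPurgeDefaultConfig and are illustrative.
        purgeConfig := map[interface{}]interface{}{
            "enabled":  true,
            "age":      "168h", // uploads older than one week are purged
            "interval": "24h",  // one purge pass per day, after a random jitter
            "dryrun":   false,  // true would scan without deleting
        }

        age, err := time.ParseDuration(purgeConfig["age"].(string))
        if err != nil {
            panic(err)
        }
        fmt.Println("purge uploads started before", time.Now().Add(-age))
    }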
@@ -140,6 +140,8 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http
             imh.Errors.Push(v2.ErrorCodeBlobUnknown, verificationError.FSLayer)
         case distribution.ErrManifestUnverified:
             imh.Errors.Push(v2.ErrorCodeManifestUnverified)
+        case distribution.ErrManifestValidation:
+            imh.Errors.Push(v2.ErrorCodeManifestInvalid, verificationError.Error())
         default:
             if verificationError == digest.ErrDigestInvalidFormat {
                 // TODO(stevvooe): We really need to move all
@@ -23,11 +23,10 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
     }

     handler := http.Handler(handlers.MethodHandler{
-        "POST": http.HandlerFunc(luh.StartLayerUpload),
-        "GET":  http.HandlerFunc(luh.GetUploadStatus),
-        "HEAD": http.HandlerFunc(luh.GetUploadStatus),
-        // TODO(stevvooe): Must implement patch support.
-        // "PATCH": http.HandlerFunc(luh.PutLayerChunk),
+        "POST":  http.HandlerFunc(luh.StartLayerUpload),
+        "GET":   http.HandlerFunc(luh.GetUploadStatus),
+        "HEAD":  http.HandlerFunc(luh.GetUploadStatus),
+        "PATCH": http.HandlerFunc(luh.PatchLayerData),
         "PUT":    http.HandlerFunc(luh.PutLayerUploadComplete),
         "DELETE": http.HandlerFunc(luh.CancelLayerUpload),
     })

@@ -133,7 +132,7 @@ func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.R
     luh.Upload = upload
     defer luh.Upload.Close()

-    if err := luh.layerUploadResponse(w, r); err != nil {
+    if err := luh.layerUploadResponse(w, r, true); err != nil {
         w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
         luh.Errors.Push(v2.ErrorCodeUnknown, err)
         return

@@ -151,7 +150,10 @@ func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Re
         return
     }

-    if err := luh.layerUploadResponse(w, r); err != nil {
+    // TODO(dmcgowan): Set last argument to false in layerUploadResponse when
+    // resumable upload is supported. This will enable returning a non-zero
+    // range for clients to begin uploading at an offset.
+    if err := luh.layerUploadResponse(w, r, true); err != nil {
         w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
         luh.Errors.Push(v2.ErrorCodeUnknown, err)
         return

@@ -161,11 +163,45 @@ func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Re
     w.WriteHeader(http.StatusNoContent)
 }

-// PutLayerUploadComplete takes the final request of a layer upload. The final
-// chunk may include all the layer data, the final chunk of layer data or no
-// layer data. Any data provided is received and verified. If successful, the
-// layer is linked into the blob store and 201 Created is returned with the
-// canonical url of the layer.
+// PatchLayerData writes data to an upload.
+func (luh *layerUploadHandler) PatchLayerData(w http.ResponseWriter, r *http.Request) {
+    if luh.Upload == nil {
+        w.WriteHeader(http.StatusNotFound)
+        luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown)
+        return
+    }
+
+    ct := r.Header.Get("Content-Type")
+    if ct != "" && ct != "application/octet-stream" {
+        w.WriteHeader(http.StatusBadRequest)
+        // TODO(dmcgowan): encode error
+        return
+    }
+
+    // TODO(dmcgowan): support Content-Range header to seek and write range
+
+    // Copy the data
+    if _, err := io.Copy(luh.Upload, r.Body); err != nil {
+        ctxu.GetLogger(luh).Errorf("unknown error copying into upload: %v", err)
+        w.WriteHeader(http.StatusInternalServerError)
+        luh.Errors.Push(v2.ErrorCodeUnknown, err)
+        return
+    }
+
+    if err := luh.layerUploadResponse(w, r, false); err != nil {
+        w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
+        luh.Errors.Push(v2.ErrorCodeUnknown, err)
+        return
+    }
+
+    w.WriteHeader(http.StatusAccepted)
+}
+
+// PutLayerUploadComplete takes the final request of a layer upload. The
+// request may include all the layer data or no layer data. Any data
+// provided is received and verified. If successful, the layer is linked
+// into the blob store and 201 Created is returned with the canonical
+// url of the layer.
 func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r *http.Request) {
     if luh.Upload == nil {
         w.WriteHeader(http.StatusNotFound)

@@ -190,15 +226,17 @@ func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r *
         return
     }

     // TODO(stevvooe): Check the incoming range header here, per the
     // specification. LayerUpload should be seeked (sought?) to that position.

-    // TODO(stevvooe): Consider checking the error on this copy.
-    // Theoretically, problems should be detected during verification but we
-    // may miss a root cause.
-
-    // Read in the final chunk, if any.
-    io.Copy(luh.Upload, r.Body)
+    // Read in the data, if any.
+    if _, err := io.Copy(luh.Upload, r.Body); err != nil {
+        ctxu.GetLogger(luh).Errorf("unknown error copying into upload: %v", err)
+        w.WriteHeader(http.StatusInternalServerError)
+        luh.Errors.Push(v2.ErrorCodeUnknown, err)
+        return
+    }

     layer, err := luh.Upload.Finish(dgst)
     if err != nil {

@@ -255,13 +293,19 @@ func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http.

 // layerUploadResponse provides a standard request for uploading layers and
 // chunk responses. This sets the correct headers but the response status is
-// left to the caller.
-func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request) error {
+// left to the caller. The fresh argument is used to ensure that new layer
+// uploads always start at a 0 offset. This allows disabling resumable push
+// by always returning a 0 offset on check status.
+func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error {

-    offset, err := luh.Upload.Seek(0, os.SEEK_CUR)
-    if err != nil {
-        ctxu.GetLogger(luh).Errorf("unable get current offset of layer upload: %v", err)
-        return err
+    var offset int64
+    if !fresh {
+        var err error
+        offset, err = luh.Upload.Seek(0, os.SEEK_CUR)
+        if err != nil {
+            ctxu.GetLogger(luh).Errorf("unable to get current offset of layer upload: %v", err)
+            return err
+        }
     }

     // TODO(stevvooe): Need a better way to manage the upload state automatically.

@@ -286,10 +330,15 @@ func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *htt
         return err
     }

+    endRange := offset
+    if endRange > 0 {
+        endRange = endRange - 1
+    }
+
     w.Header().Set("Docker-Upload-UUID", luh.UUID)
     w.Header().Set("Location", uploadURL)
     w.Header().Set("Content-Length", "0")
-    w.Header().Set("Range", fmt.Sprintf("0-%d", luh.State.Offset))
+    w.Header().Set("Range", fmt.Sprintf("0-%d", endRange))

     return nil
 }
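The Range arithmetic above is easy to get wrong at the edges, because the header reports an inclusive byte range. A small standalone sketch (mirroring the endRange computation, with made-up offsets):

    package main

    import "fmt"

    // rangeHeader mirrors the endRange computation in layerUploadResponse:
    // the Range header is inclusive, so an upload with `offset` bytes
    // received ends at byte offset-1, and a fresh upload reports 0-0.
    func rangeHeader(offset int64) string {
        endRange := offset
        if endRange > 0 {
            endRange = endRange - 1
        }
        return fmt.Sprintf("0-%d", endRange)
    }

    func main() {
        fmt.Println(rangeHeader(0))    // 0-0
        fmt.Println(rangeHeader(1))    // 0-0
        fmt.Println(rangeHeader(1024)) // 0-1023
    }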
@@ -94,6 +94,9 @@ func New(accountName, accountKey, container, realm string) (*Driver, error) {
 }

 // Implement the storagedriver.StorageDriver interface.
+func (d *driver) Name() string {
+    return driverName
+}

 // GetContent retrieves the content stored at "path" as a []byte.
 func (d *driver) GetContent(path string) ([]byte, error) {
@@ -34,7 +34,7 @@
 // }
 //
 // The type now implements StorageDriver, proxying through Base, without
-// exporting an unnessecary field.
+// exporting an unnecessary field.
 package base

 import (

@@ -53,7 +53,7 @@ type Base struct {
 // GetContent wraps GetContent of underlying storage driver.
 func (base *Base) GetContent(path string) ([]byte, error) {
     _, done := context.WithTrace(context.Background())
-    defer done("Base.GetContent")
+    defer done("%s.GetContent(%q)", base.Name(), path)

     if !storagedriver.PathRegexp.MatchString(path) {
         return nil, storagedriver.InvalidPathError{Path: path}

@@ -65,7 +65,7 @@ func (base *Base) GetContent(path string) ([]byte, error) {
 // PutContent wraps PutContent of underlying storage driver.
 func (base *Base) PutContent(path string, content []byte) error {
     _, done := context.WithTrace(context.Background())
-    defer done("Base.PutContent")
+    defer done("%s.PutContent(%q)", base.Name(), path)

     if !storagedriver.PathRegexp.MatchString(path) {
         return storagedriver.InvalidPathError{Path: path}

@@ -77,7 +77,7 @@ func (base *Base) PutContent(path string, content []byte) error {
 // ReadStream wraps ReadStream of underlying storage driver.
 func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) {
     _, done := context.WithTrace(context.Background())
-    defer done("Base.ReadStream")
+    defer done("%s.ReadStream(%q, %d)", base.Name(), path, offset)

     if offset < 0 {
         return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}

@@ -93,7 +93,7 @@ func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) {
 // WriteStream wraps WriteStream of underlying storage driver.
 func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) {
     _, done := context.WithTrace(context.Background())
-    defer done("Base.WriteStream")
+    defer done("%s.WriteStream(%q, %d)", base.Name(), path, offset)

     if offset < 0 {
         return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset}

@@ -109,7 +109,7 @@ func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn i
 // Stat wraps Stat of underlying storage driver.
 func (base *Base) Stat(path string) (storagedriver.FileInfo, error) {
     _, done := context.WithTrace(context.Background())
-    defer done("Base.Stat")
+    defer done("%s.Stat(%q)", base.Name(), path)

     if !storagedriver.PathRegexp.MatchString(path) {
         return nil, storagedriver.InvalidPathError{Path: path}

@@ -121,7 +121,7 @@ func (base *Base) Stat(path string) (storagedriver.FileInfo, error) {
 // List wraps List of underlying storage driver.
 func (base *Base) List(path string) ([]string, error) {
     _, done := context.WithTrace(context.Background())
-    defer done("Base.List")
+    defer done("%s.List(%q)", base.Name(), path)

     if !storagedriver.PathRegexp.MatchString(path) && path != "/" {
         return nil, storagedriver.InvalidPathError{Path: path}

@@ -133,7 +133,7 @@ func (base *Base) List(path string) ([]string, error) {
 // Move wraps Move of underlying storage driver.
 func (base *Base) Move(sourcePath string, destPath string) error {
     _, done := context.WithTrace(context.Background())
-    defer done("Base.Move")
+    defer done("%s.Move(%q, %q)", base.Name(), sourcePath, destPath)

     if !storagedriver.PathRegexp.MatchString(sourcePath) {
         return storagedriver.InvalidPathError{Path: sourcePath}

@@ -147,7 +147,7 @@ func (base *Base) Move(sourcePath string, destPath string) error {
 // Delete wraps Delete of underlying storage driver.
 func (base *Base) Delete(path string) error {
     _, done := context.WithTrace(context.Background())
-    defer done("Base.Move")
+    defer done("%s.Delete(%q)", base.Name(), path)

     if !storagedriver.PathRegexp.MatchString(path) {
         return storagedriver.InvalidPathError{Path: path}

@@ -159,7 +159,7 @@ func (base *Base) Delete(path string) error {
 // URLFor wraps URLFor of underlying storage driver.
 func (base *Base) URLFor(path string, options map[string]interface{}) (string, error) {
     _, done := context.WithTrace(context.Background())
-    defer done("Base.URLFor")
+    defer done("%s.URLFor(%q)", base.Name(), path)

     if !storagedriver.PathRegexp.MatchString(path) {
         return "", storagedriver.InvalidPathError{Path: path}
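The effect of the tracing change is easiest to see side by side. A trivial standalone sketch (the path is invented; "inmemory" is one of the driver names registered in this tree):

    package main

    import "fmt"

    func main() {
        // Before: every trace line was the same regardless of driver or args.
        fmt.Println("Base.GetContent")

        // After: the driver name and arguments are interpolated.
        name, path := "inmemory", "/docker/registry/v2/blobs"
        fmt.Printf("%s.GetContent(%q)\n", name, path)
    }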
@@ -71,6 +71,10 @@ func New(rootDirectory string) *Driver {

 // Implement the storagedriver.StorageDriver interface

+func (d *driver) Name() string {
+    return driverName
+}
+
 // GetContent retrieves the content stored at "path" as a []byte.
 func (d *driver) GetContent(path string) ([]byte, error) {
     rc, err := d.ReadStream(path, 0)
@@ -64,6 +64,10 @@ func New() *Driver {

 // Implement the storagedriver.StorageDriver interface.

+func (d *driver) Name() string {
+    return driverName
+}
+
 // GetContent retrieves the content stored at "path" as a []byte.
 func (d *driver) GetContent(path string) ([]byte, error) {
     d.mutex.RLock()
@@ -101,7 +101,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) {
         }
     case "ReadStream":
         path, _ := request.Parameters["Path"].(string)
-        // Depending on serialization method, Offset may be convereted to any int/uint type
+        // Depending on serialization method, Offset may be converted to any int/uint type
         offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int()
         reader, err := driver.ReadStream(path, offset)
         var response ReadStreamResponse

@@ -116,9 +116,9 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) {
         }
     case "WriteStream":
         path, _ := request.Parameters["Path"].(string)
-        // Depending on serialization method, Offset may be convereted to any int/uint type
+        // Depending on serialization method, Offset may be converted to any int/uint type
         offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int()
-        // Depending on serialization method, Size may be convereted to any int/uint type
+        // Depending on serialization method, Size may be converted to any int/uint type
         size := reflect.ValueOf(request.Parameters["Size"]).Convert(reflect.TypeOf(int64(0))).Int()
         reader, _ := request.Parameters["Reader"].(io.ReadCloser)
         err := driver.WriteStream(path, offset, size, reader)
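The reflect dance above normalizes whatever integer type the serialization layer produced. A minimal standalone sketch of the same conversion:

    package main

    import (
        "fmt"
        "reflect"
    )

    // toInt64 mirrors the conversion above: whatever int/uint type the
    // decoder produced, convert it to int64 before use.
    func toInt64(v interface{}) int64 {
        return reflect.ValueOf(v).Convert(reflect.TypeOf(int64(0))).Int()
    }

    func main() {
        fmt.Println(toInt64(int(42)))     // 42
        fmt.Println(toInt64(uint32(42)))  // 42
        fmt.Println(toInt64(int64(1024))) // 1024
    }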
@@ -20,12 +20,15 @@ import (
     "io"
     "io/ioutil"
     "net/http"
+    "reflect"
     "strconv"
     "strings"
+    "sync"
     "time"

     "github.com/AdRoll/goamz/aws"
     "github.com/AdRoll/goamz/s3"
+    "github.com/Sirupsen/logrus"
     storagedriver "github.com/docker/distribution/registry/storage/driver"
     "github.com/docker/distribution/registry/storage/driver/base"
     "github.com/docker/distribution/registry/storage/driver/factory"

@@ -72,6 +75,9 @@ type driver struct {
     ChunkSize     int64
     Encrypt       bool
     RootDirectory string
+
+    pool  sync.Pool // pool []byte buffers used for WriteStream
+    zeros []byte    // shared, zero-valued buffer used for WriteStream
 }

 type baseEmbed struct {

@@ -148,9 +154,23 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
     chunkSize := int64(defaultChunkSize)
     chunkSizeParam, ok := parameters["chunksize"]
     if ok {
-        chunkSize, ok = chunkSizeParam.(int64)
-        if !ok || chunkSize < minChunkSize {
-            return nil, fmt.Errorf("The chunksize parameter should be a number that is larger than 5*1024*1024")
+        switch v := chunkSizeParam.(type) {
+        case string:
+            vv, err := strconv.ParseInt(v, 0, 64)
+            if err != nil {
+                return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
+            }
+            chunkSize = vv
+        case int64:
+            chunkSize = v
+        case int, uint, int32, uint32, uint64:
+            chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
+        default:
+            return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
+        }
+
+        if chunkSize < minChunkSize {
+            return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
         }
     }

@@ -224,6 +244,11 @@ func New(params DriverParameters) (*Driver, error) {
         ChunkSize:     params.ChunkSize,
         Encrypt:       params.Encrypt,
         RootDirectory: params.RootDirectory,
+        zeros:         make([]byte, params.ChunkSize),
     }

+    d.pool.New = func() interface{} {
+        return make([]byte, d.ChunkSize)
+    }
+
     return &Driver{

@@ -237,6 +262,10 @@ func New(params DriverParameters) (*Driver, error) {

 // Implement the storagedriver.StorageDriver interface

+func (d *driver) Name() string {
+    return driverName
+}
+
 // GetContent retrieves the content stored at "path" as a []byte.
 func (d *driver) GetContent(path string) ([]byte, error) {
     content, err := d.Bucket.Get(d.s3Path(path))

@@ -281,14 +310,14 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total
     var putErrChan chan error
     parts := []s3.Part{}
     var part s3.Part
+    done := make(chan struct{}) // stopgap to free up waiting goroutines

     multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions())
     if err != nil {
         return 0, err
     }

-    buf := make([]byte, d.ChunkSize)
-    zeroBuf := make([]byte, d.ChunkSize)
+    buf := d.getbuf()

     // We never want to leave a dangling multipart upload, our only consistent state is
     // when there is a whole object at path. This is in order to remain consistent with

@@ -314,6 +343,9 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total
             }
         }
     }
+
+    d.putbuf(buf) // needs to be here to pick up new buf value
+    close(done)   // free up any waiting goroutines
 }()

 // Fills from 0 to total from current

@@ -367,21 +399,77 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total
         }

         go func(bytesRead int, from int64, buf []byte) {
-            // parts and partNumber are safe, because this function is the only one modifying them and we
-            // force it to be executed serially.
-            if bytesRead > 0 {
-                part, putErr := multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]))
-                if putErr != nil {
-                    putErrChan <- putErr
+            defer d.putbuf(buf) // this buffer gets dropped after this call
+
+            // DRAGONS(stevvooe): There are a few things one might want to know
+            // about this section. First, the putErrChan is expecting an error
+            // and a nil or just a nil to come through the channel. This is
+            // covered by the silly defer below. The other aspect is the s3
+            // retry backoff to deal with RequestTimeout errors. Even though
+            // the underlying s3 library should handle it, it doesn't seem to
+            // be part of the shouldRetry function (see AdRoll/goamz/s3).
+            defer func() {
+                select {
+                case putErrChan <- nil: // for some reason, we do this no matter what.
+                case <-done:
+                    return // ensure we don't leak the goroutine
+                }
+            }()
+
+            if bytesRead <= 0 {
+                return
+            }
+
+            var err error
+            var part s3.Part
+
+        loop:
+            for retries := 0; retries < 5; retries++ {
+                part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]))
+                if err == nil {
+                    break // success!
                 }

-                parts = append(parts, part)
-                partNumber++
+                // NOTE(stevvooe): This retry code tries to only retry under
+                // conditions where the s3 package does not. We may add s3
+                // error codes to the below if we see others bubble up in the
+                // application. Right now, the most troubling is
+                // RequestTimeout, which seems to be triggered only when a tcp
+                // connection to s3 slows to a crawl. If the RequestTimeout
+                // ends up getting added to the s3 library and we don't see
+                // other errors, this retry loop can be removed.
+                switch err := err.(type) {
+                case *s3.Error:
+                    switch err.Code {
+                    case "RequestTimeout":
+                        // allow retries on only this error.
+                    default:
+                        break loop
+                    }
+                }
+
+                backoff := 100 * time.Millisecond * time.Duration(retries+1)
+                logrus.Errorf("error putting part, retrying after %v: %v", backoff.String(), err)
+                time.Sleep(backoff)
             }
-            putErrChan <- nil
+
+            if err != nil {
+                logrus.Errorf("error putting part, aborting: %v", err)
+                select {
+                case putErrChan <- err:
+                case <-done:
+                    return // don't leak the goroutine
+                }
+            }
+
+            // parts and partNumber are safe, because this function is the
+            // only one modifying them and we force it to be executed
+            // serially.
+            parts = append(parts, part)
+            partNumber++
         }(bytesRead, from, buf)

-        buf = make([]byte, d.ChunkSize)
+        buf = d.getbuf() // use a new buffer for the next call
         return nil
     }

@@ -429,7 +517,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total
     fromZeroFillSmall := func(from, to int64) error {
         bytesRead = 0
         for from+int64(bytesRead) < to {
-            nn, err := bytes.NewReader(zeroBuf).Read(buf[from+int64(bytesRead) : to])
+            nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to])
             bytesRead += nn
             if err != nil {
                 return err

@@ -443,7 +531,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total
     fromZeroFillLarge := func(from, to int64) error {
         bytesRead64 := int64(0)
         for to-(from+bytesRead64) >= d.ChunkSize {
-            part, err := multi.PutPart(int(partNumber), bytes.NewReader(zeroBuf))
+            part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros))
             if err != nil {
                 return err
             }

@@ -724,3 +812,13 @@ func getPermissions() s3.ACL {
 func (d *driver) getContentType() string {
     return "application/octet-stream"
 }
+
+// getbuf returns a buffer from the driver's pool with length d.ChunkSize.
+func (d *driver) getbuf() []byte {
+    return d.pool.Get().([]byte)
+}
+
+func (d *driver) putbuf(p []byte) {
+    copy(p, d.zeros)
+    d.pool.Put(p)
+}
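getbuf and putbuf above are a textbook sync.Pool pattern; zeroing on return presumably keeps a recycled buffer from leaking stale bytes into later writes that assume zeroed regions. A standalone sketch with a tiny chunk size:

    package main

    import (
        "fmt"
        "sync"
    )

    const chunkSize = 8 // illustration only; the driver uses d.ChunkSize

    var zeros = make([]byte, chunkSize)

    var pool = sync.Pool{
        New: func() interface{} { return make([]byte, chunkSize) },
    }

    func getbuf() []byte { return pool.Get().([]byte) }

    // putbuf zeroes the buffer before recycling it, so a reused buffer
    // never carries stale bytes into a later write.
    func putbuf(p []byte) {
        copy(p, zeros)
        pool.Put(p)
    }

    func main() {
        buf := getbuf()
        copy(buf, []byte("stale!!!"))
        putbuf(buf)

        fmt.Println(getbuf()) // may reuse buf: [0 0 0 0 0 0 0 0]
    }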
@@ -35,6 +35,11 @@ const CurrentVersion Version = "0.1"
 // StorageDriver defines methods that a Storage Driver must implement for a
 // filesystem-like key/value object storage.
 type StorageDriver interface {
+    // Name returns the human-readable "name" of the driver, useful in error
+    // messages and logging. By convention, this will just be the registration
+    // name, but drivers may provide other information here.
+    Name() string
+
     // GetContent retrieves the content stored at "path" as a []byte.
     // This should primarily be used for small objects.
     GetContent(path string) ([]byte, error)
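Any out-of-tree driver now has to supply the new method as well. A minimal sketch (the driver name is hypothetical; the remaining StorageDriver methods are unchanged and omitted):

    package main

    import "fmt"

    const driverName = "mydriver" // hypothetical registration name

    type driver struct{}

    // Name satisfies the new StorageDriver requirement; by convention it
    // returns the registration name.
    func (d *driver) Name() string { return driverName }

    func main() {
        fmt.Println((&driver{}).Name()) // mydriver
    }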
@@ -435,7 +435,7 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64)
     c.Assert(err, check.IsNil)
     c.Assert(received, check.DeepEquals, fullContents)

-    // Writing past size of file extends file (no offest error). We would like
+    // Writing past size of file extends file (no offset error). We would like
     // to write chunk 4 one chunk length past chunk 3. It should be successful
     // and the resulting file will be 5 chunks long, with a chunk of all
     // zeros.
@@ -336,7 +336,7 @@ func seekerSize(seeker io.ReadSeeker) (int64, error) {

 // createTestLayer creates a simple test layer in the provided driver under
 // tarsum dgst, returning the sha256 digest location. This is implemented
-// peicemeal and should probably be replaced by the uploader when it's ready.
+// piecemeal and should probably be replaced by the uploader when it's ready.
 func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) {
     h := sha256.New()
     rd := io.TeeReader(content, h)
@@ -65,7 +65,7 @@ func (ls *layerStore) Upload() (distribution.LayerUpload, error) {
     uuid := uuid.New()
     startedAt := time.Now().UTC()

-    path, err := ls.repository.registry.pm.path(uploadDataPathSpec{
+    path, err := ls.repository.pm.path(uploadDataPathSpec{
         name: ls.repository.Name(),
         uuid: uuid,
     })

@@ -74,7 +74,7 @@ func (ls *layerStore) Upload() (distribution.LayerUpload, error) {
         return nil, err
     }

-    startedAtPath, err := ls.repository.registry.pm.path(uploadStartedAtPathSpec{
+    startedAtPath, err := ls.repository.pm.path(uploadStartedAtPathSpec{
         name: ls.repository.Name(),
         uuid: uuid,
     })

@@ -95,7 +95,7 @@ func (ls *layerStore) Upload() (distribution.LayerUpload, error) {
 // state of the upload.
 func (ls *layerStore) Resume(uuid string) (distribution.LayerUpload, error) {
     ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Resume")
-    startedAtPath, err := ls.repository.registry.pm.path(uploadStartedAtPathSpec{
+    startedAtPath, err := ls.repository.pm.path(uploadStartedAtPathSpec{
         name: ls.repository.Name(),
         uuid: uuid,
     })

@@ -152,7 +152,7 @@ func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (di

 func (ls *layerStore) path(dgst digest.Digest) (string, error) {
     // We must traverse this path through the link to enforce ownership.
-    layerLinkPath, err := ls.repository.registry.pm.path(layerLinkPathSpec{name: ls.repository.Name(), digest: dgst})
+    layerLinkPath, err := ls.repository.pm.path(layerLinkPathSpec{name: ls.repository.Name(), digest: dgst})
     if err != nil {
         return "", err
     }
@@ -46,16 +46,37 @@ func (lw *layerWriter) StartedAt() time.Time {
 // uploaded layer. The final size and checksum are validated against the
 // contents of the uploaded layer. The checksum should be provided in the
 // format <algorithm>:<hex digest>.
-func (lw *layerWriter) Finish(digest digest.Digest) (distribution.Layer, error) {
+func (lw *layerWriter) Finish(dgst digest.Digest) (distribution.Layer, error) {
     ctxu.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Finish")

     if err := lw.bufferedFileWriter.Close(); err != nil {
         return nil, err
     }

-    canonical, err := lw.validateLayer(digest)
-    if err != nil {
+    var (
+        canonical digest.Digest
+        err       error
+    )
+
+    // HACK(stevvooe): To deal with s3's lack of consistency, attempt to retry
+    // validation on failure. Three attempts are made, backing off
+    // retries*100ms each time.
+    for retries := 0; ; retries++ {
+        canonical, err = lw.validateLayer(dgst)
+        if err == nil {
+            break
+        }
+
+        ctxu.GetLoggerWithField(lw.layerStore.repository.ctx, "retries", retries).
+            Errorf("error validating layer: %v", err)
+
+        if retries < 3 {
+            time.Sleep(100 * time.Millisecond * time.Duration(retries+1))
+            continue
+        }
+
         return nil, err

     }

     if err := lw.moveLayer(canonical); err != nil {

@@ -64,7 +85,7 @@ func (lw *layerWriter) Finish(digest digest.Digest) (distribution.Layer, error)
     }

     // Link the layer blob into the repository.
-    if err := lw.linkLayer(canonical, digest); err != nil {
+    if err := lw.linkLayer(canonical, dgst); err != nil {
         return nil, err
     }

@@ -137,7 +158,7 @@ type hashStateEntry struct {

 // getStoredHashStates returns a slice of hashStateEntries for this upload.
 func (lw *layerWriter) getStoredHashStates() ([]hashStateEntry, error) {
-    uploadHashStatePathPrefix, err := lw.layerStore.repository.registry.pm.path(uploadHashStatePathSpec{
+    uploadHashStatePathPrefix, err := lw.layerStore.repository.pm.path(uploadHashStatePathSpec{
         name: lw.layerStore.repository.Name(),
         uuid: lw.uuid,
         alg:  lw.resumableDigester.Digest().Algorithm(),

@@ -182,7 +203,7 @@ func (lw *layerWriter) resumeHashAt(offset int64) error {
     }

     if offset == int64(lw.resumableDigester.Len()) {
-        // State of digester is already at the requseted offset.
+        // State of digester is already at the requested offset.
         return nil
     }

@@ -250,7 +271,7 @@ func (lw *layerWriter) resumeHashAt(offset int64) error {
 }

 func (lw *layerWriter) storeHashState() error {
-    uploadHashStatePath, err := lw.layerStore.repository.registry.pm.path(uploadHashStatePathSpec{
+    uploadHashStatePath, err := lw.layerStore.repository.pm.path(uploadHashStatePathSpec{
         name: lw.layerStore.repository.Name(),
         uuid: lw.uuid,
         alg:  lw.resumableDigester.Digest().Algorithm(),

@@ -324,6 +345,8 @@ func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error)
     }

     if !verified {
+        ctxu.GetLoggerWithField(lw.layerStore.repository.ctx, "canonical", dgst).
+            Errorf("canonical digest does not match provided digest")
         return "", distribution.ErrLayerInvalidDigest{
             Digest: dgst,
             Reason: fmt.Errorf("content does not match digest"),

@@ -337,7 +360,7 @@ func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error)
 // identified by dgst. The layer should be validated before commencing the
 // move.
 func (lw *layerWriter) moveLayer(dgst digest.Digest) error {
-    blobPath, err := lw.layerStore.repository.registry.pm.path(blobDataPathSpec{
+    blobPath, err := lw.layerStore.repository.pm.path(blobDataPathSpec{
         digest: dgst,
     })

@@ -403,7 +426,7 @@ func (lw *layerWriter) linkLayer(canonical digest.Digest, aliases ...digest.Dige
         }
         seenDigests[dgst] = struct{}{}

-        layerLinkPath, err := lw.layerStore.repository.registry.pm.path(layerLinkPathSpec{
+        layerLinkPath, err := lw.layerStore.repository.pm.path(layerLinkPathSpec{
             name:   lw.layerStore.repository.Name(),
             digest: dgst,
         })

@@ -412,7 +435,7 @@ func (lw *layerWriter) linkLayer(canonical digest.Digest, aliases ...digest.Dige
             return err
         }

-        if err := lw.layerStore.repository.registry.driver.PutContent(layerLinkPath, []byte(canonical)); err != nil {
+        if err := lw.layerStore.repository.driver.PutContent(layerLinkPath, []byte(canonical)); err != nil {
             return err
         }
     }

@@ -424,7 +447,7 @@ func (lw *layerWriter) linkLayer(canonical digest.Digest, aliases ...digest.Dige
 // instance. An error will be returned if the clean up cannot proceed. If the
 // resources are already not present, no error will be returned.
 func (lw *layerWriter) removeResources() error {
-    dataPath, err := lw.layerStore.repository.registry.pm.path(uploadDataPathSpec{
+    dataPath, err := lw.layerStore.repository.pm.path(uploadDataPathSpec{
         name: lw.layerStore.repository.Name(),
         uuid: lw.uuid,
     })
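The loop above is a linear backoff: 100ms, 200ms, 300ms, then give up. A standalone sketch of the same shape, with a stand-in for validateLayer that imitates a read-after-write consistency lag:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // validate is a stand-in for lw.validateLayer: it fails twice, then
    // succeeds, imitating content that takes a moment to become visible.
    var calls int

    func validate() error {
        calls++
        if calls < 3 {
            return errors.New("content not yet visible")
        }
        return nil
    }

    func main() {
        for retries := 0; ; retries++ {
            err := validate()
            if err == nil {
                break
            }
            if retries < 3 {
                // back off (retries+1)*100ms before trying again
                time.Sleep(100 * time.Millisecond * time.Duration(retries+1))
                continue
            }
            fmt.Println("giving up:", err)
            return
        }
        fmt.Println("validated after", calls, "attempts")
    }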
@@ -1,6 +1,7 @@
 package storage

 import (
+    "encoding/json"
     "fmt"

     "github.com/docker/distribution"

@@ -102,6 +103,48 @@ func (ms *manifestStore) verifyManifest(mnfst *manifest.SignedManifest) error {
         }
     }

+    if len(mnfst.FSLayers) == 0 || len(mnfst.History) == 0 {
+        errs = append(errs, distribution.ErrManifestValidation{
+            Reason: "no layers present"})
+    }
+
+    if len(mnfst.FSLayers) != len(mnfst.History) {
+        errs = append(errs, distribution.ErrManifestValidation{
+            Reason: "mismatched layers and history"})
+    }
+
+    // image provides a local type for validating the image relationship.
+    type image struct {
+        ID     string `json:"id"`
+        Parent string `json:"parent"`
+    }
+
+    // Process the history portion to ensure that the parent links are
+    // correctly represented. We serialize the image json, then walk the
+    // entries, checking the parent link.
+    var images []image
+    for _, entry := range mnfst.History {
+        var im image
+        if err := json.Unmarshal([]byte(entry.V1Compatibility), &im); err != nil {
+            errs = append(errs, err)
+        }
+
+        images = append(images, im)
+    }
+
+    // go back through each image, checking the parent link and rank
+    var parentID string
+    for i := len(images) - 1; i >= 0; i-- {
+        // ensure that the parent id matches but only if there is a parent.
+        // There are cases where successive layers don't fill in the parents.
+        if images[i].Parent != parentID {
+            errs = append(errs, distribution.ErrManifestValidation{
+                Reason: "parent not adjacent in manifest"})
+        }
+
+        parentID = images[i].ID
+    }
+
     for _, fsLayer := range mnfst.FSLayers {
         exists, err := ms.repository.Layers().Exists(fsLayer.BlobSum)
         if err != nil {
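To make the parent walk concrete, a standalone sketch (ids invented) of the same check over a three-entry history, newest first:

    package main

    import "fmt"

    type image struct {
        ID     string `json:"id"`
        Parent string `json:"parent"`
    }

    func main() {
        // Newest first, as in a v2 manifest. The oldest entry has no parent;
        // every other entry must point at the id of the entry after it.
        images := []image{
            {ID: "c3", Parent: "b2"},
            {ID: "b2", Parent: "a1"},
            {ID: "a1"},
        }

        var parentID string
        for i := len(images) - 1; i >= 0; i-- {
            if images[i].Parent != parentID {
                fmt.Println("parent not adjacent in manifest at", images[i].ID)
                return
            }
            parentID = images[i].ID
        }
        fmt.Println("history is well-formed")
    }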
@@ -2,15 +2,16 @@ package storage

 import (
     "bytes"
+    "encoding/json"
     "io"
     "reflect"
     "testing"

-    "github.com/docker/distribution/registry/storage/cache"
-
     "code.google.com/p/go-uuid/uuid"
     "github.com/docker/distribution"
     "github.com/docker/distribution/digest"
     "github.com/docker/distribution/manifest"
+    "github.com/docker/distribution/registry/storage/cache"
     "github.com/docker/distribution/registry/storage/driver"
     "github.com/docker/distribution/registry/storage/driver/inmemory"
     "github.com/docker/distribution/testutil"

@@ -103,6 +104,38 @@ func TestManifestStorage(t *testing.T) {
         t.Fatalf("error signing manifest: %v", err)
     }

+    // try to put the manifest initially. this will fail since we have not
+    // included history or pushed any layers.
+    err = ms.Put(sm)
+    if err == nil {
+        t.Fatalf("expected errors putting manifest with full verification")
+    }
+
+    switch err := err.(type) {
+    case distribution.ErrManifestVerification:
+        if len(err) != 4 {
+            t.Fatalf("expected 4 verification errors: %#v", err)
+        }
+
+        for _, err := range err {
+            switch err := err.(type) {
+            case distribution.ErrUnknownLayer, distribution.ErrManifestValidation:
+                // noop: we expect these errors
+            default:
+                t.Fatalf("unexpected error type: %v", err)
+            }
+        }
+    default:
+        t.Fatalf("unexpected error verifying manifest: %v", err)
+    }
+
+    m.History = generateHistory(t, len(m.FSLayers))
+    sm, err = manifest.Sign(&m, pk)
+    if err != nil {
+        t.Fatalf("unexpected error signing manifest with history: %v", err)
+    }
+
+    // we've fixed the missing history, try the push and fail on layer checks.
     err = ms.Put(sm)
     if err == nil {
         t.Fatalf("expected errors putting manifest")

@@ -288,3 +321,37 @@ func TestManifestStorage(t *testing.T) {
         t.Fatalf("unexpected error deleting manifest by digest: %v", err)
     }
 }
+
+// generateHistory creates a valid history entry of length n.
+func generateHistory(t *testing.T, n int) []manifest.History {
+    var images []map[string]interface{}
+
+    // first pass: create images entries.
+    for i := 0; i < n; i++ {
+        // simulate correct id -> parent links in v1Compatibility, using uuids.
+        image := map[string]interface{}{
+            "id": uuid.New(),
+        }
+
+        images = append(images, image)
+    }
+
+    var history []manifest.History
+
+    for i, image := range images {
+        if i+1 < len(images) {
+            image["parent"] = images[i+1]["id"]
+        }
+
+        p, err := json.Marshal(image)
+        if err != nil {
+            t.Fatalf("error generating image json: %v", err)
+        }
+
+        history = append(history, manifest.History{
+            V1Compatibility: string(p),
+        })
+    }
+
+    return history
+}
@@ -387,7 +387,7 @@ type layerLinkPathSpec struct {
 func (layerLinkPathSpec) pathSpec() {}

 // blobAlgorithmReplacer does some very simple path sanitization for user
-// input. Mostly, this is to provide some heirachry for tarsum digests. Paths
+// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths
 // should be "safe" before getting this far due to strict digest requirements
 // but we can add further path conversion here, if needed.
 var blobAlgorithmReplacer = strings.NewReplacer(