forked from TrueCloudLab/distribution

Import next-generation branch

Signed-off-by: Arnaud Porterie <arnaud.porterie@docker.com>

commit ae55a8232f

97 changed files with 14300 additions and 0 deletions

38 .drone.yml Normal file
@@ -0,0 +1,38 @@
image: dmp42/go:stable

script:
  # To be spoofed back into the test image
  - go get github.com/modocache/gover

  - go get -t ./...

  # Go fmt
  - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
  # Go lint
  - test -z "$(golint ./... | tee /dev/stderr)"
  # Go vet
  - go vet ./...
  # Go test
  - go test -v -race -cover ./...
  # Helper to concatenate reports
  - gover
  # Send to coveralls
  - goveralls -service drone.io -coverprofile=gover.coverprofile -repotoken {{COVERALLS_TOKEN}}

  # Do we want these as well?
  # - go get code.google.com/p/go.tools/cmd/goimports
  # - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
  # http://labix.org/gocheck

notify:
  email:
    recipients:
      - distribution@docker.com

  slack:
    team: docker
    channel: "#dt"
    username: mom
    token: {{SLACK_TOKEN}}
    on_success: true
    on_failure: true
54 CONTRIBUTING.md Normal file
@@ -0,0 +1,54 @@
# Contributing to the registry

## Are you having issues?

Please first try any of these support forums before opening an issue:

* irc #docker on freenode (archives: [https://botbot.me/freenode/docker/])
* https://forums.docker.com/
* if your problem is with the "hub" (the website and other user-facing components), or about automated builds, then please direct your issues to https://support.docker.com

## So, you found a bug?

First check if your problem was already reported in the issue tracker.

If it's already there, please refrain from adding "same here" comments - these add no value, only noise. **Said comments will quite often be deleted at sight**. On the other hand, if you have any technical, relevant information to add, by all means do!

Your issue is not there? Then please, create a ticket.

If possible, the following guidelines should be followed:

* try to come up with a minimal, simple to reproduce test-case
* try to add a title that succinctly describes the issue
* if you are running your own registry, please provide:
  * registry version
  * registry launch command used
  * registry configuration
  * registry logs
* in all cases:
  * `docker version` and `docker info`
  * run your docker daemon in debug mode (-D), and provide docker daemon logs

## You have a patch for a known bug, or a small correction?

Basic github workflow (fork, patch, make sure the tests pass, PR).

... and some simple rules to ensure quick merge:

* clearly point to the issue(s) you want to fix
* when possible, prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
* if you need to amend your PR following comments, squash instead of adding more commits

## You want some shiny new feature to be added?

Fork the project.

Create a new proposal in the folder `open-design/specs`, named `DEP_MY_AWESOME_PROPOSAL.md`, using `open-design/specs/TEMPLATE.md` as a starting point.

Then immediately submit this new file as a pull-request, in order to get early feedback.

Eventually, you will have to update your proposal to accommodate the feedback you received.

Usually, it's not advisable to start working too much on the implementation itself before the proposal receives sufficient feedback, since it can be significantly altered (or rejected).

Your implementation should then be submitted as a separate PR, which will be reviewed as well.
14 Dockerfile Normal file
@@ -0,0 +1,14 @@
FROM golang

COPY . /go/src/github.com/docker/docker-registry

# Fetch any dependencies to run the registry
RUN go get github.com/docker/docker-registry/...
RUN go install github.com/docker/docker-registry/cmd/registry

ENV CONFIG_PATH /etc/docker/registry/config.yml
COPY ./cmd/registry/config.yml $CONFIG_PATH

EXPOSE 5000
ENV PATH /go/bin
CMD registry $CONFIG_PATH
3 MAINTAINERS Normal file
@@ -0,0 +1,3 @@
Solomon Hykes <solomon@docker.com> (@shykes)
Olivier Gambier <olivier@docker.com> (@dmp42)
Sam Alba <sam@docker.com> (@samalba)
152 api/v2/descriptors.go Normal file
@@ -0,0 +1,152 @@
package v2

import "net/http"

// TODO(stevvooe): Add route descriptors for each named route, along with
// accepted methods, parameters, returned status codes and error codes.

// ErrorDescriptor provides relevant information about a given error code.
type ErrorDescriptor struct {
    // Code is the error code that this descriptor describes.
    Code ErrorCode

    // Value provides a unique, string key, often capitalized with
    // underscores, to identify the error code. This value is used as the
    // keyed value when serializing api errors.
    Value string

    // Message is a short, human readable description of the error condition
    // included in API responses.
    Message string

    // Description provides a complete account of the error's purpose,
    // suitable for use in documentation.
    Description string

    // HTTPStatusCodes provides a list of status codes under which this error
    // condition may arise. If it is empty, the error condition may be seen
    // for any status code.
    HTTPStatusCodes []int
}

// ErrorDescriptors provides a list of HTTP API Error codes that may be
// encountered when interacting with the registry API.
var ErrorDescriptors = []ErrorDescriptor{
    {
        Code:    ErrorCodeUnknown,
        Value:   "UNKNOWN",
        Message: "unknown error",
        Description: `Generic error returned when the error does not have an
        API classification.`,
    },
    {
        Code:    ErrorCodeUnauthorized,
        Value:   "UNAUTHORIZED",
        Message: "access to the requested resource is not authorized",
        Description: `The access controller denied access for the operation on
        a resource. Often this will be accompanied by a 401 Unauthorized
        response status.`,
    },
    {
        Code:    ErrorCodeDigestInvalid,
        Value:   "DIGEST_INVALID",
        Message: "provided digest did not match uploaded content",
        Description: `When a blob is uploaded, the registry will check that
        the content matches the digest provided by the client. The error may
        include a detail structure with the key "digest", including the
        invalid digest string. This error may also be returned when a manifest
        includes an invalid layer digest.`,
        HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
    },
    {
        Code:    ErrorCodeSizeInvalid,
        Value:   "SIZE_INVALID",
        Message: "provided length did not match content length",
        Description: `When a layer is uploaded, the provided size will be
        checked against the uploaded content. If they do not match, this error
        will be returned.`,
        HTTPStatusCodes: []int{http.StatusBadRequest},
    },
    {
        Code:    ErrorCodeNameInvalid,
        Value:   "NAME_INVALID",
        Message: "manifest name did not match URI",
        Description: `During a manifest upload, if the name in the manifest
        does not match the uri name, this error will be returned.`,
        HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
    },
    {
        Code:    ErrorCodeTagInvalid,
        Value:   "TAG_INVALID",
        Message: "manifest tag did not match URI",
        Description: `During a manifest upload, if the tag in the manifest
        does not match the uri tag, this error will be returned.`,
        HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
    },
    {
        Code:    ErrorCodeNameUnknown,
        Value:   "NAME_UNKNOWN",
        Message: "repository name not known to registry",
        Description: `This is returned if the name used during an operation is
        unknown to the registry.`,
        HTTPStatusCodes: []int{http.StatusNotFound},
    },
    {
        Code:    ErrorCodeManifestUnknown,
        Value:   "MANIFEST_UNKNOWN",
        Message: "manifest unknown",
        Description: `This error is returned when the manifest, identified by
        name and tag, is unknown to the repository.`,
        HTTPStatusCodes: []int{http.StatusNotFound},
    },
    {
        Code:    ErrorCodeManifestInvalid,
        Value:   "MANIFEST_INVALID",
        Message: "manifest invalid",
        Description: `During upload, manifests undergo several checks ensuring
        validity. If those checks fail, this error may be returned, unless a
        more specific error is included. The detail will contain information
        about the failed validation.`,
        HTTPStatusCodes: []int{http.StatusBadRequest},
    },
    {
        Code:    ErrorCodeManifestUnverified,
        Value:   "MANIFEST_UNVERIFIED",
        Message: "manifest failed signature verification",
        Description: `During manifest upload, if the manifest fails signature
        verification, this error will be returned.`,
        HTTPStatusCodes: []int{http.StatusBadRequest},
    },
    {
        Code:    ErrorCodeBlobUnknown,
        Value:   "BLOB_UNKNOWN",
        Message: "blob unknown to registry",
        Description: `This error may be returned when a blob is unknown to the
        registry in a specified repository. This can be returned with a
        standard get or if a manifest references an unknown layer during
        upload.`,
        HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
    },

    {
        Code:    ErrorCodeBlobUploadUnknown,
        Value:   "BLOB_UPLOAD_UNKNOWN",
        Message: "blob upload unknown to registry",
        Description: `If a blob upload has been cancelled or was never
        started, this error code may be returned.`,
        HTTPStatusCodes: []int{http.StatusNotFound},
    },
}

var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor
var idToDescriptors map[string]ErrorDescriptor

func init() {
    errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(ErrorDescriptors))
    idToDescriptors = make(map[string]ErrorDescriptor, len(ErrorDescriptors))

    for _, descriptor := range ErrorDescriptors {
        errorCodeToDescriptors[descriptor.Code] = descriptor
        idToDescriptors[descriptor.Value] = descriptor
    }
}
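
The init() above indexes every descriptor by both its typed code and its wire value, so lookups are constant-time in either direction. A minimal client-side sketch, assuming this package is imported at the path used elsewhere in this commit (the standalone main wrapper is illustrative, not part of the change; ParseErrorCode is defined in api/v2/errors.go below):

package main

import (
    "fmt"

    "github.com/docker/docker-registry/api/v2"
)

func main() {
    // Resolve the wire value back to a typed code. ParseErrorCode falls
    // back to ErrorCodeUnknown for unrecognized strings.
    code := v2.ParseErrorCode("MANIFEST_UNKNOWN")

    // The descriptor carries the human-readable message and the HTTP
    // statuses under which the error may appear.
    desc := code.Descriptor()
    fmt.Println(desc.Value, desc.Message, desc.HTTPStatusCodes)

    fmt.Println(v2.ParseErrorCode("NOT_A_REAL_CODE")) // prints UNKNOWN
}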

9 api/v2/doc.go Normal file
@@ -0,0 +1,9 @@
// Package v2 describes routes, urls and the error codes used in the Docker
// Registry JSON HTTP API V2. In addition to declarations, descriptors are
// provided for routes and error codes that can be used for implementation and
// automatically generating documentation.
//
// Definitions here are considered to be locked down for the V2 registry api.
// Any changes must be considered carefully and should not proceed without a
// change proposal in docker core.
package v2
188 api/v2/errors.go Normal file
@@ -0,0 +1,188 @@
package v2

import (
    "fmt"
    "strings"
)

// ErrorCode represents the error type. The errors are serialized via strings
// and the integer format may change and should *never* be exported.
type ErrorCode int

const (
    // ErrorCodeUnknown is a catch-all for errors not defined below.
    ErrorCodeUnknown ErrorCode = iota

    // ErrorCodeUnauthorized is returned if a request is not authorized.
    ErrorCodeUnauthorized

    // ErrorCodeDigestInvalid is returned when uploading a blob if the
    // provided digest does not match the blob contents.
    ErrorCodeDigestInvalid

    // ErrorCodeSizeInvalid is returned when uploading a blob if the provided
    // size does not match the content length.
    ErrorCodeSizeInvalid

    // ErrorCodeNameInvalid is returned when the name in the manifest does not
    // match the provided name.
    ErrorCodeNameInvalid

    // ErrorCodeTagInvalid is returned when the tag in the manifest does not
    // match the provided tag.
    ErrorCodeTagInvalid

    // ErrorCodeNameUnknown is returned when the repository name is not known.
    ErrorCodeNameUnknown

    // ErrorCodeManifestUnknown is returned when an image manifest is unknown.
    ErrorCodeManifestUnknown

    // ErrorCodeManifestInvalid is returned when an image manifest is invalid,
    // typically during a PUT operation. This error encompasses all errors
    // encountered during manifest validation that aren't signature errors.
    ErrorCodeManifestInvalid

    // ErrorCodeManifestUnverified is returned when the manifest fails
    // signature verification.
    ErrorCodeManifestUnverified

    // ErrorCodeBlobUnknown is returned when a blob is unknown to the
    // registry. This can happen when the manifest references a nonexistent
    // layer or the result is not found by a blob fetch.
    ErrorCodeBlobUnknown

    // ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
    ErrorCodeBlobUploadUnknown
)

// ParseErrorCode attempts to parse the error code string, returning
// ErrorCodeUnknown if the error is not known.
func ParseErrorCode(s string) ErrorCode {
    desc, ok := idToDescriptors[s]

    if !ok {
        return ErrorCodeUnknown
    }

    return desc.Code
}

// Descriptor returns the descriptor for the error code.
func (ec ErrorCode) Descriptor() ErrorDescriptor {
    d, ok := errorCodeToDescriptors[ec]

    if !ok {
        return ErrorCodeUnknown.Descriptor()
    }

    return d
}

// String returns the canonical identifier for this error code.
func (ec ErrorCode) String() string {
    return ec.Descriptor().Value
}

// Message returns the human-readable error message for this error code.
func (ec ErrorCode) Message() string {
    return ec.Descriptor().Message
}

// MarshalText encodes the receiver into UTF-8-encoded text and returns the
// result.
func (ec ErrorCode) MarshalText() (text []byte, err error) {
    return []byte(ec.String()), nil
}

// UnmarshalText decodes the form generated by MarshalText.
func (ec *ErrorCode) UnmarshalText(text []byte) error {
    desc, ok := idToDescriptors[string(text)]

    if !ok {
        desc = ErrorCodeUnknown.Descriptor()
    }

    *ec = desc.Code

    return nil
}

// Error provides a wrapper around ErrorCode with extra Details provided.
type Error struct {
    Code    ErrorCode   `json:"code"`
    Message string      `json:"message,omitempty"`
    Detail  interface{} `json:"detail,omitempty"`
}

// Error returns a human readable representation of the error.
func (e Error) Error() string {
    return fmt.Sprintf("%s: %s",
        strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)),
        e.Message)
}

// Errors provides the envelope for multiple errors and a few sugar methods
// for use within the application.
type Errors struct {
    Errors []Error `json:"errors,omitempty"`
}

// Push pushes an error on to the error stack, with the optional detail
// argument. It is a programming error (i.e. panic) to push more than one
// detail at a time.
func (errs *Errors) Push(code ErrorCode, details ...interface{}) {
    if len(details) > 1 {
        panic("please specify zero or one detail items for this error")
    }

    var detail interface{}
    if len(details) > 0 {
        detail = details[0]
    }

    if err, ok := detail.(error); ok {
        detail = err.Error()
    }

    errs.PushErr(Error{
        Code:    code,
        Message: code.Message(),
        Detail:  detail,
    })
}

// PushErr pushes an error interface onto the error stack.
func (errs *Errors) PushErr(err error) {
    switch err.(type) {
    case Error:
        errs.Errors = append(errs.Errors, err.(Error))
    default:
        errs.Errors = append(errs.Errors, Error{Message: err.Error()})
    }
}

func (errs *Errors) Error() string {
    switch errs.Len() {
    case 0:
        return "<nil>"
    case 1:
        return errs.Errors[0].Error()
    default:
        msg := "errors:\n"
        for _, err := range errs.Errors {
            msg += err.Error() + "\n"
        }
        return msg
    }
}

// Clear clears the errors.
func (errs *Errors) Clear() {
    errs.Errors = errs.Errors[:0]
}

// Len returns the current number of errors.
func (errs *Errors) Len() int {
    return len(errs.Errors)
}
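
Taken together, Push and the Errors envelope give handlers a small accumulation API. A sketch of the intended call pattern, under the same import-path assumption as above; the digest string is a placeholder:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/docker/docker-registry/api/v2"
)

func main() {
    var errs v2.Errors

    // Push accepts zero or one detail item; passing more than one panics.
    errs.Push(v2.ErrorCodeDigestInvalid)
    errs.Push(v2.ErrorCodeBlobUnknown, map[string]string{"digest": "sha256:placeholder"})

    // Serializes to the {"errors":[...]} envelope asserted in errors_test.go.
    p, err := json.Marshal(errs)
    if err != nil {
        panic(err)
    }

    fmt.Println(string(p))
    fmt.Println("errors pushed:", errs.Len())
}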

165 api/v2/errors_test.go Normal file
@@ -0,0 +1,165 @@
package v2

import (
    "encoding/json"
    "reflect"
    "testing"

    "github.com/docker/docker-registry/digest"
)

// TestErrorCodes ensures that error code format, mappings and
// marshaling/unmarshaling round trips are stable.
func TestErrorCodes(t *testing.T) {
    for _, desc := range ErrorDescriptors {
        if desc.Code.String() != desc.Value {
            t.Fatalf("error code string incorrect: %q != %q", desc.Code.String(), desc.Value)
        }

        if desc.Code.Message() != desc.Message {
            t.Fatalf("incorrect message for error code %v: %q != %q", desc.Code, desc.Code.Message(), desc.Message)
        }

        // Serialize the error code using the json library to ensure that we
        // get a string and it works round trip.
        p, err := json.Marshal(desc.Code)

        if err != nil {
            t.Fatalf("error marshaling error code %v: %v", desc.Code, err)
        }

        if len(p) <= 0 {
            t.Fatalf("expected content in marshaled form for error code %v", desc.Code)
        }

        // First, unmarshal to interface and ensure we have a string.
        var ecUnspecified interface{}
        if err := json.Unmarshal(p, &ecUnspecified); err != nil {
            t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err)
        }

        if _, ok := ecUnspecified.(string); !ok {
            t.Fatalf("expected a string for error code %v on unmarshal got a %T", desc.Code, ecUnspecified)
        }

        // Now, unmarshal with the error code type and ensure they are equal
        var ecUnmarshaled ErrorCode
        if err := json.Unmarshal(p, &ecUnmarshaled); err != nil {
            t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err)
        }

        if ecUnmarshaled != desc.Code {
            t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, desc.Code)
        }
    }
}

// TestErrorsManagement does a quick check of the Errors type to ensure that
// members are properly pushed and marshaled.
func TestErrorsManagement(t *testing.T) {
    var errs Errors

    errs.Push(ErrorCodeDigestInvalid)
    errs.Push(ErrorCodeBlobUnknown,
        map[string]digest.Digest{"digest": "sometestblobsumdoesntmatter"})

    p, err := json.Marshal(errs)

    if err != nil {
        t.Fatalf("error marshaling errors: %v", err)
    }

    expectedJSON := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest did not match uploaded content\"},{\"code\":\"BLOB_UNKNOWN\",\"message\":\"blob unknown to registry\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]}"

    if string(p) != expectedJSON {
        t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
    }

    errs.Clear()
    errs.Push(ErrorCodeUnknown)
    expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}"
    p, err = json.Marshal(errs)

    if err != nil {
        t.Fatalf("error marshaling errors: %v", err)
    }

    if string(p) != expectedJSON {
        t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
    }
}

// TestMarshalUnmarshal ensures that api errors can round trip through json
// without losing information.
func TestMarshalUnmarshal(t *testing.T) {

    var errors Errors

    for _, testcase := range []struct {
        description string
        err         Error
    }{
        {
            description: "unknown error",
            err: Error{
                Code:    ErrorCodeUnknown,
                Message: ErrorCodeUnknown.Descriptor().Message,
            },
        },
        {
            description: "unknown manifest",
            err: Error{
                Code:    ErrorCodeManifestUnknown,
                Message: ErrorCodeManifestUnknown.Descriptor().Message,
            },
        },
        {
            description: "unknown blob",
            err: Error{
                Code:    ErrorCodeBlobUnknown,
                Message: ErrorCodeBlobUnknown.Descriptor().Message,
                Detail:  map[string]interface{}{"digest": "asdfqwerqwerqwerqwer"},
            },
        },
    } {
        fatalf := func(format string, args ...interface{}) {
            t.Fatalf(testcase.description+": "+format, args...)
        }

        unexpectedErr := func(err error) {
            fatalf("unexpected error: %v", err)
        }

        p, err := json.Marshal(testcase.err)
        if err != nil {
            unexpectedErr(err)
        }

        var unmarshaled Error
        if err := json.Unmarshal(p, &unmarshaled); err != nil {
            unexpectedErr(err)
        }

        if !reflect.DeepEqual(unmarshaled, testcase.err) {
            fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, testcase.err)
        }

        // Roll everything up into an error response envelope.
        errors.PushErr(testcase.err)
    }

    p, err := json.Marshal(errors)
    if err != nil {
        t.Fatalf("unexpected error marshaling error envelope: %v", err)
    }

    var unmarshaled Errors
    if err := json.Unmarshal(p, &unmarshaled); err != nil {
        t.Fatalf("unexpected error unmarshaling error envelope: %v", err)
    }

    if !reflect.DeepEqual(unmarshaled, errors) {
        t.Fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, errors)
    }
}
69 api/v2/routes.go Normal file
@@ -0,0 +1,69 @@
package v2

import (
    "github.com/docker/docker-registry/common"
    "github.com/gorilla/mux"
)

// The following are definitions of the names under which all V2 routes are
// registered. These symbols can be used to look up a route based on the name.
const (
    RouteNameBase            = "base"
    RouteNameManifest        = "manifest"
    RouteNameTags            = "tags"
    RouteNameBlob            = "blob"
    RouteNameBlobUpload      = "blob-upload"
    RouteNameBlobUploadChunk = "blob-upload-chunk"
)

var allEndpoints = []string{
    RouteNameManifest,
    RouteNameTags,
    RouteNameBlob,
    RouteNameBlobUpload,
    RouteNameBlobUploadChunk,
}

// Router builds a gorilla router with named routes for the various API
// methods. This can be used directly by both server implementations and
// clients.
func Router() *mux.Router {
    router := mux.NewRouter().
        StrictSlash(true)

    // GET /v2/  Check  Check that the registry implements API version 2(.1)
    router.
        Path("/v2/").
        Name(RouteNameBase)

    // GET    /v2/<name>/manifest/<tag>  Image Manifest  Fetch the image manifest identified by name and tag.
    // PUT    /v2/<name>/manifest/<tag>  Image Manifest  Upload the image manifest identified by name and tag.
    // DELETE /v2/<name>/manifest/<tag>  Image Manifest  Delete the image identified by name and tag.
    router.
        Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/manifests/{tag:" + common.TagNameRegexp.String() + "}").
        Name(RouteNameManifest)

    // GET /v2/<name>/tags/list  Tags  Fetch the tags under the repository identified by name.
    router.
        Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/tags/list").
        Name(RouteNameTags)

    // GET /v2/<name>/blob/<digest>  Layer  Fetch the blob identified by digest.
    router.
        Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blobs/{digest:[a-zA-Z0-9-_+.]+:[a-zA-Z0-9-_+.=]+}").
        Name(RouteNameBlob)

    // POST /v2/<name>/blob/upload/  Layer Upload  Initiate an upload of the layer identified by tarsum.
    router.
        Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blobs/uploads/").
        Name(RouteNameBlobUpload)

    // GET    /v2/<name>/blob/upload/<uuid>  Layer Upload  Get the status of the upload identified by tarsum and uuid.
    // PUT    /v2/<name>/blob/upload/<uuid>  Layer Upload  Upload all or a chunk of the upload identified by tarsum and uuid.
    // DELETE /v2/<name>/blob/upload/<uuid>  Layer Upload  Cancel the upload identified by layer and uuid
    router.
        Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid}").
        Name(RouteNameBlobUploadChunk)

    return router
}
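
Because every route is registered by name and left handler-less, servers and tests attach handlers after the fact. A sketch of that pattern (mirroring what routes_test.go below does); the handler body is purely illustrative:

package main

import (
    "net/http"

    "github.com/docker/docker-registry/api/v2"
)

func main() {
    router := v2.Router()

    // Attach a handler to the named base route; the other named routes are
    // wired up the same way.
    router.GetRoute(v2.RouteNameBase).Handler(
        http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Header().Set("Content-Type", "application/json")
            w.Write([]byte("{}"))
        }))

    http.ListenAndServe(":5000", router)
}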

184 api/v2/routes_test.go Normal file
@@ -0,0 +1,184 @@
package v2

import (
    "encoding/json"
    "net/http"
    "net/http/httptest"
    "reflect"
    "testing"

    "github.com/gorilla/mux"
)

type routeTestCase struct {
    RequestURI string
    Vars       map[string]string
    RouteName  string
    StatusCode int
}

// TestRouter registers a test handler with all the routes and ensures that
// each route returns the expected path variables. No method verification is
// present. This is not meant to be exhaustive, but serves as a check that the
// expected variables are extracted.
//
// This may go away as the application structure comes together.
func TestRouter(t *testing.T) {

    router := Router()

    testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        testCase := routeTestCase{
            RequestURI: r.RequestURI,
            Vars:       mux.Vars(r),
            RouteName:  mux.CurrentRoute(r).GetName(),
        }

        enc := json.NewEncoder(w)

        if err := enc.Encode(testCase); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
    })

    // Startup test server
    server := httptest.NewServer(router)

    for _, testcase := range []routeTestCase{
        {
            RouteName:  RouteNameBase,
            RequestURI: "/v2/",
            Vars:       map[string]string{},
        },
        {
            RouteName:  RouteNameManifest,
            RequestURI: "/v2/foo/bar/manifests/tag",
            Vars: map[string]string{
                "name": "foo/bar",
                "tag":  "tag",
            },
        },
        {
            RouteName:  RouteNameTags,
            RequestURI: "/v2/foo/bar/tags/list",
            Vars: map[string]string{
                "name": "foo/bar",
            },
        },
        {
            RouteName:  RouteNameBlob,
            RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234",
            Vars: map[string]string{
                "name":   "foo/bar",
                "digest": "tarsum.dev+foo:abcdef0919234",
            },
        },
        {
            RouteName:  RouteNameBlob,
            RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234",
            Vars: map[string]string{
                "name":   "foo/bar",
                "digest": "sha256:abcdef0919234",
            },
        },
        {
            RouteName:  RouteNameBlobUpload,
            RequestURI: "/v2/foo/bar/blobs/uploads/",
            Vars: map[string]string{
                "name": "foo/bar",
            },
        },
        {
            RouteName:  RouteNameBlobUploadChunk,
            RequestURI: "/v2/foo/bar/blobs/uploads/uuid",
            Vars: map[string]string{
                "name": "foo/bar",
                "uuid": "uuid",
            },
        },
        {
            RouteName:  RouteNameBlobUploadChunk,
            RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
            Vars: map[string]string{
                "name": "foo/bar",
                "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
            },
        },
        {
            RouteName:  RouteNameBlobUploadChunk,
            RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==",
            Vars: map[string]string{
                "name": "foo/bar",
                "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==",
            },
        },
        {
            // Check ambiguity: ensure we can distinguish between tags for
            // "foo/bar/image/image" and image for "foo/bar/image" with tag
            // "tags"
            RouteName:  RouteNameManifest,
            RequestURI: "/v2/foo/bar/manifests/manifests/tags",
            Vars: map[string]string{
                "name": "foo/bar/manifests",
                "tag":  "tags",
            },
        },
        {
            // This case presents an ambiguity between foo/bar with tag="tags"
            // and list tags for "foo/bar/manifest"
            RouteName:  RouteNameTags,
            RequestURI: "/v2/foo/bar/manifests/tags/list",
            Vars: map[string]string{
                "name": "foo/bar/manifests",
            },
        },
        {
            RouteName:  RouteNameBlobUploadChunk,
            RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
            StatusCode: http.StatusNotFound,
        },
    } {
        // Register the endpoint
        router.GetRoute(testcase.RouteName).Handler(testHandler)
        u := server.URL + testcase.RequestURI

        resp, err := http.Get(u)

        if err != nil {
            t.Fatalf("error issuing get request: %v", err)
        }

        if testcase.StatusCode == 0 {
            // Override default, zero-value
            testcase.StatusCode = http.StatusOK
        }

        if resp.StatusCode != testcase.StatusCode {
            t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode)
        }

        if testcase.StatusCode != http.StatusOK {
            // We don't care about json response.
            continue
        }

        dec := json.NewDecoder(resp.Body)

        var actualRouteInfo routeTestCase
        if err := dec.Decode(&actualRouteInfo); err != nil {
            t.Fatalf("error reading json response: %v", err)
        }
        // Needs to be set out of band
        actualRouteInfo.StatusCode = resp.StatusCode

        if actualRouteInfo.RouteName != testcase.RouteName {
            t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName)
        }

        if !reflect.DeepEqual(actualRouteInfo, testcase) {
            t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase)
        }
    }

}

165 api/v2/urls.go Normal file
@@ -0,0 +1,165 @@
package v2

import (
    "net/http"
    "net/url"

    "github.com/docker/docker-registry/digest"
    "github.com/gorilla/mux"
)

// URLBuilder creates registry API urls from a single base endpoint. It can be
// used to create urls for use in a registry client or server.
//
// All urls will be created from the given base, including the api version.
// For example, if a root of "/foo/" is provided, urls generated will fall
// under "/foo/v2/...". Most applications will only provide a schema, host and
// port, such as "https://localhost:5000/".
type URLBuilder struct {
    root   *url.URL // url root (ie http://localhost/)
    router *mux.Router
}

// NewURLBuilder creates a URLBuilder with provided root url object.
func NewURLBuilder(root *url.URL) *URLBuilder {
    return &URLBuilder{
        root:   root,
        router: Router(),
    }
}

// NewURLBuilderFromString works identically to NewURLBuilder except it takes
// a string argument for the root, returning an error if it is not a valid
// url.
func NewURLBuilderFromString(root string) (*URLBuilder, error) {
    u, err := url.Parse(root)
    if err != nil {
        return nil, err
    }

    return NewURLBuilder(u), nil
}

// NewURLBuilderFromRequest uses information from an *http.Request to
// construct the root url.
func NewURLBuilderFromRequest(r *http.Request) *URLBuilder {
    u := &url.URL{
        Scheme: r.URL.Scheme,
        Host:   r.Host,
    }

    return NewURLBuilder(u)
}

// BuildBaseURL constructs a base url for the API, typically just "/v2/".
func (ub *URLBuilder) BuildBaseURL() (string, error) {
    route := ub.cloneRoute(RouteNameBase)

    baseURL, err := route.URL()
    if err != nil {
        return "", err
    }

    return baseURL.String(), nil
}

// BuildTagsURL constructs a url to list the tags in the named repository.
func (ub *URLBuilder) BuildTagsURL(name string) (string, error) {
    route := ub.cloneRoute(RouteNameTags)

    tagsURL, err := route.URL("name", name)
    if err != nil {
        return "", err
    }

    return tagsURL.String(), nil
}

// BuildManifestURL constructs a url for the manifest identified by name and tag.
func (ub *URLBuilder) BuildManifestURL(name, tag string) (string, error) {
    route := ub.cloneRoute(RouteNameManifest)

    manifestURL, err := route.URL("name", name, "tag", tag)
    if err != nil {
        return "", err
    }

    return manifestURL.String(), nil
}

// BuildBlobURL constructs the url for the blob identified by name and dgst.
func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) {
    route := ub.cloneRoute(RouteNameBlob)

    layerURL, err := route.URL("name", name, "digest", dgst.String())
    if err != nil {
        return "", err
    }

    return layerURL.String(), nil
}

// BuildBlobUploadURL constructs a url to begin a blob upload in the
// repository identified by name.
func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) {
    route := ub.cloneRoute(RouteNameBlobUpload)

    uploadURL, err := route.URL("name", name)
    if err != nil {
        return "", err
    }

    return appendValuesURL(uploadURL, values...).String(), nil
}

// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
// including any url values. This should generally not be used by clients, as
// this url is provided by server implementations during the blob upload
// process.
func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) {
    route := ub.cloneRoute(RouteNameBlobUploadChunk)

    uploadURL, err := route.URL("name", name, "uuid", uuid)
    if err != nil {
        return "", err
    }

    return appendValuesURL(uploadURL, values...).String(), nil
}

// cloneRoute returns a clone of the named route from the router. Routes
// must be cloned to avoid modifying them during url generation.
func (ub *URLBuilder) cloneRoute(name string) *mux.Route {
    route := new(mux.Route)
    *route = *ub.router.GetRoute(name) // clone the route

    return route.
        Schemes(ub.root.Scheme).
        Host(ub.root.Host)
}

// appendValuesURL appends the parameters to the url.
func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
    merged := u.Query()

    for _, v := range values {
        for k, vv := range v {
            merged[k] = append(merged[k], vv...)
        }
    }

    u.RawQuery = merged.Encode()
    return u
}

// appendValues appends the parameters to the url. Panics if the string is not
// a url.
func appendValues(u string, values ...url.Values) string {
    up, err := url.Parse(u)

    if err != nil {
        panic(err) // should never happen
    }

    return appendValuesURL(up, values...).String()
}
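
A sketch of how a client would drive URLBuilder, under the same import-path assumption; the repository name, tag and query values are placeholders:

package main

import (
    "fmt"
    "net/url"

    "github.com/docker/docker-registry/api/v2"
)

func main() {
    // Every generated url hangs off the root, including the /v2/ prefix.
    ub, err := v2.NewURLBuilderFromString("https://localhost:5000/")
    if err != nil {
        panic(err)
    }

    manifestURL, err := ub.BuildManifestURL("foo/bar", "latest")
    if err != nil {
        panic(err)
    }
    fmt.Println(manifestURL) // https://localhost:5000/v2/foo/bar/manifests/latest

    // Optional url.Values are merged into the query string.
    uploadURL, err := ub.BuildBlobUploadURL("foo/bar", url.Values{
        "size": []string{"10000"},
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(uploadURL)
}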

100 api/v2/urls_test.go Normal file
@@ -0,0 +1,100 @@
package v2

import (
    "net/url"
    "testing"
)

type urlBuilderTestCase struct {
    description string
    expected    string
    build       func() (string, error)
}

// TestURLBuilder tests the various url building functions, ensuring they are
// returning the expected values.
func TestURLBuilder(t *testing.T) {

    root := "http://localhost:5000/"
    urlBuilder, err := NewURLBuilderFromString(root)
    if err != nil {
        t.Fatalf("unexpected error creating urlbuilder: %v", err)
    }

    for _, testcase := range []struct {
        description string
        expected    string
        build       func() (string, error)
    }{
        {
            description: "test base url",
            expected:    "http://localhost:5000/v2/",
            build:       urlBuilder.BuildBaseURL,
        },
        {
            description: "test tags url",
            expected:    "http://localhost:5000/v2/foo/bar/tags/list",
            build: func() (string, error) {
                return urlBuilder.BuildTagsURL("foo/bar")
            },
        },
        {
            description: "test manifest url",
            expected:    "http://localhost:5000/v2/foo/bar/manifests/tag",
            build: func() (string, error) {
                return urlBuilder.BuildManifestURL("foo/bar", "tag")
            },
        },
        {
            description: "build blob url",
            expected:    "http://localhost:5000/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789",
            build: func() (string, error) {
                return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789")
            },
        },
        {
            description: "build blob upload url",
            expected:    "http://localhost:5000/v2/foo/bar/blobs/uploads/",
            build: func() (string, error) {
                return urlBuilder.BuildBlobUploadURL("foo/bar")
            },
        },
        {
            description: "build blob upload url with digest and size",
            expected:    "http://localhost:5000/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000",
            build: func() (string, error) {
                return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{
                    "size":   []string{"10000"},
                    "digest": []string{"tarsum.v1+sha256:abcdef0123456789"},
                })
            },
        },
        {
            description: "build blob upload chunk url",
            expected:    "http://localhost:5000/v2/foo/bar/blobs/uploads/uuid-part",
            build: func() (string, error) {
                return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part")
            },
        },
        {
            description: "build blob upload chunk url with digest and size",
            expected:    "http://localhost:5000/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000",
            build: func() (string, error) {
                return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{
                    "size":   []string{"10000"},
                    "digest": []string{"tarsum.v1+sha256:abcdef0123456789"},
                })
            },
        },
    } {
        u, err := testcase.build()
        if err != nil {
            t.Fatalf("%s: error building url: %v", testcase.description, err)
        }

        if u != testcase.expected {
            t.Fatalf("%s: %q != %q", testcase.description, u, testcase.expected)
        }
    }

}

539 api_test.go Normal file
@@ -0,0 +1,539 @@
package registry

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "net/http/httptest"
    "net/http/httputil"
    "net/url"
    "os"
    "testing"

    "github.com/docker/docker-registry/api/v2"
    "github.com/docker/docker-registry/common/testutil"
    "github.com/docker/docker-registry/configuration"
    "github.com/docker/docker-registry/digest"
    "github.com/docker/docker-registry/storage"
    _ "github.com/docker/docker-registry/storagedriver/inmemory"
    "github.com/docker/libtrust"
    "github.com/gorilla/handlers"
)

// TestCheckAPI hits the base endpoint (/v2/) and ensures we return the
// specified 200 OK response.
func TestCheckAPI(t *testing.T) {
    config := configuration.Configuration{
        Storage: configuration.Storage{
            "inmemory": configuration.Parameters{},
        },
    }

    app := NewApp(config)
    server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
    builder, err := v2.NewURLBuilderFromString(server.URL)

    if err != nil {
        t.Fatalf("error creating url builder: %v", err)
    }

    baseURL, err := builder.BuildBaseURL()
    if err != nil {
        t.Fatalf("unexpected error building base url: %v", err)
    }

    resp, err := http.Get(baseURL)
    if err != nil {
        t.Fatalf("unexpected error issuing request: %v", err)
    }
    defer resp.Body.Close()

    checkResponse(t, "issuing api base check", resp, http.StatusOK)
    checkHeaders(t, resp, http.Header{
        "Content-Type":   []string{"application/json"},
        "Content-Length": []string{"2"},
    })

    p, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        t.Fatalf("unexpected error reading response body: %v", err)
    }

    if string(p) != "{}" {
        t.Fatalf("unexpected response body: %v", string(p))
    }
}

// TestLayerAPI conducts a full test of the layer api.
func TestLayerAPI(t *testing.T) {
    // TODO(stevvooe): This test code is complete junk but it should cover the
    // complete flow. This must be broken down and checked against the
    // specification *before* we submit the final to docker core.

    config := configuration.Configuration{
        Storage: configuration.Storage{
            "inmemory": configuration.Parameters{},
        },
    }

    app := NewApp(config)
    server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
    builder, err := v2.NewURLBuilderFromString(server.URL)

    if err != nil {
        t.Fatalf("error creating url builder: %v", err)
    }

    imageName := "foo/bar"
    // "build" our layer file
    layerFile, tarSumStr, err := testutil.CreateRandomTarFile()
    if err != nil {
        t.Fatalf("error creating random layer file: %v", err)
    }

    layerDigest := digest.Digest(tarSumStr)

    // -----------------------------------
    // Test fetch for non-existent content
    layerURL, err := builder.BuildBlobURL(imageName, layerDigest)
    if err != nil {
        t.Fatalf("error building url: %v", err)
    }

    resp, err := http.Get(layerURL)
    if err != nil {
        t.Fatalf("unexpected error fetching non-existent layer: %v", err)
    }

    checkResponse(t, "fetching non-existent content", resp, http.StatusNotFound)

    // ------------------------------------------
    // Test head request for non-existent content
    resp, err = http.Head(layerURL)
    if err != nil {
        t.Fatalf("unexpected error checking head on non-existent layer: %v", err)
    }

    checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound)

    // ------------------------------------------
    // Upload a layer
    layerUploadURL, err := builder.BuildBlobUploadURL(imageName)
    if err != nil {
        t.Fatalf("error building upload url: %v", err)
    }

    resp, err = http.Post(layerUploadURL, "", nil)
    if err != nil {
        t.Fatalf("error starting layer upload: %v", err)
    }

    checkResponse(t, "starting layer upload", resp, http.StatusAccepted)
    checkHeaders(t, resp, http.Header{
        "Location":       []string{"*"},
        "Content-Length": []string{"0"},
    })

    layerLength, _ := layerFile.Seek(0, os.SEEK_END)
    layerFile.Seek(0, os.SEEK_SET)

    // TODO(sday): Cancel the layer upload here and restart.

    uploadURLBase := startPushLayer(t, builder, imageName)
    pushLayer(t, builder, imageName, layerDigest, uploadURLBase, layerFile)

    // ------------------------
    // Use a head request to see if the layer exists.
    resp, err = http.Head(layerURL)
    if err != nil {
        t.Fatalf("unexpected error checking head on existing layer: %v", err)
    }

    checkResponse(t, "checking head on existing layer", resp, http.StatusOK)
    checkHeaders(t, resp, http.Header{
        "Content-Length": []string{fmt.Sprint(layerLength)},
    })

    // ----------------
    // Fetch the layer!
    resp, err = http.Get(layerURL)
    if err != nil {
        t.Fatalf("unexpected error fetching layer: %v", err)
    }

    checkResponse(t, "fetching layer", resp, http.StatusOK)
    checkHeaders(t, resp, http.Header{
        "Content-Length": []string{fmt.Sprint(layerLength)},
    })

    // Verify the body
    verifier := digest.NewDigestVerifier(layerDigest)
    io.Copy(verifier, resp.Body)

    if !verifier.Verified() {
        t.Fatalf("response body did not pass verification")
    }

    // Missing tests:
    //  - Upload the same tarsum file under a different repository and
    //    ensure the content remains uncorrupted.
}

func TestManifestAPI(t *testing.T) {
    pk, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        t.Fatalf("unexpected error generating private key: %v", err)
    }

    config := configuration.Configuration{
        Storage: configuration.Storage{
            "inmemory": configuration.Parameters{},
        },
    }

    app := NewApp(config)
    server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
    builder, err := v2.NewURLBuilderFromString(server.URL)
    if err != nil {
        t.Fatalf("unexpected error creating url builder: %v", err)
    }

    imageName := "foo/bar"
    tag := "thetag"

    manifestURL, err := builder.BuildManifestURL(imageName, tag)
    if err != nil {
        t.Fatalf("unexpected error getting manifest url: %v", err)
    }

    // -----------------------------
    // Attempt to fetch the manifest
    resp, err := http.Get(manifestURL)
    if err != nil {
        t.Fatalf("unexpected error getting manifest: %v", err)
    }
    defer resp.Body.Close()

    checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound)

    // TODO(stevvooe): Shoot. The error setup is not working out. The content-
    // type headers are being set after writing the status code.
    // if resp.Header.Get("Content-Type") != "application/json" {
    //     t.Fatalf("unexpected content type: %v != 'application/json'",
    //         resp.Header.Get("Content-Type"))
    // }
    dec := json.NewDecoder(resp.Body)

    var respErrs v2.Errors
    if err := dec.Decode(&respErrs); err != nil {
        t.Fatalf("unexpected error decoding error response: %v", err)
    }

    if len(respErrs.Errors) == 0 {
        t.Fatalf("expected errors in response")
    }

    if respErrs.Errors[0].Code != v2.ErrorCodeManifestUnknown {
        t.Fatalf("expected manifest unknown error: got %v", respErrs)
    }

    tagsURL, err := builder.BuildTagsURL(imageName)
    if err != nil {
        t.Fatalf("unexpected error building tags url: %v", err)
    }

    resp, err = http.Get(tagsURL)
    if err != nil {
        t.Fatalf("unexpected error getting unknown tags: %v", err)
    }
    defer resp.Body.Close()

    // Check that we get an unknown repository error when asking for tags
    checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound)
    dec = json.NewDecoder(resp.Body)
    if err := dec.Decode(&respErrs); err != nil {
        t.Fatalf("unexpected error decoding error response: %v", err)
    }

    if len(respErrs.Errors) == 0 {
        t.Fatalf("expected errors in response")
    }

    if respErrs.Errors[0].Code != v2.ErrorCodeNameUnknown {
        t.Fatalf("expected repository unknown error: got %v", respErrs)
    }

    // --------------------------------
    // Attempt to push unsigned manifest with missing layers
    unsignedManifest := &storage.Manifest{
        Name: imageName,
        Tag:  tag,
        FSLayers: []storage.FSLayer{
            {
                BlobSum: "asdf",
            },
            {
                BlobSum: "qwer",
            },
        },
    }

    resp = putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest)
    defer resp.Body.Close()
    checkResponse(t, "posting unsigned manifest", resp, http.StatusBadRequest)

    dec = json.NewDecoder(resp.Body)
    if err := dec.Decode(&respErrs); err != nil {
        t.Fatalf("unexpected error decoding error response: %v", err)
    }

    var unverified int
    var missingLayers int
    var invalidDigests int

    for _, err := range respErrs.Errors {
        switch err.Code {
        case v2.ErrorCodeManifestUnverified:
            unverified++
        case v2.ErrorCodeBlobUnknown:
            missingLayers++
        case v2.ErrorCodeDigestInvalid:
            // TODO(stevvooe): This error isn't quite descriptive enough --
            // the layer with an invalid digest isn't identified.
            invalidDigests++
        default:
            t.Fatalf("unexpected error: %v", err)
        }
    }

    if unverified != 1 {
        t.Fatalf("should have received one unverified manifest error: %v", respErrs)
    }

    if missingLayers != 2 {
        t.Fatalf("should have received two missing layer errors: %v", respErrs)
    }

    if invalidDigests != 2 {
        t.Fatalf("should have received two invalid digest errors: %v", respErrs)
    }

    // TODO(stevvooe): Add a test case where we take a mostly valid registry,
    // tamper with the content and ensure that we get an unverified manifest
    // error.

    // Push 2 random layers
    expectedLayers := make(map[digest.Digest]io.ReadSeeker)

    for i := range unsignedManifest.FSLayers {
        rs, dgstStr, err := testutil.CreateRandomTarFile()

        if err != nil {
            t.Fatalf("error creating random layer %d: %v", i, err)
        }
        dgst := digest.Digest(dgstStr)

        expectedLayers[dgst] = rs
        unsignedManifest.FSLayers[i].BlobSum = dgst

        uploadURLBase := startPushLayer(t, builder, imageName)
        pushLayer(t, builder, imageName, dgst, uploadURLBase, rs)
    }

    // -------------------
    // Push the signed manifest with all layers pushed.
    signedManifest, err := unsignedManifest.Sign(pk)
    if err != nil {
        t.Fatalf("unexpected error signing manifest: %v", err)
    }

    resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest)

    checkResponse(t, "putting signed manifest", resp, http.StatusOK)

    resp, err = http.Get(manifestURL)
    if err != nil {
        t.Fatalf("unexpected error fetching manifest: %v", err)
    }
    defer resp.Body.Close()

    checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK)

    var fetchedManifest storage.SignedManifest
    dec = json.NewDecoder(resp.Body)
    if err := dec.Decode(&fetchedManifest); err != nil {
        t.Fatalf("error decoding fetched manifest: %v", err)
    }

    if !bytes.Equal(fetchedManifest.Raw, signedManifest.Raw) {
        t.Fatalf("manifests do not match")
    }

    // Ensure that the tag is listed.
    resp, err = http.Get(tagsURL)
    if err != nil {
        t.Fatalf("unexpected error getting unknown tags: %v", err)
    }
    defer resp.Body.Close()

    // Check that the tags list now succeeds with the manifest pushed
    checkResponse(t, "getting unknown manifest tags", resp, http.StatusOK)
    dec = json.NewDecoder(resp.Body)

    var tagsResponse tagsAPIResponse
|
||||||
|
|
||||||
|
if err := dec.Decode(&tagsResponse); err != nil {
|
||||||
|
t.Fatalf("unexpected error decoding error response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if tagsResponse.Name != imageName {
|
||||||
|
t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(tagsResponse.Tags) != 1 {
|
||||||
|
t.Fatalf("expected some tags in response: %v", tagsResponse.Tags)
|
||||||
|
}
|
||||||
|
|
||||||
|
if tagsResponse.Tags[0] != tag {
|
||||||
|
t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
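// For reference, a sketch of the (unsigned) manifest body that putManifest
// below marshals for the test above. Field names mirror the storage.Manifest
// literal; the exact JSON tags are assumed from the v2 manifest format.
const exampleManifestJSON = `{
	"name": "foo/bar",
	"tag": "thetag",
	"fsLayers": [
		{"blobSum": "asdf"},
		{"blobSum": "qwer"}
	]
}`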
func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response {
	var body []byte
	if sm, ok := v.(*storage.SignedManifest); ok {
		body = sm.Raw
	} else {
		var err error
		body, err = json.MarshalIndent(v, "", " ")
		if err != nil {
			t.Fatalf("unexpected error marshaling %v: %v", v, err)
		}
	}

	req, err := http.NewRequest("PUT", url, bytes.NewReader(body))
	if err != nil {
		t.Fatalf("error creating request for %s: %v", msg, err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("error doing put request while %s: %v", msg, err)
	}

	return resp
}
func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) string {
	layerUploadURL, err := ub.BuildBlobUploadURL(name)
	if err != nil {
		t.Fatalf("unexpected error building layer upload url: %v", err)
	}

	resp, err := http.Post(layerUploadURL, "", nil)
	if err != nil {
		t.Fatalf("unexpected error starting layer push: %v", err)
	}
	defer resp.Body.Close()

	checkResponse(t, fmt.Sprintf("starting layer push for %v", name), resp, http.StatusAccepted)
	checkHeaders(t, resp, http.Header{
		"Location":       []string{"*"},
		"Content-Length": []string{"0"},
	})

	return resp.Header.Get("Location")
}
// pushLayer pushes the layer content returning the url on success.
func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, rs io.ReadSeeker) string {
	rsLength, _ := rs.Seek(0, os.SEEK_END)
	rs.Seek(0, os.SEEK_SET)

	u, err := url.Parse(uploadURLBase)
	if err != nil {
		t.Fatalf("unexpected error parsing pushLayer url: %v", err)
	}

	u.RawQuery = url.Values{
		"digest": []string{dgst.String()},

		// TODO(stevvooe): Layer upload can be completed with and without size
		// argument. We'll need to add a test that checks the latter path.
		"size": []string{fmt.Sprint(rsLength)},
	}.Encode()

	uploadURL := u.String()

	// Just do a monolithic upload
	req, err := http.NewRequest("PUT", uploadURL, rs)
	if err != nil {
		t.Fatalf("unexpected error creating new request: %v", err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("unexpected error doing put: %v", err)
	}
	defer resp.Body.Close()

	checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated)

	expectedLayerURL, err := ub.BuildBlobURL(name, dgst)
	if err != nil {
		t.Fatalf("error building expected layer url: %v", err)
	}

	checkHeaders(t, resp, http.Header{
		"Location":       []string{expectedLayerURL},
		"Content-Length": []string{"0"},
	})

	return resp.Header.Get("Location")
}
func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) {
	if resp.StatusCode != expectedStatus {
		t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus)
		maybeDumpResponse(t, resp)

		t.FailNow()
	}
}
func maybeDumpResponse(t *testing.T, resp *http.Response) {
	if d, err := httputil.DumpResponse(resp, true); err != nil {
		t.Logf("error dumping response: %v", err)
	} else {
		t.Logf("response:\n%s", string(d))
	}
}
// checkHeaders checks that the response has at least the given headers. If
// not, the test will fail. If a passed in header value is "*", any non-zero
// value will suffice as a match.
func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) {
	for k, vs := range headers {
		if resp.Header.Get(k) == "" {
			t.Fatalf("response missing header %q", k)
		}

		for _, v := range vs {
			if v == "*" {
				// Just ensure there is some value.
				if len(resp.Header[k]) > 0 {
					continue
				}
			}

			for _, hv := range resp.Header[k] {
				if hv != v {
					t.Fatalf("header value not matched in response: %q != %q", hv, v)
				}
			}
		}
	}
}
260
app.go
Normal file
@@ -0,0 +1,260 @@
package registry

import (
	"fmt"
	"net/http"

	"github.com/docker/docker-registry/api/v2"
	"github.com/docker/docker-registry/auth"
	"github.com/docker/docker-registry/configuration"
	"github.com/docker/docker-registry/storage"
	"github.com/docker/docker-registry/storagedriver"
	"github.com/docker/docker-registry/storagedriver/factory"

	log "github.com/Sirupsen/logrus"
	"github.com/gorilla/mux"
)

// App is a global registry application object. Shared resources can be placed
// on this object that will be accessible from all requests. Any writable
// fields should be protected.
type App struct {
	Config configuration.Configuration

	router *mux.Router

	// driver maintains the app global storage driver instance.
	driver storagedriver.StorageDriver

	// services contains the main services instance for the application.
	services *storage.Services

	accessController auth.AccessController
}

// NewApp takes a configuration and returns a configured app, ready to serve
// requests. The app only implements ServeHTTP and can be wrapped in other
// handlers accordingly.
func NewApp(configuration configuration.Configuration) *App {
	app := &App{
		Config: configuration,
		router: v2.Router(),
	}

	// Register the handler dispatchers.
	app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler {
		return http.HandlerFunc(apiBase)
	})
	app.register(v2.RouteNameManifest, imageManifestDispatcher)
	app.register(v2.RouteNameTags, tagsDispatcher)
	app.register(v2.RouteNameBlob, layerDispatcher)
	app.register(v2.RouteNameBlobUpload, layerUploadDispatcher)
	app.register(v2.RouteNameBlobUploadChunk, layerUploadDispatcher)

	driver, err := factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters())

	if err != nil {
		// TODO(stevvooe): Move the creation of a service into a protected
		// method, where this is created lazily. Its status can be queried via
		// a health check.
		panic(err)
	}

	app.driver = driver
	app.services = storage.NewServices(app.driver)

	authType := configuration.Auth.Type()

	if authType != "" {
		accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters())
		if err != nil {
			panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err))
		}
		app.accessController = accessController
	}

	return app
}

func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	app.router.ServeHTTP(w, r)
}

// register a handler with the application, by route name. The handler will be
// passed through the application filters and context will be constructed at
// request time.
func (app *App) register(routeName string, dispatch dispatchFunc) {

	// TODO(stevvooe): This odd dispatcher/route registration is a by-product
	// of some limitations in the gorilla/mux router. We are using it to keep
	// routing consistent between the client and server, but we may want to
	// replace it with manual routing and structure-based dispatch for better
	// control over the request execution.

	app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch))
}

// dispatchFunc takes a context and request and returns a constructed handler
// for the route. The dispatcher will use this to dynamically create request
// specific handlers for each endpoint without creating a new router for each
// request.
type dispatchFunc func(ctx *Context, r *http.Request) http.Handler

// TODO(stevvooe): dispatchers should probably have some validation error
// chain with proper error reporting.
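// A minimal sketch of a dispatchFunc (hypothetical; the real dispatchers,
// such as imageManifestDispatcher, are registered in NewApp above). The
// returned handler closes over the request-scoped Context built below.
func exampleDispatcher(ctx *Context, r *http.Request) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "repository: %s", ctx.Name)
	})
}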
// singleStatusResponseWriter only records the first status code written,
// treating it as the valid request status. The current use case of this
// type should be factored out.
type singleStatusResponseWriter struct {
	http.ResponseWriter
	status int
}

func (ssrw *singleStatusResponseWriter) WriteHeader(status int) {
	if ssrw.status != 0 {
		return
	}
	ssrw.status = status
	ssrw.ResponseWriter.WriteHeader(status)
}

// dispatcher returns a handler that constructs a request specific context and
// handler, using the dispatch factory function.
func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		context := app.context(r)

		if err := app.authorized(w, r, context); err != nil {
			return
		}

		context.log = log.WithField("name", context.Name)
		handler := dispatch(context, r)

		ssrw := &singleStatusResponseWriter{ResponseWriter: w}
		context.log.Infoln("handler", resolveHandlerName(r.Method, handler))
		handler.ServeHTTP(ssrw, r)

		// Automated error response handling here. Handlers may return their
		// own errors if they need different behavior (such as range errors
		// for layer upload).
		if context.Errors.Len() > 0 {
			if ssrw.status == 0 {
				w.WriteHeader(http.StatusBadRequest)
			}
			serveJSON(w, context.Errors)
		}
	})
}
// context constructs the context object for the application. This should
// only be called once per request.
func (app *App) context(r *http.Request) *Context {
	vars := mux.Vars(r)
	context := &Context{
		App:        app,
		Name:       vars["name"],
		urlBuilder: v2.NewURLBuilderFromRequest(r),
	}

	// Store vars for underlying handlers.
	context.vars = vars

	return context
}

// authorized checks if the request can proceed with the required access
// level. If it cannot, the method will return an error.
func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error {
	if app.accessController == nil {
		return nil // access controller is not enabled.
	}

	var accessRecords []auth.Access

	if context.Name != "" {
		resource := auth.Resource{
			Type: "repository",
			Name: context.Name,
		}

		switch r.Method {
		case "GET", "HEAD":
			accessRecords = append(accessRecords,
				auth.Access{
					Resource: resource,
					Action:   "pull",
				})
		case "POST", "PUT", "PATCH":
			accessRecords = append(accessRecords,
				auth.Access{
					Resource: resource,
					Action:   "pull",
				},
				auth.Access{
					Resource: resource,
					Action:   "push",
				})
		case "DELETE":
			// DELETE access requires full admin rights, which is represented
			// as "*". This may not be ideal.
			accessRecords = append(accessRecords,
				auth.Access{
					Resource: resource,
					Action:   "*",
				})
		}
	} else {
		// Only allow the name not to be set on the base route.
		route := mux.CurrentRoute(r)

		if route == nil || route.GetName() != v2.RouteNameBase {
			// For this to be properly secured, context.Name must always be set
			// for a resource that may make a modification. The only condition
			// under which name is not set and we still allow access is when the
			// base route is accessed. This section prevents us from making that
			// mistake elsewhere in the code, allowing any operation to proceed.
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusForbidden)

			var errs v2.Errors
			errs.Push(v2.ErrorCodeUnauthorized)
			serveJSON(w, errs)

			// Stop the request here: without a repository name, no further
			// authorization check is meaningful.
			return fmt.Errorf("forbidden: no repository name")
		}
	}

	if err := app.accessController.Authorized(r, accessRecords...); err != nil {
		switch err := err.(type) {
		case auth.Challenge:
			w.Header().Set("Content-Type", "application/json")
			err.ServeHTTP(w, r)

			var errs v2.Errors
			errs.Push(v2.ErrorCodeUnauthorized, accessRecords)
			serveJSON(w, errs)
		default:
			// This condition is a potential security problem either in
			// the configuration or whatever is backing the access
			// controller. Just return a bad request with no information
			// to avoid exposure. The request should not proceed.
			context.log.Errorf("error checking authorization: %v", err)
			w.WriteHeader(http.StatusBadRequest)
		}

		return err
	}

	return nil
}

// apiBase implements a simple yes-man for doing overall checks against the
// api. This can support auth roundtrips to support docker login.
func apiBase(w http.ResponseWriter, r *http.Request) {
	const emptyJSON = "{}"
	// Provide a simple /v2/ 200 OK response with empty json response.
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON)))

	fmt.Fprint(w, emptyJSON)
}
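// A sketch of exercising the base endpoint that apiBase serves, assuming an
// httptest server wrapping the App (as in app_test.go) and that
// v2.RouteNameBase is mounted at /v2/:
func checkAPIBase(serverURL string) error {
	resp, err := http.Get(serverURL + "/v2/")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Expect a 200 OK with an empty JSON body ("{}").
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status for api base: %v", resp.Status)
	}
	return nil
}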
194
app_test.go
Normal file
@@ -0,0 +1,194 @@
package registry

import (
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"net/url"
	"testing"

	"github.com/docker/docker-registry/api/v2"
	_ "github.com/docker/docker-registry/auth/silly"
	"github.com/docker/docker-registry/configuration"
)

// TestAppDispatcher builds an application with a test dispatcher and ensures
// that requests are properly dispatched and the handlers are constructed.
// This only tests the dispatch mechanism. The underlying dispatchers must be
// tested individually.
func TestAppDispatcher(t *testing.T) {
	app := &App{
		Config: configuration.Configuration{},
		router: v2.Router(),
	}
	server := httptest.NewServer(app)
	router := v2.Router()

	serverURL, err := url.Parse(server.URL)
	if err != nil {
		t.Fatalf("error parsing server url: %v", err)
	}

	varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc {
		return func(ctx *Context, r *http.Request) http.Handler {
			// Always checks the same name context
			if ctx.Name != ctx.vars["name"] {
				t.Fatalf("unexpected name: %q != %q", ctx.Name, "foo/bar")
			}

			// Check that we have all that is expected
			for expectedK, expectedV := range expectedVars {
				if ctx.vars[expectedK] != expectedV {
					t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.vars[expectedK], expectedV)
				}
			}

			// Check that we only have variables that are expected
			for k, v := range ctx.vars {
				_, ok := expectedVars[k]

				if !ok { // name is checked on context
					// We have an unexpected key, fail
					t.Fatalf("unexpected key %q in vars with value %q", k, v)
				}
			}

			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
			})
		}
	}

	// unflatten a list of variables, suitable for gorilla/mux, to a map[string]string
	unflatten := func(vars []string) map[string]string {
		m := make(map[string]string)
		for i := 0; i < len(vars)-1; i = i + 2 {
			m[vars[i]] = vars[i+1]
		}

		return m
	}
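	// For example, unflatten([]string{"name", "foo/bar", "tag", "sometag"})
	// yields map[string]string{"name": "foo/bar", "tag": "sometag"}, the
	// pairwise layout that gorilla/mux route builders expect.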
	for _, testcase := range []struct {
		endpoint string
		vars     []string
	}{
		{
			endpoint: v2.RouteNameManifest,
			vars: []string{
				"name", "foo/bar",
				"tag", "sometag",
			},
		},
		{
			endpoint: v2.RouteNameTags,
			vars: []string{
				"name", "foo/bar",
			},
		},
		{
			endpoint: v2.RouteNameBlob,
			vars: []string{
				"name", "foo/bar",
				"digest", "tarsum.v1+bogus:abcdef0123456789",
			},
		},
		{
			endpoint: v2.RouteNameBlobUpload,
			vars: []string{
				"name", "foo/bar",
			},
		},
		{
			endpoint: v2.RouteNameBlobUploadChunk,
			vars: []string{
				"name", "foo/bar",
				"uuid", "theuuid",
			},
		},
	} {
		app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars)))
		route := router.GetRoute(testcase.endpoint).Host(serverURL.Host)
		u, err := route.URL(testcase.vars...)

		if err != nil {
			t.Fatal(err)
		}

		resp, err := http.Get(u.String())

		if err != nil {
			t.Fatal(err)
		}

		if resp.StatusCode != http.StatusOK {
			t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK)
		}
	}
}

// TestNewApp covers the creation of an application via NewApp with a
// configuration.
func TestNewApp(t *testing.T) {
	config := configuration.Configuration{
		Storage: configuration.Storage{
			"inmemory": nil,
		},
		Auth: configuration.Auth{
			// For now, we simply test that new auth results in a viable
			// application.
			"silly": {
				"realm":   "realm-test",
				"service": "service-test",
			},
		},
	}

	// Mostly, with this test, given a sane configuration, we are simply
	// ensuring that NewApp doesn't panic. We might want to tweak this
	// behavior.
	app := NewApp(config)

	server := httptest.NewServer(app)
	builder, err := v2.NewURLBuilderFromString(server.URL)
	if err != nil {
		t.Fatalf("error creating urlbuilder: %v", err)
	}

	baseURL, err := builder.BuildBaseURL()
	if err != nil {
		t.Fatalf("error creating baseURL: %v", err)
	}

	// TODO(stevvooe): The rest of this test might belong in the API tests.

	// Just hit the app and make sure we get a 401 Unauthorized error.
	resp, err := http.Get(baseURL)
	if err != nil {
		t.Fatalf("unexpected error during GET: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("unexpected status code during request: %v != %v", resp.StatusCode, http.StatusUnauthorized)
	}

	if resp.Header.Get("Content-Type") != "application/json" {
		t.Fatalf("unexpected content-type: %v != %v", resp.Header.Get("Content-Type"), "application/json")
	}

	// The challenge is carried in the WWW-Authenticate response header, per
	// the auth.Challenge contract.
	expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\""
	if resp.Header.Get("WWW-Authenticate") != expectedAuthHeader {
		t.Fatalf("unexpected WWW-Authenticate header: %q != %q", resp.Header.Get("WWW-Authenticate"), expectedAuthHeader)
	}

	var errs v2.Errors
	dec := json.NewDecoder(resp.Body)
	if err := dec.Decode(&errs); err != nil {
		t.Fatalf("error decoding error response: %v", err)
	}

	if errs.Errors[0].Code != v2.ErrorCodeUnauthorized {
		t.Fatalf("unexpected error code: %v != %v", errs.Errors[0].Code, v2.ErrorCodeUnauthorized)
	}
}
107
auth/auth.go
Normal file
@@ -0,0 +1,107 @@
// Package auth defines a standard interface for request access controllers.
//
// An access controller has a simple interface with a single `Authorized`
// method which checks that a given request is authorized to perform one or
// more actions on one or more resources. This method should return a non-nil
// error if the request is not authorized.
//
// An implementation registers its access controller by name with a constructor
// which accepts an options map for configuring the access controller.
//
// 	options := map[string]interface{}{"sillySecret": "whysosilly?"}
// 	accessController, _ := auth.GetAccessController("silly", options)
//
// This `accessController` can then be used in a request handler like so:
//
// 	func updateOrder(w http.ResponseWriter, r *http.Request) {
// 		orderNumber := r.FormValue("orderNumber")
// 		resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
// 		access := auth.Access{Resource: resource, Action: "update"}
//
// 		if err := accessController.Authorized(r, access); err != nil {
// 			if challenge, ok := err.(auth.Challenge); ok {
// 				// Let the challenge write the response.
// 				challenge.ServeHTTP(w, r)
// 			} else {
// 				// Some other error.
// 			}
// 		}
// 	}
//
package auth

import (
	"fmt"
	"net/http"
)

// Resource describes a resource by type and name.
type Resource struct {
	Type string
	Name string
}

// Access describes a specific action that is
// requested or allowed for a given resource.
type Access struct {
	Resource
	Action string
}

// Challenge is a special error type which is used for HTTP 401 Unauthorized
// responses and is able to write the response with WWW-Authenticate challenge
// header values based on the error.
type Challenge interface {
	error
	// ServeHTTP prepares the request to conduct the appropriate challenge
	// response. For most implementations, simply calling ServeHTTP should be
	// sufficient. Because no body is written, users may write a custom body after
	// calling ServeHTTP, but any headers must be written before the call and may
	// be overwritten.
	ServeHTTP(w http.ResponseWriter, r *http.Request)
}

// AccessController controls access to registry resources based on a request
// and required access levels for a request. Implementations can support both
// complete denial and http authorization challenges.
type AccessController interface {
	// Authorized returns nil if the request is granted access. If one or
	// more Access structs are provided, the requested access will be compared
	// with what is available to the request. If the error is non-nil, access
	// should always be denied. The error may be of type Challenge, in which
	// case the caller may have the Challenge handle the request or choose
	// what action to take based on the Challenge header or response status.
	Authorized(req *http.Request, access ...Access) error
}

// InitFunc is the type of an AccessController factory function and is used
// to register the constructor for different AccessController backends.
type InitFunc func(options map[string]interface{}) (AccessController, error)

var accessControllers map[string]InitFunc

func init() {
	accessControllers = make(map[string]InitFunc)
}

// Register is used to register an InitFunc for
// an AccessController backend with the given name.
func Register(name string, initFunc InitFunc) error {
	if _, exists := accessControllers[name]; exists {
		return fmt.Errorf("name already registered: %s", name)
	}

	accessControllers[name] = initFunc

	return nil
}

// GetAccessController constructs an AccessController
// with the given options using the named backend.
func GetAccessController(name string, options map[string]interface{}) (AccessController, error) {
	if initFunc, exists := accessControllers[name]; exists {
		return initFunc(options)
	}

	return nil, fmt.Errorf("no access controller registered with name: %s", name)
}
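// A minimal sketch of a third-party backend wiring itself in through
// Register; the allowall package and allowAll type are hypothetical.
package allowall

import (
	"net/http"

	"github.com/docker/docker-registry/auth"
)

// allowAll grants every request. Illustration only; it provides no security.
type allowAll struct{}

func (allowAll) Authorized(req *http.Request, access ...auth.Access) error {
	return nil // access is always granted, so no Challenge is ever returned
}

func init() {
	auth.Register("allowall", auth.InitFunc(func(options map[string]interface{}) (auth.AccessController, error) {
		return allowAll{}, nil
	}))
}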
89
auth/silly/access.go
Normal file
@@ -0,0 +1,89 @@
// Package silly provides a simple authentication scheme that checks for the
// existence of an Authorization header and issues access if it is present
// and non-empty.
//
// This package is present as an example implementation of a minimal
// auth.AccessController and for testing. This is not suitable for any kind of
// production security.
package silly

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/docker/docker-registry/auth"
)

// accessController provides a simple implementation of auth.AccessController
// that simply checks for a non-empty Authorization header. It is useful for
// demonstration and testing.
type accessController struct {
	realm   string
	service string
}

var _ auth.AccessController = &accessController{}

func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
	realm, present := options["realm"]
	if _, ok := realm.(string); !present || !ok {
		return nil, fmt.Errorf(`"realm" must be set for silly access controller`)
	}

	service, present := options["service"]
	if _, ok := service.(string); !present || !ok {
		return nil, fmt.Errorf(`"service" must be set for silly access controller`)
	}

	return &accessController{realm: realm.(string), service: service.(string)}, nil
}

// Authorized simply checks for the existence of the authorization header,
// responding with a bearer challenge if it doesn't exist.
func (ac *accessController) Authorized(req *http.Request, accessRecords ...auth.Access) error {
	if req.Header.Get("Authorization") == "" {
		challenge := challenge{
			realm:   ac.realm,
			service: ac.service,
		}

		if len(accessRecords) > 0 {
			var scopes []string
			for _, access := range accessRecords {
				scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action))
			}
			challenge.scope = strings.Join(scopes, " ")
		}

		return &challenge
	}

	return nil
}

type challenge struct {
	realm   string
	service string
	scope   string
}

func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service)

	if ch.scope != "" {
		header = fmt.Sprintf("%s,scope=%q", header, ch.scope)
	}

	// The challenge belongs in the WWW-Authenticate response header, per the
	// auth.Challenge contract.
	w.Header().Set("WWW-Authenticate", header)
	w.WriteHeader(http.StatusUnauthorized)
}

func (ch *challenge) Error() string {
	return fmt.Sprintf("silly authentication challenge: %#v", ch)
}

// init registers the silly auth backend.
func init() {
	auth.Register("silly", auth.InitFunc(newAccessController))
}
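// Usage sketch: constructing the silly controller by name, as the registry
// does from configuration. newSillyForTest is a hypothetical helper; both
// option keys must be present as strings.
func newSillyForTest() (auth.AccessController, error) {
	options := map[string]interface{}{
		"realm":   "test-realm",
		"service": "test-service",
	}
	return auth.GetAccessController("silly", options)
}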
58
auth/silly/access_test.go
Normal file
@@ -0,0 +1,58 @@
package silly

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/docker/docker-registry/auth"
)

func TestSillyAccessController(t *testing.T) {
	ac := &accessController{
		realm:   "test-realm",
		service: "test-service",
	}

	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if err := ac.Authorized(r); err != nil {
			switch err := err.(type) {
			case auth.Challenge:
				err.ServeHTTP(w, r)
				return
			default:
				t.Fatalf("unexpected error authorizing request: %v", err)
			}
		}

		w.WriteHeader(http.StatusNoContent)
	}))

	resp, err := http.Get(server.URL)
	if err != nil {
		t.Fatalf("unexpected error during GET: %v", err)
	}
	defer resp.Body.Close()

	// Request should not be authorized
	if resp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized)
	}

	req, err := http.NewRequest("GET", server.URL, nil)
	if err != nil {
		t.Fatalf("unexpected error creating new request: %v", err)
	}
	req.Header.Set("Authorization", "seriously, anything")

	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("unexpected error during GET: %v", err)
	}
	defer resp.Body.Close()

	// Request should now be authorized
	if resp.StatusCode != http.StatusNoContent {
		t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent)
	}
}
269
auth/token/accesscontroller.go
Normal file
@@ -0,0 +1,269 @@
package token

import (
	"crypto"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"strings"

	"github.com/docker/libtrust"

	"github.com/docker/docker-registry/auth"
	"github.com/docker/docker-registry/common"
)

// accessSet maps a typed, named resource to
// a set of actions requested or authorized.
type accessSet map[auth.Resource]actionSet

// newAccessSet constructs an accessSet from
// a variable number of auth.Access items.
func newAccessSet(accessItems ...auth.Access) accessSet {
	accessSet := make(accessSet, len(accessItems))

	for _, access := range accessItems {
		resource := auth.Resource{
			Type: access.Type,
			Name: access.Name,
		}

		set, exists := accessSet[resource]
		if !exists {
			set = newActionSet()
			accessSet[resource] = set
		}

		set.Add(access.Action)
	}

	return accessSet
}

// contains returns whether or not the given access is in this accessSet.
func (s accessSet) contains(access auth.Access) bool {
	actionSet, ok := s[access.Resource]
	if ok {
		return actionSet.Contains(access.Action)
	}

	return false
}

// scopeParam returns a collection of scopes which can
// be used for a WWW-Authenticate challenge parameter.
// See https://tools.ietf.org/html/rfc6750#section-3
func (s accessSet) scopeParam() string {
	scopes := make([]string, 0, len(s))

	for resource, actionSet := range s {
		actions := strings.Join(actionSet.Keys(), ",")
		scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions))
	}

	return strings.Join(scopes, " ")
}
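// Sketch: two access items on the same repository collapse into a single
// scope token; action ordering depends on actionSet iteration.
//
// 	s := newAccessSet(
// 		auth.Access{Resource: auth.Resource{Type: "repository", Name: "foo/bar"}, Action: "pull"},
// 		auth.Access{Resource: auth.Resource{Type: "repository", Name: "foo/bar"}, Action: "push"},
// 	)
// 	s.scopeParam() // "repository:foo/bar:pull,push"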
// Errors used and exported by this package.
var (
	ErrInsufficientScope = errors.New("insufficient scope")
	ErrTokenRequired     = errors.New("authorization token required")
)

// authChallenge implements the auth.Challenge interface.
type authChallenge struct {
	err       error
	realm     string
	service   string
	accessSet accessSet
}

// Error returns the internal error string for this authChallenge.
func (ac *authChallenge) Error() string {
	return ac.err.Error()
}

// Status returns the HTTP Response Status Code for this authChallenge.
func (ac *authChallenge) Status() int {
	return http.StatusUnauthorized
}

// challengeParams constructs the value to be used in
// the WWW-Authenticate response challenge header.
// See https://tools.ietf.org/html/rfc6750#section-3
func (ac *authChallenge) challengeParams() string {
	str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service)

	if scope := ac.accessSet.scopeParam(); scope != "" {
		str = fmt.Sprintf("%s,scope=%q", str, scope)
	}

	if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken {
		str = fmt.Sprintf("%s,error=%q", str, "invalid_token")
	} else if ac.err == ErrInsufficientScope {
		str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope")
	}

	return str
}
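// Illustrative only: for a request carrying an invalid token, with pull
// access requested on foo/bar, challengeParams produces a value such as
// (realm and service values are hypothetical):
//
// 	Bearer realm="https://auth.example.com/token",service="registry.example.com",scope="repository:foo/bar:pull",error="invalid_token"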
// SetHeader sets the WWW-Authenticate value for the given header.
func (ac *authChallenge) SetHeader(header http.Header) {
	header.Add("WWW-Authenticate", ac.challengeParams())
}

// ServeHTTP handles writing the challenge response
// by setting the challenge header and status code.
func (ac *authChallenge) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ac.SetHeader(w.Header())
	w.WriteHeader(ac.Status())
}

// accessController implements the auth.AccessController interface.
type accessController struct {
	realm       string
	issuer      string
	service     string
	rootCerts   *x509.CertPool
	trustedKeys map[string]libtrust.PublicKey
}

// tokenAccessOptions is a convenience type for handling
// options to the constructor of an accessController.
type tokenAccessOptions struct {
	realm          string
	issuer         string
	service        string
	rootCertBundle string
}

// checkOptions gathers the necessary options
// for an accessController from the given map.
func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) {
	var opts tokenAccessOptions

	keys := []string{"realm", "issuer", "service", "rootCertBundle"}
	vals := make([]string, 0, len(keys))
	for _, key := range keys {
		val, ok := options[key].(string)
		if !ok {
			return opts, fmt.Errorf("token auth requires a valid option string: %q", key)
		}
		vals = append(vals, val)
	}

	opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3]

	return opts, nil
}
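// A sketch of the options map checkOptions expects; every value below is a
// placeholder, and rootCertBundle must name a PEM bundle on disk.
//
// 	options := map[string]interface{}{
// 		"realm":          "https://auth.example.com/token",
// 		"issuer":         "auth.example.com",
// 		"service":        "registry.example.com",
// 		"rootCertBundle": "/etc/registry/tokenroots.pem",
// 	}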
// newAccessController creates an accessController using the given options.
func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
	config, err := checkOptions(options)
	if err != nil {
		return nil, err
	}

	fp, err := os.Open(config.rootCertBundle)
	if err != nil {
		return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err)
	}
	defer fp.Close()

	rawCertBundle, err := ioutil.ReadAll(fp)
	if err != nil {
		return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err)
	}

	var rootCerts []*x509.Certificate
	pemBlock, rawCertBundle := pem.Decode(rawCertBundle)
	for pemBlock != nil {
		cert, err := x509.ParseCertificate(pemBlock.Bytes)
		if err != nil {
			return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err)
		}

		rootCerts = append(rootCerts, cert)

		pemBlock, rawCertBundle = pem.Decode(rawCertBundle)
	}

	if len(rootCerts) == 0 {
		return nil, errors.New("token auth requires at least one token signing root certificate")
	}

	rootPool := x509.NewCertPool()
	trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts))
	for _, rootCert := range rootCerts {
		rootPool.AddCert(rootCert)
		pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey))
		if err != nil {
			return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err)
		}
		trustedKeys[pubKey.KeyID()] = pubKey
	}

	return &accessController{
		realm:       config.realm,
		issuer:      config.issuer,
		service:     config.service,
		rootCerts:   rootPool,
		trustedKeys: trustedKeys,
	}, nil
}

// Authorized handles checking whether the given request is authorized
// for actions on resources described by the given access items.
func (ac *accessController) Authorized(req *http.Request, accessItems ...auth.Access) error {
	challenge := &authChallenge{
		realm:     ac.realm,
		service:   ac.service,
		accessSet: newAccessSet(accessItems...),
	}

	parts := strings.Split(req.Header.Get("Authorization"), " ")

	if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" {
		challenge.err = ErrTokenRequired
		return challenge
	}

	rawToken := parts[1]

	token, err := NewToken(rawToken)
	if err != nil {
		challenge.err = err
		return challenge
	}

	verifyOpts := VerifyOptions{
		TrustedIssuers:    common.NewStringSet(ac.issuer),
		AcceptedAudiences: common.NewStringSet(ac.service),
		Roots:             ac.rootCerts,
		TrustedKeys:       ac.trustedKeys,
	}

	if err = token.Verify(verifyOpts); err != nil {
		challenge.err = err
		return challenge
	}

	accessSet := token.accessSet()
	for _, access := range accessItems {
		if !accessSet.contains(access) {
			challenge.err = ErrInsufficientScope
			return challenge
		}
	}

	return nil
}

// init handles registering the token auth backend.
func init() {
	auth.Register("token", auth.InitFunc(newAccessController))
}
344
auth/token/token.go
Normal file
@@ -0,0 +1,344 @@
package token

import (
	"crypto"
	"crypto/x509"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
	"time"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/libtrust"

	"github.com/docker/docker-registry/auth"
	"github.com/docker/docker-registry/common"
)

const (
	// TokenSeparator is the value which separates the header, claims, and
	// signature in the compact serialization of a JSON Web Token.
	TokenSeparator = "."
)

// Errors used by token parsing and verification.
var (
	ErrMalformedToken = errors.New("malformed token")
	ErrInvalidToken   = errors.New("invalid token")
)

// ResourceActions stores allowed actions on a named and typed resource.
type ResourceActions struct {
	Type    string   `json:"type"`
	Name    string   `json:"name"`
	Actions []string `json:"actions"`
}

// ClaimSet describes the main section of a JSON Web Token.
type ClaimSet struct {
	// Public claims
	Issuer     string `json:"iss"`
	Subject    string `json:"sub"`
	Audience   string `json:"aud"`
	Expiration int64  `json:"exp"`
	NotBefore  int64  `json:"nbf"`
	IssuedAt   int64  `json:"iat"`
	JWTID      string `json:"jti"`

	// Private claims
	Access []*ResourceActions
}

// Header describes the header section of a JSON Web Token.
type Header struct {
	Type       string          `json:"typ"`
	SigningAlg string          `json:"alg"`
	KeyID      string          `json:"kid,omitempty"`
	X5c        []string        `json:"x5c,omitempty"`
	RawJWK     json.RawMessage `json:"jwk,omitempty"`
}

// Token describes a JSON Web Token.
type Token struct {
	Raw       string
	Header    *Header
	Claims    *ClaimSet
	Signature []byte
}
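// A sketch of a claim set an authorization server might issue for a client
// pushing to foo/bar; every value is hypothetical.
//
// 	claims := &ClaimSet{
// 		Issuer:     "auth.example.com",
// 		Subject:    "client-id",
// 		Audience:   "registry.example.com",
// 		Expiration: time.Now().Add(5 * time.Minute).Unix(),
// 		NotBefore:  time.Now().Unix(),
// 		IssuedAt:   time.Now().Unix(),
// 		JWTID:      "token-1",
// 		Access: []*ResourceActions{{
// 			Type:    "repository",
// 			Name:    "foo/bar",
// 			Actions: []string{"pull", "push"},
// 		}},
// 	}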
// VerifyOptions is used to specify
// options when verifying a JSON Web Token.
type VerifyOptions struct {
	TrustedIssuers    common.StringSet
	AcceptedAudiences common.StringSet
	Roots             *x509.CertPool
	TrustedKeys       map[string]libtrust.PublicKey
}

// NewToken parses the given raw token string
// and constructs an unverified JSON Web Token.
func NewToken(rawToken string) (*Token, error) {
	parts := strings.Split(rawToken, TokenSeparator)
	if len(parts) != 3 {
		return nil, ErrMalformedToken
	}

	var (
		rawHeader, rawClaims   = parts[0], parts[1]
		headerJSON, claimsJSON []byte
		err                    error
	)

	defer func() {
		if err != nil {
			log.Errorf("error while unmarshalling raw token: %s", err)
		}
	}()

	if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil {
		err = fmt.Errorf("unable to decode header: %s", err)
		return nil, ErrMalformedToken
	}

	if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil {
		err = fmt.Errorf("unable to decode claims: %s", err)
		return nil, ErrMalformedToken
	}

	token := new(Token)
	token.Header = new(Header)
	token.Claims = new(ClaimSet)

	token.Raw = strings.Join(parts[:2], TokenSeparator)
	if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil {
		err = fmt.Errorf("unable to decode signature: %s", err)
		return nil, ErrMalformedToken
	}

	if err = json.Unmarshal(headerJSON, token.Header); err != nil {
		return nil, ErrMalformedToken
	}

	if err = json.Unmarshal(claimsJSON, token.Claims); err != nil {
		return nil, ErrMalformedToken
	}

	return token, nil
}
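// NewToken only splits and decodes the three dot-separated base64url
// segments; signature checking happens separately in Verify below. Sketch:
//
// 	token, err := NewToken(rawToken) // rawToken: "<header>.<claims>.<signature>"
// 	if err != nil {
// 		// ErrMalformedToken covers a bad segment count, base64, or JSON
// 	}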
// Verify attempts to verify this token using the given options.
// Returns a nil error if the token is valid.
func (t *Token) Verify(verifyOpts VerifyOptions) error {
	// Verify that the Issuer claim is a trusted authority.
	if !verifyOpts.TrustedIssuers.Contains(t.Claims.Issuer) {
		log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer)
		return ErrInvalidToken
	}

	// Verify that the Audience claim is allowed.
	if !verifyOpts.AcceptedAudiences.Contains(t.Claims.Audience) {
		log.Errorf("token intended for another audience: %q", t.Claims.Audience)
		return ErrInvalidToken
	}

	// Verify that the token is currently usable and not expired.
	currentUnixTime := time.Now().Unix()
	if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) {
		log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime)
		return ErrInvalidToken
	}

	// Verify the token signature.
	if len(t.Signature) == 0 {
		log.Error("token has no signature")
		return ErrInvalidToken
	}

	// Verify that the signing key is trusted.
	signingKey, err := t.VerifySigningKey(verifyOpts)
	if err != nil {
		log.Error(err)
		return ErrInvalidToken
	}

	// Finally, verify the signature of the token using the key which signed it.
	if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil {
		log.Errorf("unable to verify token signature: %s", err)
		return ErrInvalidToken
	}

	return nil
}
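// Verification is driven entirely by VerifyOptions, mirroring the wiring in
// accessController.Authorized; rootPool and trustedKeys are assumed to come
// from newAccessController.
//
// 	verifyOpts := VerifyOptions{
// 		TrustedIssuers:    common.NewStringSet("auth.example.com"),
// 		AcceptedAudiences: common.NewStringSet("registry.example.com"),
// 		Roots:             rootPool,
// 		TrustedKeys:       trustedKeys,
// 	}
// 	if err := token.Verify(verifyOpts); err != nil {
// 		// the token is rejected; err will be ErrInvalidToken
// 	}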
// VerifySigningKey attempts to get the key which was used to sign this token.
|
||||||
|
// The token header should contain either of these 3 fields:
|
||||||
|
// `x5c` - The x509 certificate chain for the signing key. Needs to be
|
||||||
|
// verified.
|
||||||
|
// `jwk` - The JSON Web Key representation of the signing key.
|
||||||
|
// May contain its own `x5c` field which needs to be verified.
|
||||||
|
// `kid` - The unique identifier for the key. This library interprets it
|
||||||
|
// as a libtrust fingerprint. The key itself can be looked up in
|
||||||
|
// the trustedKeys field of the given verify options.
|
||||||
|
// Each of these methods are tried in that order of preference until the
|
||||||
|
// signing key is found or an error is returned.
|
||||||
|
func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) {
|
||||||
|
// First attempt to get an x509 certificate chain from the header.
|
||||||
|
var (
|
||||||
|
x5c = t.Header.X5c
|
||||||
|
rawJWK = t.Header.RawJWK
|
||||||
|
keyID = t.Header.KeyID
|
||||||
|
)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case len(x5c) > 0:
|
||||||
|
signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots)
|
||||||
|
case len(rawJWK) > 0:
|
||||||
|
signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts)
|
||||||
|
case len(keyID) > 0:
|
||||||
|
signingKey = verifyOpts.TrustedKeys[keyID]
|
||||||
|
if signingKey == nil {
|
||||||
|
err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
err = errors.New("unable to get token signing key")
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}

func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) {
	if len(x5c) == 0 {
		return nil, errors.New("empty x509 certificate chain")
	}

	// Ensure the first element is encoded correctly.
	leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0])
	if err != nil {
		return nil, fmt.Errorf("unable to decode leaf certificate: %s", err)
	}

	// And that it is a valid x509 certificate.
	leafCert, err := x509.ParseCertificate(leafCertDer)
	if err != nil {
		return nil, fmt.Errorf("unable to parse leaf certificate: %s", err)
	}

	// The rest of the certificate chain consists of intermediate certificates.
	intermediates := x509.NewCertPool()
	for i := 1; i < len(x5c); i++ {
		intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i])
		if err != nil {
			return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err)
		}

		intermediateCert, err := x509.ParseCertificate(intermediateCertDer)
		if err != nil {
			return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err)
		}

		intermediates.AddCert(intermediateCert)
	}

	verifyOpts := x509.VerifyOptions{
		Intermediates: intermediates,
		Roots:         roots,
		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
	}

	// TODO: this call returns certificate chains which we ignore for now, but
	// we should check them for revocations if we have the ability later.
	if _, err = leafCert.Verify(verifyOpts); err != nil {
		return nil, fmt.Errorf("unable to verify certificate chain: %s", err)
	}

	// Get the public key from the leaf certificate.
	leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey)
	if !ok {
		return nil, errors.New("unable to get leaf cert public key value")
	}

	leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey)
	if err != nil {
		return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err)
	}

	return
}

func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) {
	pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(rawJWK))
	if err != nil {
		return nil, fmt.Errorf("unable to decode raw JWK value: %s", err)
	}

	// Check to see if the key includes a certificate chain.
	x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{})
	if !ok {
		// The JWK should be one of the trusted root keys.
		if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted {
			return nil, errors.New("untrusted JWK with no certificate chain")
		}

		// The JWK is one of the trusted keys.
		return
	}

	// Ensure each item in the chain is of the correct type.
	x5c := make([]string, len(x5cVal))
	for i, val := range x5cVal {
		certString, ok := val.(string)
		if !ok || len(certString) == 0 {
			return nil, errors.New("malformed certificate chain")
		}
		x5c[i] = certString
	}

	// Ensure that the x509 certificate chain can
	// be verified up to one of our trusted roots.
	leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots)
	if err != nil {
		return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err)
	}

	// Verify that the public key in the leaf cert *is* the signing key.
	if pubKey.KeyID() != leafKey.KeyID() {
		return nil, errors.New("leaf certificate public key ID does not match JWK key ID")
	}

	return
}

// accessSet returns a set of actions available for the resource
// actions listed in the `access` section of this token.
func (t *Token) accessSet() accessSet {
	if t.Claims == nil {
		return nil
	}

	accessSet := make(accessSet, len(t.Claims.Access))

	for _, resourceActions := range t.Claims.Access {
		resource := auth.Resource{
			Type: resourceActions.Type,
			Name: resourceActions.Name,
		}

		set, exists := accessSet[resource]
		if !exists {
			set = newActionSet()
			accessSet[resource] = set
		}

		for _, action := range resourceActions.Actions {
			set.Add(action)
		}
	}

	return accessSet
}

func (t *Token) compactRaw() string {
	return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature))
}
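
Editor's note: taken together, Verify and VerifySigningKey form the full validation path for a bearer token. A minimal sketch of how a caller might drive them, using only NewToken and the VerifyOptions fields exercised by the tests below; the issuer, audience, and rawToken values are placeholders:

// Sketch (not part of this commit): verifying a compact JWT bearer token
// against a set of trusted roots and keys. Assumes the same imports as
// this package; rawToken stands in for a real "header.claims.signature"
// string.
func verifyExample(rawToken string, roots *x509.CertPool, trustedKeys map[string]libtrust.PublicKey) error {
	token, err := NewToken(rawToken)
	if err != nil {
		return err // not even parseable as a JWT
	}

	verifyOpts := VerifyOptions{
		TrustedIssuers:    common.NewStringSet("test-issuer"),
		AcceptedAudiences: common.NewStringSet("test-audience"),
		Roots:             roots,
		TrustedKeys:       trustedKeys,
	}

	// Verify checks the claims, resolves the signing key (per
	// VerifySigningKey above), and then validates the signature.
	return token.Verify(verifyOpts)
}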
364
auth/token/token_test.go
Normal file
@@ -0,0 +1,364 @@
package token

import (
	"crypto"
	"crypto/rand"
	"crypto/x509"
	"encoding/base64"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/docker/libtrust"

	"github.com/docker/docker-registry/auth"
	"github.com/docker/docker-registry/common"
)

func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) {
	keys := make([]libtrust.PrivateKey, 0, numKeys)

	for i := 0; i < numKeys; i++ {
		key, err := libtrust.GenerateECP256PrivateKey()
		if err != nil {
			return nil, err
		}
		keys = append(keys, key)
	}

	return keys, nil
}

func makeSigningKeyWithChain(rootKey libtrust.PrivateKey, depth int) (libtrust.PrivateKey, error) {
	if depth == 0 {
		// Don't need to build a chain.
		return rootKey, nil
	}

	var (
		x5c       = make([]string, depth)
		parentKey = rootKey
		key       libtrust.PrivateKey
		cert      *x509.Certificate
		err       error
	)

	for depth > 0 {
		if key, err = libtrust.GenerateECP256PrivateKey(); err != nil {
			return nil, err
		}

		if cert, err = libtrust.GenerateCACert(parentKey, key); err != nil {
			return nil, err
		}

		depth--
		x5c[depth] = base64.StdEncoding.EncodeToString(cert.Raw)
		parentKey = key
	}

	key.AddExtendedField("x5c", x5c)

	return key, nil
}

func makeRootCerts(rootKeys []libtrust.PrivateKey) ([]*x509.Certificate, error) {
	certs := make([]*x509.Certificate, 0, len(rootKeys))

	for _, key := range rootKeys {
		cert, err := libtrust.GenerateCACert(key, key)
		if err != nil {
			return nil, err
		}
		certs = append(certs, cert)
	}

	return certs, nil
}

func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.PublicKey {
	trustedKeys := make(map[string]libtrust.PublicKey, len(rootKeys))

	for _, key := range rootKeys {
		trustedKeys[key.KeyID()] = key.PublicKey()
	}

	return trustedKeys
}

func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int) (*Token, error) {
	signingKey, err := makeSigningKeyWithChain(rootKey, depth)
	if err != nil {
		return nil, fmt.Errorf("unable to make signing key with chain: %s", err)
	}

	rawJWK, err := signingKey.PublicKey().MarshalJSON()
	if err != nil {
		return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err)
	}

	joseHeader := &Header{
		Type:       "JWT",
		SigningAlg: "ES256",
		RawJWK:     json.RawMessage(rawJWK),
	}

	now := time.Now()

	randomBytes := make([]byte, 15)
	if _, err = rand.Read(randomBytes); err != nil {
		return nil, fmt.Errorf("unable to read random bytes for jwt id: %s", err)
	}

	claimSet := &ClaimSet{
		Issuer:     issuer,
		Subject:    "foo",
		Audience:   audience,
		Expiration: now.Add(5 * time.Minute).Unix(),
		NotBefore:  now.Unix(),
		IssuedAt:   now.Unix(),
		JWTID:      base64.URLEncoding.EncodeToString(randomBytes),
		Access:     access,
	}

	var joseHeaderBytes, claimSetBytes []byte

	if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil {
		return nil, fmt.Errorf("unable to marshal jose header: %s", err)
	}
	if claimSetBytes, err = json.Marshal(claimSet); err != nil {
		return nil, fmt.Errorf("unable to marshal claim set: %s", err)
	}

	encodedJoseHeader := joseBase64UrlEncode(joseHeaderBytes)
	encodedClaimSet := joseBase64UrlEncode(claimSetBytes)
	encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet)

	var signatureBytes []byte
	if signatureBytes, _, err = signingKey.Sign(strings.NewReader(encodingToSign), crypto.SHA256); err != nil {
		return nil, fmt.Errorf("unable to sign jwt payload: %s", err)
	}

	signature := joseBase64UrlEncode(signatureBytes)
	tokenString := fmt.Sprintf("%s.%s", encodingToSign, signature)

	return NewToken(tokenString)
}
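
Editor's note: makeTestToken shows the whole JWS compact serialization in one place: base64url-encode the header and claim set, sign "header.claims", then append the encoded signature. A small sketch of the reverse direction, splitting a compact token back into its three dot-separated segments; splitCompactJWS is a hypothetical helper, not part of this commit, and joseBase64UrlDecode comes from util.go below:

// Sketch: decoding the three segments of a compact JWS produced by
// makeTestToken. A real caller would unmarshal the JSON afterwards.
func splitCompactJWS(tokenString string) (header, claims, signature []byte, err error) {
	parts := strings.Split(tokenString, ".")
	if len(parts) != 3 {
		return nil, nil, nil, fmt.Errorf("expected header.claims.signature, got %d segments", len(parts))
	}
	if header, err = joseBase64UrlDecode(parts[0]); err != nil {
		return nil, nil, nil, err
	}
	if claims, err = joseBase64UrlDecode(parts[1]); err != nil {
		return nil, nil, nil, err
	}
	if signature, err = joseBase64UrlDecode(parts[2]); err != nil {
		return nil, nil, nil, err
	}
	return header, claims, signature, nil
}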

// This test makes 4 tokens with a varying number of intermediate
// certificates ranging from no intermediate chain to a length of 3
// intermediates.
func TestTokenVerify(t *testing.T) {
	var (
		numTokens = 4
		issuer    = "test-issuer"
		audience  = "test-audience"
		access    = []*ResourceActions{
			{
				Type:    "repository",
				Name:    "foo/bar",
				Actions: []string{"pull", "push"},
			},
		}
	)

	rootKeys, err := makeRootKeys(numTokens)
	if err != nil {
		t.Fatal(err)
	}

	rootCerts, err := makeRootCerts(rootKeys)
	if err != nil {
		t.Fatal(err)
	}

	rootPool := x509.NewCertPool()
	for _, rootCert := range rootCerts {
		rootPool.AddCert(rootCert)
	}

	trustedKeys := makeTrustedKeyMap(rootKeys)

	tokens := make([]*Token, 0, numTokens)

	for i := 0; i < numTokens; i++ {
		token, err := makeTestToken(issuer, audience, access, rootKeys[i], i)
		if err != nil {
			t.Fatal(err)
		}
		tokens = append(tokens, token)
	}

	verifyOps := VerifyOptions{
		TrustedIssuers:    common.NewStringSet(issuer),
		AcceptedAudiences: common.NewStringSet(audience),
		Roots:             rootPool,
		TrustedKeys:       trustedKeys,
	}

	for _, token := range tokens {
		if err := token.Verify(verifyOps); err != nil {
			t.Fatal(err)
		}
	}
}

func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err error) {
	rootCerts, err := makeRootCerts(rootKeys)
	if err != nil {
		return "", err
	}

	tempFile, err := ioutil.TempFile("", "rootCertBundle")
	if err != nil {
		return "", err
	}
	defer tempFile.Close()

	for _, cert := range rootCerts {
		if err = pem.Encode(tempFile, &pem.Block{
			Type:  "CERTIFICATE",
			Bytes: cert.Raw,
		}); err != nil {
			os.Remove(tempFile.Name())
			return "", err
		}
	}

	return tempFile.Name(), nil
}
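
Editor's note: writeTempRootCerts serializes the trusted roots as a PEM bundle, which the accessController's rootCertBundle option then consumes. A minimal sketch of reading such a bundle back with the standard library; loadRootCertPool is a hypothetical helper, not part of this commit:

// Sketch: loading a PEM root-certificate bundle like the one written by
// writeTempRootCerts into an *x509.CertPool.
func loadRootCertPool(bundlePath string) (*x509.CertPool, error) {
	pemBytes, err := ioutil.ReadFile(bundlePath)
	if err != nil {
		return nil, err
	}

	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pemBytes) {
		return nil, fmt.Errorf("no certificates parsed from %s", bundlePath)
	}
	return pool, nil
}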

// TestAccessController tests complete integration of the token auth package.
// It starts by mocking the options for a token auth accessController which
// it creates. It then tries a few mock requests:
//   - don't supply a token; should error with challenge
//   - supply an invalid token; should error with challenge
//   - supply a token with insufficient access; should error with challenge
//   - supply a valid token; should not error
func TestAccessController(t *testing.T) {
	// Make 2 keys; only the first is to be a trusted root key.
	rootKeys, err := makeRootKeys(2)
	if err != nil {
		t.Fatal(err)
	}

	rootCertBundleFilename, err := writeTempRootCerts(rootKeys[:1])
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(rootCertBundleFilename)

	realm := "https://auth.example.com/token/"
	issuer := "test-issuer.example.com"
	service := "test-service.example.com"

	options := map[string]interface{}{
		"realm":          realm,
		"issuer":         issuer,
		"service":        service,
		"rootCertBundle": rootCertBundleFilename,
	}

	accessController, err := newAccessController(options)
	if err != nil {
		t.Fatal(err)
	}

	// 1. Make a mock http.Request with no token.
	req, err := http.NewRequest("GET", "http://example.com/foo", nil)
	if err != nil {
		t.Fatal(err)
	}

	testAccess := auth.Access{
		Resource: auth.Resource{
			Type: "foo",
			Name: "bar",
		},
		Action: "baz",
	}

	err = accessController.Authorized(req, testAccess)
	challenge, ok := err.(auth.Challenge)
	if !ok {
		t.Fatal("accessController did not return a challenge")
	}

	if challenge.Error() != ErrTokenRequired.Error() {
		t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrTokenRequired)
	}

	// 2. Supply an invalid token.
	token, err := makeTestToken(
		issuer, service,
		[]*ResourceActions{{
			Type:    testAccess.Type,
			Name:    testAccess.Name,
			Actions: []string{testAccess.Action},
		}},
		rootKeys[1], 1, // Everything is valid except the key which signed it.
	)
	if err != nil {
		t.Fatal(err)
	}

	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))

	err = accessController.Authorized(req, testAccess)
	challenge, ok = err.(auth.Challenge)
	if !ok {
		t.Fatal("accessController did not return a challenge")
	}

	if challenge.Error() != ErrInvalidToken.Error() {
		t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInvalidToken)
	}

	// 3. Supply a token with insufficient access.
	token, err = makeTestToken(
		issuer, service,
		[]*ResourceActions{}, // No access specified.
		rootKeys[0], 1,
	)
	if err != nil {
		t.Fatal(err)
	}

	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))

	err = accessController.Authorized(req, testAccess)
	challenge, ok = err.(auth.Challenge)
	if !ok {
		t.Fatal("accessController did not return a challenge")
	}

	if challenge.Error() != ErrInsufficientScope.Error() {
		t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope)
	}

	// 4. Supply the token we need, or deserve, or whatever.
	token, err = makeTestToken(
		issuer, service,
		[]*ResourceActions{{
			Type:    testAccess.Type,
			Name:    testAccess.Name,
			Actions: []string{testAccess.Action},
		}},
		rootKeys[0], 1, // Everything is valid this time.
	)
	if err != nil {
		t.Fatal(err)
	}

	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))

	if err = accessController.Authorized(req, testAccess); err != nil {
		t.Fatalf("accessController returned unexpected error: %s", err)
	}
}
49
auth/token/util.go
Normal file
@@ -0,0 +1,49 @@
package token

import (
	"encoding/base64"
	"errors"
	"strings"

	"github.com/docker/docker-registry/common"
)

// joseBase64UrlEncode encodes the given data using the standard base64 url
// encoding format but with all trailing '=' characters omitted in accordance
// with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlEncode(b []byte) string {
	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}

// joseBase64UrlDecode decodes the given string using the standard base64 url
// decoder but first adds the appropriate number of trailing '=' characters in
// accordance with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlDecode(s string) ([]byte, error) {
	switch len(s) % 4 {
	case 0:
	case 2:
		s += "=="
	case 3:
		s += "="
	default:
		return nil, errors.New("illegal base64url string")
	}
	return base64.URLEncoding.DecodeString(s)
}

// actionSet is a special type of stringSet.
type actionSet struct {
	common.StringSet
}

func newActionSet(actions ...string) actionSet {
	return actionSet{common.NewStringSet(actions...)}
}

// Contains calls StringSet.Contains() for
// either "*" or the given action string.
func (s actionSet) Contains(action string) bool {
	return s.StringSet.Contains("*") || s.StringSet.Contains(action)
}
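
Editor's note: the padding arithmetic in joseBase64UrlDecode works because a valid unpadded base64 string can never have length 1 mod 4; length 2 mod 4 needs "==" and length 3 mod 4 needs "=". A hypothetical round-trip test for the two helpers, not part of this commit:

// Sketch: round-tripping through the jose helpers above. The 19-byte
// input encodes to an unpadded length of 26 (2 mod 4), exercising the
// "==" branch of the decoder.
func testJoseRoundTrip(t *testing.T) {
	in := []byte("any carnal pleasure")
	encoded := joseBase64UrlEncode(in) // no trailing '=' padding

	decoded, err := joseBase64UrlDecode(encoded)
	if err != nil {
		t.Fatal(err)
	}
	if string(decoded) != string(in) {
		t.Fatalf("round trip mismatch: %q != %q", decoded, in)
	}
}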
123
circle.yml
Normal file
@@ -0,0 +1,123 @@
# Pony-up!
machine:
  pre:
    # Install gvm
    - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer)

  post:
    # Install many go versions
    - gvm install go1.3.3 -B --name=old
    - gvm install go1.4 -B --name=stable
    # - gvm install tip --name=bleed

  environment:
    # Convenient shortcuts to "common" locations
    CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME
    BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
    # Trick circle brainflat "no absolute path" behavior
    BASE_OLD: ../../../$HOME/.gvm/pkgsets/old/global/$BASE_DIR
    BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
    # BASE_BLEED: ../../../$HOME/.gvm/pkgsets/bleed/global/$BASE_DIR
    # Workaround Circle parsing dumb bugs and/or YAML wonkiness
    CIRCLE_PAIN: "mode: set"

  hosts:
    # Not used yet
    fancy: 127.0.0.1

dependencies:
  pre:
    # Copy the code to the gopath of all go versions
    - >
      gvm use old &&
      mkdir -p "$(dirname $BASE_OLD)" &&
      cp -R "$CHECKOUT" "$BASE_OLD"

    - >
      gvm use stable &&
      mkdir -p "$(dirname $BASE_STABLE)" &&
      cp -R "$CHECKOUT" "$BASE_STABLE"

    # - >
    #   gvm use bleed &&
    #   mkdir -p "$(dirname $BASE_BLEED)" &&
    #   cp -R "$CHECKOUT" "$BASE_BLEED"

  override:
    # Install dependencies for every copied clone/go version
    - gvm use old && go get -t -d -v ./...:
        pwd: $BASE_OLD

    - gvm use stable && go get -t -d -v ./...:
        pwd: $BASE_STABLE

    # - gvm use bleed && go get -t -d -v ./...:
    #     pwd: $BASE_BLEED

  post:
    # For the stable go version, additionally install linting tools
    - >
      gvm use stable &&
      go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint

test:
  pre:
    # Output the go versions we are going to test
    - gvm use old && go version
    - gvm use stable && go version
    # - gvm use bleed && go version

    # FMT
    - gvm use stable && test -z "$(gofmt -s -l . | tee /dev/stderr)":
        pwd: $BASE_STABLE

    # VET
    - gvm use stable && go vet ./...:
        pwd: $BASE_STABLE

    # LINT
    - gvm use stable && test -z "$(golint ./... | tee /dev/stderr)":
        pwd: $BASE_STABLE

  override:
    # Test every version we have (but stable)
    - gvm use old; go test -test.v -test.short ./...:
        timeout: 600
        pwd: $BASE_OLD

    # - gvm use bleed; go test -test.v -test.short ./...:
    #     timeout: 600
    #     pwd: $BASE_BLEED

    # Test stable, and report
    # Preset the goveralls report file
    - echo "$CIRCLE_PAIN" > ~/goverage.report
    - gvm use stable; go list ./... | xargs -L 1 -I{} rm -f $GOPATH/src/{}/coverage.out:
        pwd: $BASE_STABLE

    - gvm use stable; go list ./... | xargs -L 1 -I{} go test -test.short -coverprofile=$GOPATH/src/{}/coverage.out {}:
        timeout: 600
        pwd: $BASE_STABLE

  post:
    # Aggregate and report to coveralls
    - gvm use stable; go list ./... | xargs -L 1 -I{} cat "$GOPATH/src/{}/coverage.out" | grep -v "$CIRCLE_PAIN" >> ~/goverage.report:
        pwd: $BASE_STABLE
    - gvm use stable; goveralls -service circleci -coverprofile=/home/ubuntu/goverage.report -repotoken $COVERALLS_TOKEN:
        pwd: $BASE_STABLE

## Notes
# Disabled the -race detector due to massive memory usage.
# Do we want these as well?
# - go get code.google.com/p/go.tools/cmd/goimports
# - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
# http://labix.org/gocheck

general:
  branches:
    ignore:
      - master
      - 0.7
      - 0.8
      - 0.9
      - 1.0
568
client/client.go
Normal file
@@ -0,0 +1,568 @@
package client

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"regexp"
	"strconv"

	"github.com/docker/docker-registry/api/v2"
	"github.com/docker/docker-registry/digest"
	"github.com/docker/docker-registry/storage"
)

// Client implements the client interface to the registry http api.
type Client interface {
	// GetImageManifest returns an image manifest for the image at the given
	// name, tag pair.
	GetImageManifest(name, tag string) (*storage.SignedManifest, error)

	// PutImageManifest uploads an image manifest for the image at the given
	// name, tag pair.
	PutImageManifest(name, tag string, imageManifest *storage.SignedManifest) error

	// DeleteImage removes the image at the given name, tag pair.
	DeleteImage(name, tag string) error

	// ListImageTags returns a list of all image tags with the given repository
	// name.
	ListImageTags(name string) ([]string, error)

	// BlobLength returns the length of the blob stored at the given name,
	// digest pair.
	// Returns a length value of -1 on error or if the blob does not exist.
	BlobLength(name string, dgst digest.Digest) (int, error)

	// GetBlob returns the blob stored at the given name, digest pair in the
	// form of an io.ReadCloser with the length of this blob.
	// A nonzero byteOffset can be provided to receive a partial blob beginning
	// at the given offset.
	GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error)

	// InitiateBlobUpload starts a blob upload in the given repository namespace
	// and returns a unique location url to use for other blob upload methods.
	InitiateBlobUpload(name string) (string, error)

	// GetBlobUploadStatus returns the byte offset and length of the blob at the
	// given upload location.
	GetBlobUploadStatus(location string) (int, int, error)

	// UploadBlob uploads a full blob to the registry.
	UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error

	// UploadBlobChunk uploads a blob chunk with a given length and startByte to
	// the registry.
	// FinishChunkedBlobUpload must be called to finalize this upload.
	UploadBlobChunk(location string, blobChunk io.ReadCloser, length, startByte int) error

	// FinishChunkedBlobUpload completes a chunked blob upload at a given
	// location.
	FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error

	// CancelBlobUpload deletes all content at the unfinished blob upload
	// location and invalidates any future calls to this blob upload.
	CancelBlobUpload(location string) error
}

// New returns a new Client which operates against a registry with the
// given base endpoint.
// This endpoint should not include /v2/ or any part of the url after this.
func New(endpoint string) (Client, error) {
	ub, err := v2.NewURLBuilderFromString(endpoint)
	if err != nil {
		return nil, err
	}

	return &clientImpl{
		endpoint: endpoint,
		ub:       ub,
	}, nil
}
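
Editor's note: a minimal sketch of driving the Client interface above, from construction through fetching a manifest and its first layer blob; the registry URL and repository name are placeholders, not part of this commit:

// Sketch: fetch a manifest, then read one of its layer blobs from offset 0.
// Error handling is reduced to the bare minimum.
func fetchExample() error {
	c, err := New("http://localhost:5000")
	if err != nil {
		return err
	}

	manifest, err := c.GetImageManifest("hello/world", "latest")
	if err != nil {
		return err
	}
	if len(manifest.FSLayers) == 0 {
		return nil // nothing to fetch
	}

	blob, length, err := c.GetBlob("hello/world", manifest.FSLayers[0].BlobSum, 0)
	if err != nil {
		return err
	}
	defer blob.Close()

	_ = length // the full blob size, per the Content-Length header
	return nil
}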

// clientImpl is the default implementation of the Client interface
type clientImpl struct {
	endpoint string
	ub       *v2.URLBuilder
}

// TODO(bbland): use consistent route generation between server and client

func (r *clientImpl) GetImageManifest(name, tag string) (*storage.SignedManifest, error) {
	manifestURL, err := r.ub.BuildManifestURL(name, tag)
	if err != nil {
		return nil, err
	}

	response, err := http.Get(manifestURL)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()

	// TODO(bbland): handle other status codes, like 5xx errors
	switch {
	case response.StatusCode == http.StatusOK:
		break
	case response.StatusCode == http.StatusNotFound:
		return nil, &ImageManifestNotFoundError{Name: name, Tag: tag}
	case response.StatusCode >= 400 && response.StatusCode < 500:
		var errs v2.Errors

		decoder := json.NewDecoder(response.Body)
		err = decoder.Decode(&errs)
		if err != nil {
			return nil, err
		}
		return nil, &errs
	default:
		return nil, &UnexpectedHTTPStatusError{Status: response.Status}
	}

	decoder := json.NewDecoder(response.Body)

	manifest := new(storage.SignedManifest)
	err = decoder.Decode(manifest)
	if err != nil {
		return nil, err
	}
	return manifest, nil
}

func (r *clientImpl) PutImageManifest(name, tag string, manifest *storage.SignedManifest) error {
	manifestURL, err := r.ub.BuildManifestURL(name, tag)
	if err != nil {
		return err
	}

	putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(manifest.Raw))
	if err != nil {
		return err
	}

	response, err := http.DefaultClient.Do(putRequest)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	// TODO(bbland): handle other status codes, like 5xx errors
	switch {
	case response.StatusCode == http.StatusOK:
		return nil
	case response.StatusCode >= 400 && response.StatusCode < 500:
		var errors v2.Errors
		decoder := json.NewDecoder(response.Body)
		err = decoder.Decode(&errors)
		if err != nil {
			return err
		}

		return &errors
	default:
		return &UnexpectedHTTPStatusError{Status: response.Status}
	}
}

func (r *clientImpl) DeleteImage(name, tag string) error {
	manifestURL, err := r.ub.BuildManifestURL(name, tag)
	if err != nil {
		return err
	}

	deleteRequest, err := http.NewRequest("DELETE", manifestURL, nil)
	if err != nil {
		return err
	}

	response, err := http.DefaultClient.Do(deleteRequest)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	// TODO(bbland): handle other status codes, like 5xx errors
	switch {
	case response.StatusCode == http.StatusNoContent:
		break
	case response.StatusCode == http.StatusNotFound:
		return &ImageManifestNotFoundError{Name: name, Tag: tag}
	case response.StatusCode >= 400 && response.StatusCode < 500:
		var errs v2.Errors
		decoder := json.NewDecoder(response.Body)
		err = decoder.Decode(&errs)
		if err != nil {
			return err
		}
		return &errs
	default:
		return &UnexpectedHTTPStatusError{Status: response.Status}
	}

	return nil
}

func (r *clientImpl) ListImageTags(name string) ([]string, error) {
	tagsURL, err := r.ub.BuildTagsURL(name)
	if err != nil {
		return nil, err
	}

	response, err := http.Get(tagsURL)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()

	// TODO(bbland): handle other status codes, like 5xx errors
	switch {
	case response.StatusCode == http.StatusOK:
		break
	case response.StatusCode == http.StatusNotFound:
		return nil, &RepositoryNotFoundError{Name: name}
	case response.StatusCode >= 400 && response.StatusCode < 500:
		var errs v2.Errors
		decoder := json.NewDecoder(response.Body)
		err = decoder.Decode(&errs)
		if err != nil {
			return nil, err
		}
		return nil, &errs
	default:
		return nil, &UnexpectedHTTPStatusError{Status: response.Status}
	}

	tags := struct {
		Tags []string `json:"tags"`
	}{}

	decoder := json.NewDecoder(response.Body)
	err = decoder.Decode(&tags)
	if err != nil {
		return nil, err
	}

	return tags.Tags, nil
}

func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) {
	blobURL, err := r.ub.BuildBlobURL(name, dgst)
	if err != nil {
		return -1, err
	}

	response, err := http.Head(blobURL)
	if err != nil {
		return -1, err
	}
	defer response.Body.Close()

	// TODO(bbland): handle other status codes, like 5xx errors
	switch {
	case response.StatusCode == http.StatusOK:
		lengthHeader := response.Header.Get("Content-Length")
		length, err := strconv.ParseInt(lengthHeader, 10, 64)
		if err != nil {
			return -1, err
		}
		return int(length), nil
	case response.StatusCode == http.StatusNotFound:
		return -1, nil
	case response.StatusCode >= 400 && response.StatusCode < 500:
		var errs v2.Errors
		decoder := json.NewDecoder(response.Body)
		err = decoder.Decode(&errs)
		if err != nil {
			return -1, err
		}
		return -1, &errs
	default:
		response.Body.Close()
		return -1, &UnexpectedHTTPStatusError{Status: response.Status}
	}
}

func (r *clientImpl) GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error) {
	blobURL, err := r.ub.BuildBlobURL(name, dgst)
	if err != nil {
		return nil, 0, err
	}

	getRequest, err := http.NewRequest("GET", blobURL, nil)
	if err != nil {
		return nil, 0, err
	}

	getRequest.Header.Add("Range", fmt.Sprintf("%d-", byteOffset))
	response, err := http.DefaultClient.Do(getRequest)
	if err != nil {
		return nil, 0, err
	}

	// TODO(bbland): handle other status codes, like 5xx errors
	switch {
	case response.StatusCode == http.StatusOK:
		lengthHeader := response.Header.Get("Content-Length")
		length, err := strconv.ParseInt(lengthHeader, 10, 0)
		if err != nil {
			return nil, 0, err
		}
		return response.Body, int(length), nil
	case response.StatusCode == http.StatusNotFound:
		response.Body.Close()
		return nil, 0, &BlobNotFoundError{Name: name, Digest: dgst}
	case response.StatusCode >= 400 && response.StatusCode < 500:
		var errs v2.Errors
		decoder := json.NewDecoder(response.Body)
		err = decoder.Decode(&errs)
		if err != nil {
			return nil, 0, err
		}
		return nil, 0, &errs
	default:
		response.Body.Close()
		return nil, 0, &UnexpectedHTTPStatusError{Status: response.Status}
	}
}

func (r *clientImpl) InitiateBlobUpload(name string) (string, error) {
	uploadURL, err := r.ub.BuildBlobUploadURL(name)
	if err != nil {
		return "", err
	}

	postRequest, err := http.NewRequest("POST", uploadURL, nil)
	if err != nil {
		return "", err
	}

	response, err := http.DefaultClient.Do(postRequest)
	if err != nil {
		return "", err
	}
	defer response.Body.Close()

	// TODO(bbland): handle other status codes, like 5xx errors
	switch {
	case response.StatusCode == http.StatusAccepted:
		return response.Header.Get("Location"), nil
	// case response.StatusCode == http.StatusNotFound:
	//	return
	case response.StatusCode >= 400 && response.StatusCode < 500:
		var errs v2.Errors
		decoder := json.NewDecoder(response.Body)
		err = decoder.Decode(&errs)
		if err != nil {
			return "", err
		}
		return "", &errs
	default:
		return "", &UnexpectedHTTPStatusError{Status: response.Status}
	}
}

func (r *clientImpl) GetBlobUploadStatus(location string) (int, int, error) {
	response, err := http.Get(location)
	if err != nil {
		return 0, 0, err
	}
	defer response.Body.Close()

	// TODO(bbland): handle other status codes, like 5xx errors
	switch {
	case response.StatusCode == http.StatusNoContent:
		return parseRangeHeader(response.Header.Get("Range"))
	case response.StatusCode == http.StatusNotFound:
		return 0, 0, &BlobUploadNotFoundError{Location: location}
	case response.StatusCode >= 400 && response.StatusCode < 500:
		var errs v2.Errors
		decoder := json.NewDecoder(response.Body)
		err = decoder.Decode(&errs)
		if err != nil {
			return 0, 0, err
		}
		return 0, 0, &errs
	default:
		return 0, 0, &UnexpectedHTTPStatusError{Status: response.Status}
	}
}

func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error {
	defer blob.Close()

	putRequest, err := http.NewRequest("PUT", location, blob)
	if err != nil {
		return err
	}

	queryValues := url.Values{}
	queryValues.Set("digest", dgst.String())
	putRequest.URL.RawQuery = queryValues.Encode()

	putRequest.Header.Set("Content-Type", "application/octet-stream")
	putRequest.Header.Set("Content-Length", fmt.Sprint(length))

	response, err := http.DefaultClient.Do(putRequest)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	// TODO(bbland): handle other status codes, like 5xx errors
	switch {
	case response.StatusCode == http.StatusCreated:
		return nil
	case response.StatusCode == http.StatusNotFound:
		return &BlobUploadNotFoundError{Location: location}
	case response.StatusCode >= 400 && response.StatusCode < 500:
		var errs v2.Errors
		decoder := json.NewDecoder(response.Body)
		err = decoder.Decode(&errs)
		if err != nil {
			return err
		}
		return &errs
	default:
		return &UnexpectedHTTPStatusError{Status: response.Status}
	}
}

func (r *clientImpl) UploadBlobChunk(location string, blobChunk io.ReadCloser, length, startByte int) error {
	defer blobChunk.Close()

	putRequest, err := http.NewRequest("PUT", location, blobChunk)
	if err != nil {
		return err
	}

	endByte := startByte + length

	putRequest.Header.Set("Content-Type", "application/octet-stream")
	putRequest.Header.Set("Content-Length", fmt.Sprint(length))
	putRequest.Header.Set("Content-Range",
		fmt.Sprintf("%d-%d/%d", startByte, endByte, endByte))

	response, err := http.DefaultClient.Do(putRequest)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	// TODO(bbland): handle other status codes, like 5xx errors
	switch {
	case response.StatusCode == http.StatusAccepted:
		return nil
	case response.StatusCode == http.StatusRequestedRangeNotSatisfiable:
		lastValidRange, blobSize, err := parseRangeHeader(response.Header.Get("Range"))
		if err != nil {
			return err
		}
		return &BlobUploadInvalidRangeError{
			Location:       location,
			LastValidRange: lastValidRange,
			BlobSize:       blobSize,
		}
	case response.StatusCode == http.StatusNotFound:
		return &BlobUploadNotFoundError{Location: location}
	case response.StatusCode >= 400 && response.StatusCode < 500:
		var errs v2.Errors
		decoder := json.NewDecoder(response.Body)
		err = decoder.Decode(&errs)
		if err != nil {
			return err
		}
		return &errs
	default:
		return &UnexpectedHTTPStatusError{Status: response.Status}
	}
}

func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error {
	putRequest, err := http.NewRequest("PUT", location, nil)
	if err != nil {
		return err
	}

	// Note: url.Values{} rather than new(url.Values); the latter yields a
	// pointer to a nil map and Set would panic.
	queryValues := url.Values{}
	queryValues.Set("digest", dgst.String())
	putRequest.URL.RawQuery = queryValues.Encode()

	putRequest.Header.Set("Content-Type", "application/octet-stream")
	putRequest.Header.Set("Content-Length", "0")
	putRequest.Header.Set("Content-Range",
		fmt.Sprintf("%d-%d/%d", length, length, length))

	response, err := http.DefaultClient.Do(putRequest)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	// TODO(bbland): handle other status codes, like 5xx errors
	switch {
	case response.StatusCode == http.StatusCreated:
		return nil
	case response.StatusCode == http.StatusNotFound:
		return &BlobUploadNotFoundError{Location: location}
	case response.StatusCode >= 400 && response.StatusCode < 500:
		var errs v2.Errors
		decoder := json.NewDecoder(response.Body)
		err = decoder.Decode(&errs)
		if err != nil {
			return err
		}
		return &errs
	default:
		return &UnexpectedHTTPStatusError{Status: response.Status}
	}
}
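
Editor's note: the three chunked-upload methods compose in a fixed order: InitiateBlobUpload, one or more UploadBlobChunk calls, then FinishChunkedBlobUpload with the digest. A sketch of the flow using only the interface above; uploadChunked is a hypothetical helper, not part of this commit, and it additionally assumes the io/ioutil import:

// Sketch: driving a chunked upload end to end. chunks and dgst are
// assumed to be prepared by the caller.
func uploadChunked(c Client, name string, chunks [][]byte, dgst digest.Digest) error {
	location, err := c.InitiateBlobUpload(name)
	if err != nil {
		return err
	}

	offset := 0
	for _, chunk := range chunks {
		rc := ioutil.NopCloser(bytes.NewReader(chunk))
		if err := c.UploadBlobChunk(location, rc, len(chunk), offset); err != nil {
			// Abandon the upload so the registry can reclaim the space.
			c.CancelBlobUpload(location)
			return err
		}
		offset += len(chunk)
	}

	// A zero-length PUT carrying the digest finalizes the upload.
	return c.FinishChunkedBlobUpload(location, offset, dgst)
}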

func (r *clientImpl) CancelBlobUpload(location string) error {
	deleteRequest, err := http.NewRequest("DELETE", location, nil)
	if err != nil {
		return err
	}

	response, err := http.DefaultClient.Do(deleteRequest)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	// TODO(bbland): handle other status codes, like 5xx errors
	switch {
	case response.StatusCode == http.StatusNoContent:
		return nil
	case response.StatusCode == http.StatusNotFound:
		return &BlobUploadNotFoundError{Location: location}
	case response.StatusCode >= 400 && response.StatusCode < 500:
		var errs v2.Errors
		decoder := json.NewDecoder(response.Body)
		err = decoder.Decode(&errs)
		if err != nil {
			return err
		}
		return &errs
	default:
		return &UnexpectedHTTPStatusError{Status: response.Status}
	}
}

// parseRangeHeader parses out the offset and length from a returned Range
// header.
func parseRangeHeader(byteRangeHeader string) (int, int, error) {
	r := regexp.MustCompile("bytes=0-(\\d+)/(\\d+)")
	submatches := r.FindStringSubmatch(byteRangeHeader)
	if submatches == nil {
		// Guard against a malformed or missing header, which would
		// otherwise panic on the index expressions below.
		return 0, 0, fmt.Errorf("unable to parse range header: %q", byteRangeHeader)
	}
	offset, err := strconv.ParseInt(submatches[1], 10, 0)
	if err != nil {
		return 0, 0, err
	}
	length, err := strconv.ParseInt(submatches[2], 10, 0)
	if err != nil {
		return 0, 0, err
	}
	return int(offset), int(length), nil
}
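
Editor's note: a quick illustration of the header shape parseRangeHeader expects; the values are made up for the example:

// Sketch: for a header like "bytes=0-511/2048" the parser yields 511
// (last received byte) and 2048 (total blob size).
func rangeHeaderExample() {
	offset, length, err := parseRangeHeader("bytes=0-511/2048")
	if err != nil {
		panic(err)
	}
	fmt.Println(offset, length) // 511 2048
}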
409
client/client_test.go
Normal file
@@ -0,0 +1,409 @@
package client

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"sync"
	"testing"

	"github.com/docker/docker-registry/common/testutil"
	"github.com/docker/docker-registry/digest"
	"github.com/docker/docker-registry/storage"
)

type testBlob struct {
	digest   digest.Digest
	contents []byte
}

func TestPush(t *testing.T) {
	name := "hello/world"
	tag := "sometag"
	testBlobs := []testBlob{
		{
			digest:   "tarsum.v2+sha256:12345",
			contents: []byte("some contents"),
		},
		{
			digest:   "tarsum.v2+sha256:98765",
			contents: []byte("some other contents"),
		},
	}
	uploadLocations := make([]string, len(testBlobs))
	blobs := make([]storage.FSLayer, len(testBlobs))
	history := make([]storage.ManifestHistory, len(testBlobs))

	for i, blob := range testBlobs {
		// TODO(bbland): this is returning the same location for all uploads,
		// because we can't know which blob will get which location.
		// It's sort of okay because we're using unique digests, but this needs
		// to change at some point.
		uploadLocations[i] = fmt.Sprintf("/v2/%s/blobs/test-uuid", name)
		blobs[i] = storage.FSLayer{BlobSum: blob.digest}
		history[i] = storage.ManifestHistory{V1Compatibility: blob.digest.String()}
	}

	manifest := &storage.SignedManifest{
		Manifest: storage.Manifest{
			Name:         name,
			Tag:          tag,
			Architecture: "x86",
			FSLayers:     blobs,
			History:      history,
			Versioned: storage.Versioned{
				SchemaVersion: 1,
			},
		},
	}
	var err error
	manifest.Raw, err = json.Marshal(manifest)
	if err != nil {
		t.Fatal(err)
	}

	blobRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs))
	for i, blob := range testBlobs {
		blobRequestResponseMappings[2*i] = testutil.RequestResponseMapping{
			Request: testutil.Request{
				Method: "POST",
				Route:  "/v2/" + name + "/blobs/uploads/",
			},
			Response: testutil.Response{
				StatusCode: http.StatusAccepted,
				Headers: http.Header(map[string][]string{
					"Location": {uploadLocations[i]},
				}),
			},
		}
		blobRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{
			Request: testutil.Request{
				Method: "PUT",
				Route:  uploadLocations[i],
				QueryParams: map[string][]string{
					"digest": {blob.digest.String()},
				},
				Body: blob.contents,
			},
			Response: testutil.Response{
				StatusCode: http.StatusCreated,
			},
		}
	}

	handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{
		Request: testutil.Request{
			Method: "PUT",
			Route:  "/v2/" + name + "/manifests/" + tag,
			Body:   manifest.Raw,
		},
		Response: testutil.Response{
			StatusCode: http.StatusOK,
		},
	}))
	var server *httptest.Server

	// HACK(stevvooe): Super hack to follow: the request response map approach
	// above does not let us correctly format the location header to the
	// server url. This handler intercepts and re-writes the location header
	// to the server url.
	hack := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w = &headerInterceptingResponseWriter{ResponseWriter: w, serverURL: server.URL}
		handler.ServeHTTP(w, r)
	})

	server = httptest.NewServer(hack)
	client, err := New(server.URL)
	if err != nil {
		t.Fatalf("error creating client: %v", err)
	}
	objectStore := &memoryObjectStore{
		mutex:           new(sync.Mutex),
		manifestStorage: make(map[string]*storage.SignedManifest),
		layerStorage:    make(map[digest.Digest]Layer),
	}

	for _, blob := range testBlobs {
		l, err := objectStore.Layer(blob.digest)
		if err != nil {
			t.Fatal(err)
		}

		writer, err := l.Writer()
		if err != nil {
			t.Fatal(err)
		}

		writer.SetSize(len(blob.contents))
		writer.Write(blob.contents)
		writer.Close()
	}

	objectStore.WriteManifest(name, tag, manifest)

	err = Push(client, objectStore, name, tag)
	if err != nil {
		t.Fatal(err)
	}
}

func TestPull(t *testing.T) {
	name := "hello/world"
	tag := "sometag"
	testBlobs := []testBlob{
		{
			digest:   "tarsum.v2+sha256:12345",
			contents: []byte("some contents"),
		},
		{
			digest:   "tarsum.v2+sha256:98765",
			contents: []byte("some other contents"),
		},
	}
	blobs := make([]storage.FSLayer, len(testBlobs))
	history := make([]storage.ManifestHistory, len(testBlobs))

	for i, blob := range testBlobs {
		blobs[i] = storage.FSLayer{BlobSum: blob.digest}
		history[i] = storage.ManifestHistory{V1Compatibility: blob.digest.String()}
	}

	manifest := &storage.SignedManifest{
		Manifest: storage.Manifest{
			Name:         name,
			Tag:          tag,
			Architecture: "x86",
			FSLayers:     blobs,
			History:      history,
			Versioned: storage.Versioned{
				SchemaVersion: 1,
			},
		},
	}
	manifestBytes, err := json.Marshal(manifest)
	if err != nil {
		t.Fatal(err)
	}

	blobRequestResponseMappings := make([]testutil.RequestResponseMapping, len(testBlobs))
	for i, blob := range testBlobs {
		blobRequestResponseMappings[i] = testutil.RequestResponseMapping{
			Request: testutil.Request{
				Method: "GET",
				Route:  "/v2/" + name + "/blobs/" + blob.digest.String(),
			},
			Response: testutil.Response{
				StatusCode: http.StatusOK,
				Body:       blob.contents,
			},
		}
	}

	handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{
		Request: testutil.Request{
			Method: "GET",
			Route:  "/v2/" + name + "/manifests/" + tag,
		},
		Response: testutil.Response{
			StatusCode: http.StatusOK,
			Body:       manifestBytes,
		},
	}))
	server := httptest.NewServer(handler)
	client, err := New(server.URL)
	if err != nil {
		t.Fatalf("error creating client: %v", err)
	}
	objectStore := &memoryObjectStore{
		mutex:           new(sync.Mutex),
		manifestStorage: make(map[string]*storage.SignedManifest),
		layerStorage:    make(map[digest.Digest]Layer),
	}

	err = Pull(client, objectStore, name, tag)
	if err != nil {
		t.Fatal(err)
	}

	m, err := objectStore.Manifest(name, tag)
	if err != nil {
		t.Fatal(err)
	}

	mBytes, err := json.Marshal(m)
	if err != nil {
		t.Fatal(err)
	}

	if string(mBytes) != string(manifestBytes) {
		t.Fatal("Incorrect manifest")
	}

	for _, blob := range testBlobs {
		l, err := objectStore.Layer(blob.digest)
		if err != nil {
			t.Fatal(err)
		}

		reader, err := l.Reader()
		if err != nil {
			t.Fatal(err)
		}
		defer reader.Close()

		blobBytes, err := ioutil.ReadAll(reader)
		if err != nil {
			t.Fatal(err)
		}

		if string(blobBytes) != string(blob.contents) {
			t.Fatal("Incorrect blob")
		}
	}
}

func TestPullResume(t *testing.T) {
	name := "hello/world"
	tag := "sometag"
	testBlobs := []testBlob{
		{
			digest:   "tarsum.v2+sha256:12345",
			contents: []byte("some contents"),
		},
		{
			digest:   "tarsum.v2+sha256:98765",
			contents: []byte("some other contents"),
		},
	}
	layers := make([]storage.FSLayer, len(testBlobs))
	history := make([]storage.ManifestHistory, len(testBlobs))

	for i, layer := range testBlobs {
		layers[i] = storage.FSLayer{BlobSum: layer.digest}
		history[i] = storage.ManifestHistory{V1Compatibility: layer.digest.String()}
	}

	manifest := &storage.Manifest{
		Name:         name,
		Tag:          tag,
		Architecture: "x86",
		FSLayers:     layers,
		History:      history,
		Versioned: storage.Versioned{
			SchemaVersion: 1,
		},
	}
	manifestBytes, err := json.Marshal(manifest)
	if err != nil {
		t.Fatal(err)
	}

	layerRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs))
	for i, blob := range testBlobs {
		layerRequestResponseMappings[2*i] = testutil.RequestResponseMapping{
			Request: testutil.Request{
				Method: "GET",
				Route:  "/v2/" + name + "/blobs/" + blob.digest.String(),
			},
			Response: testutil.Response{
				StatusCode: http.StatusOK,
				Body:       blob.contents[:len(blob.contents)/2],
				Headers: http.Header(map[string][]string{
					"Content-Length": {fmt.Sprint(len(blob.contents))},
				}),
			},
		}
		layerRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{
			Request: testutil.Request{
				Method: "GET",
				Route:  "/v2/" + name + "/blobs/" + blob.digest.String(),
			},
			Response: testutil.Response{
				StatusCode: http.StatusOK,
				Body:       blob.contents[len(blob.contents)/2:],
			},
		}
	}

	for i := 0; i < 3; i++ {
		layerRequestResponseMappings = append(layerRequestResponseMappings, testutil.RequestResponseMapping{
			Request: testutil.Request{
				Method: "GET",
				Route:  "/v2/" + name + "/manifests/" + tag,
			},
			Response: testutil.Response{
				StatusCode: http.StatusOK,
				Body:       manifestBytes,
			},
		})
	}

	handler := testutil.NewHandler(layerRequestResponseMappings)
	server := httptest.NewServer(handler)
	client, err := New(server.URL)
	if err != nil {
		t.Fatalf("error creating client: %v", err)
	}
	objectStore := &memoryObjectStore{
		mutex:           new(sync.Mutex),
		manifestStorage: make(map[string]*storage.SignedManifest),
		layerStorage:    make(map[digest.Digest]Layer),
	}

	for attempts := 0; attempts < 3; attempts++ {
		err = Pull(client, objectStore, name, tag)
		if err == nil {
			break
		}
	}

	if err != nil {
		t.Fatal(err)
	}

	m, err := objectStore.Manifest(name, tag)
	if err != nil {
		t.Fatal(err)
	}

	mBytes, err := json.Marshal(m)
	if err != nil {
		t.Fatal(err)
	}

	if string(mBytes) != string(manifestBytes) {
		t.Fatal("Incorrect manifest")
	}

	for _, blob := range testBlobs {
		l, err := objectStore.Layer(blob.digest)
		if err != nil {
			t.Fatal(err)
		}

		reader, err := l.Reader()
		if err != nil {
			t.Fatal(err)
		}
		defer reader.Close()

		layerBytes, err := ioutil.ReadAll(reader)
		if err != nil {
			t.Fatal(err)
		}

		if string(layerBytes) != string(blob.contents) {
			t.Fatal("Incorrect blob")
		}
	}
}
|
||||||
|
|
||||||
|
// headerInterceptingResponseWriter is a hacky workaround to re-write the
|
||||||
|
// location header to have the server url.
|
||||||
|
type headerInterceptingResponseWriter struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
serverURL string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hirw *headerInterceptingResponseWriter) WriteHeader(status int) {
|
||||||
|
location := hirw.Header().Get("Location")
|
||||||
|
if location != "" {
|
||||||
|
hirw.Header().Set("Location", hirw.serverURL+location)
|
||||||
|
}
|
||||||
|
|
||||||
|
hirw.ResponseWriter.WriteHeader(status)
|
||||||
|
}
79
client/errors.go
Normal file
@@ -0,0 +1,79 @@
package client

import (
	"fmt"

	"github.com/docker/docker-registry/digest"
)

// RepositoryNotFoundError is returned when making an operation against a
// repository that does not exist in the registry.
type RepositoryNotFoundError struct {
	Name string
}

func (e *RepositoryNotFoundError) Error() string {
	return fmt.Sprintf("No repository found with Name: %s", e.Name)
}

// ImageManifestNotFoundError is returned when making an operation against a
// given image manifest that does not exist in the registry.
type ImageManifestNotFoundError struct {
	Name string
	Tag  string
}

func (e *ImageManifestNotFoundError) Error() string {
	return fmt.Sprintf("No manifest found with Name: %s, Tag: %s",
		e.Name, e.Tag)
}

// BlobNotFoundError is returned when making an operation against a given image
// layer that does not exist in the registry.
type BlobNotFoundError struct {
	Name   string
	Digest digest.Digest
}

func (e *BlobNotFoundError) Error() string {
	return fmt.Sprintf("No blob found with Name: %s, Digest: %s",
		e.Name, e.Digest)
}

// BlobUploadNotFoundError is returned when making a blob upload operation against an
// invalid blob upload location url.
// This may be the result of using a cancelled, completed, or stale upload
// location.
type BlobUploadNotFoundError struct {
	Location string
}

func (e *BlobUploadNotFoundError) Error() string {
	return fmt.Sprintf("No blob upload found at Location: %s", e.Location)
}

// BlobUploadInvalidRangeError is returned when attempting to upload an image
// blob chunk that is out of order.
// This provides the known BlobSize and LastValidRange which can be used to
// resume the upload.
type BlobUploadInvalidRangeError struct {
	Location       string
	LastValidRange int
	BlobSize       int
}

func (e *BlobUploadInvalidRangeError) Error() string {
	return fmt.Sprintf(
		"Invalid range provided for upload at Location: %s. Last Valid Range: %d, Blob Size: %d",
		e.Location, e.LastValidRange, e.BlobSize)
}

// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
// returned when making a registry api call.
type UnexpectedHTTPStatusError struct {
	Status string
}

func (e *UnexpectedHTTPStatusError) Error() string {
	return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status)
}
239
client/objectstore.go
Normal file
@@ -0,0 +1,239 @@
package client

import (
	"bytes"
	"fmt"
	"io"
	"sync"

	"github.com/docker/docker-registry/digest"
	"github.com/docker/docker-registry/storage"
)

var (
	// ErrLayerAlreadyExists is returned when attempting to create a layer with
	// a tarsum that is already in use.
	ErrLayerAlreadyExists = fmt.Errorf("Layer already exists")

	// ErrLayerLocked is returned when attempting to write to a layer which is
	// currently being written to.
	ErrLayerLocked = fmt.Errorf("Layer locked")
)

// ObjectStore is an interface which is designed to approximate the docker
// engine storage. This interface is subject to change to conform to the
// future requirements of the engine.
type ObjectStore interface {
	// Manifest retrieves the image manifest stored at the given repository name
	// and tag
	Manifest(name, tag string) (*storage.SignedManifest, error)

	// WriteManifest stores an image manifest at the given repository name and
	// tag
	WriteManifest(name, tag string, manifest *storage.SignedManifest) error

	// Layer returns a handle to a layer for reading and writing
	Layer(dgst digest.Digest) (Layer, error)
}

// Layer is a generic image layer interface.
// A Layer may not be written to if it is already complete.
type Layer interface {
	// Reader returns a LayerReader or an error if the layer has not been
	// written to or is currently being written to.
	Reader() (LayerReader, error)

	// Writer returns a LayerWriter or an error if the layer has been fully
	// written to or is currently being written to.
	Writer() (LayerWriter, error)

	// Wait blocks until the Layer can be read from.
	Wait() error
}

// LayerReader is a read-only handle to a Layer, which exposes the CurrentSize
// and full Size in addition to implementing the io.ReadCloser interface.
type LayerReader interface {
	io.ReadCloser

	// CurrentSize returns the number of bytes written to the underlying Layer
	CurrentSize() int

	// Size returns the full size of the underlying Layer
	Size() int
}

// LayerWriter is a write-only handle to a Layer, which exposes the CurrentSize
// and full Size in addition to implementing the io.WriteCloser interface.
// SetSize must be called on this LayerWriter before it can be written to.
type LayerWriter interface {
	io.WriteCloser

	// CurrentSize returns the number of bytes written to the underlying Layer
	CurrentSize() int

	// Size returns the full size of the underlying Layer
	Size() int

	// SetSize sets the full size of the underlying Layer.
	// This must be called before any calls to Write
	SetSize(int) error
}

// memoryObjectStore is an in-memory implementation of the ObjectStore interface
type memoryObjectStore struct {
	mutex           *sync.Mutex
	manifestStorage map[string]*storage.SignedManifest
	layerStorage    map[digest.Digest]Layer
}

func (objStore *memoryObjectStore) Manifest(name, tag string) (*storage.SignedManifest, error) {
	objStore.mutex.Lock()
	defer objStore.mutex.Unlock()

	manifest, ok := objStore.manifestStorage[name+":"+tag]
	if !ok {
		return nil, fmt.Errorf("No manifest found with Name: %q, Tag: %q", name, tag)
	}
	return manifest, nil
}

func (objStore *memoryObjectStore) WriteManifest(name, tag string, manifest *storage.SignedManifest) error {
	objStore.mutex.Lock()
	defer objStore.mutex.Unlock()

	objStore.manifestStorage[name+":"+tag] = manifest
	return nil
}

func (objStore *memoryObjectStore) Layer(dgst digest.Digest) (Layer, error) {
	objStore.mutex.Lock()
	defer objStore.mutex.Unlock()

	layer, ok := objStore.layerStorage[dgst]
	if !ok {
		layer = &memoryLayer{cond: sync.NewCond(new(sync.Mutex))}
		objStore.layerStorage[dgst] = layer
	}

	return layer, nil
}

type memoryLayer struct {
	cond         *sync.Cond
	contents     []byte
	expectedSize int
	writing      bool
}

func (ml *memoryLayer) Reader() (LayerReader, error) {
	ml.cond.L.Lock()
	defer ml.cond.L.Unlock()

	if ml.contents == nil {
		return nil, fmt.Errorf("Layer has not been written to yet")
	}
	if ml.writing {
		return nil, ErrLayerLocked
	}

	return &memoryLayerReader{ml: ml, reader: bytes.NewReader(ml.contents)}, nil
}

func (ml *memoryLayer) Writer() (LayerWriter, error) {
	ml.cond.L.Lock()
	defer ml.cond.L.Unlock()

	if ml.contents != nil {
		if ml.writing {
			return nil, ErrLayerLocked
		}
		if ml.expectedSize == len(ml.contents) {
			return nil, ErrLayerAlreadyExists
		}
	} else {
		ml.contents = make([]byte, 0)
	}

	ml.writing = true
	return &memoryLayerWriter{ml: ml, buffer: bytes.NewBuffer(ml.contents)}, nil
}

func (ml *memoryLayer) Wait() error {
	ml.cond.L.Lock()
	defer ml.cond.L.Unlock()

	if ml.contents == nil {
		return fmt.Errorf("No writer to wait on")
	}

	for ml.writing {
		ml.cond.Wait()
	}

	return nil
}

type memoryLayerReader struct {
	ml     *memoryLayer
	reader *bytes.Reader
}

func (mlr *memoryLayerReader) Read(p []byte) (int, error) {
	return mlr.reader.Read(p)
}

func (mlr *memoryLayerReader) Close() error {
	return nil
}

func (mlr *memoryLayerReader) CurrentSize() int {
	return len(mlr.ml.contents)
}

func (mlr *memoryLayerReader) Size() int {
	return mlr.ml.expectedSize
}

type memoryLayerWriter struct {
	ml     *memoryLayer
	buffer *bytes.Buffer
}

func (mlw *memoryLayerWriter) Write(p []byte) (int, error) {
	if mlw.ml.expectedSize == 0 {
		return 0, fmt.Errorf("Must set size before writing to layer")
	}
	wrote, err := mlw.buffer.Write(p)
	mlw.ml.contents = mlw.buffer.Bytes()
	return wrote, err
}

func (mlw *memoryLayerWriter) Close() error {
	mlw.ml.cond.L.Lock()
	defer mlw.ml.cond.L.Unlock()

	return mlw.close()
}

func (mlw *memoryLayerWriter) close() error {
	mlw.ml.writing = false
	mlw.ml.cond.Broadcast()
	return nil
}

func (mlw *memoryLayerWriter) CurrentSize() int {
	return len(mlw.ml.contents)
}

func (mlw *memoryLayerWriter) Size() int {
	return mlw.ml.expectedSize
}

func (mlw *memoryLayerWriter) SetSize(size int) error {
	if !mlw.ml.writing {
		return fmt.Errorf("Layer is closed for writing")
	}
	mlw.ml.expectedSize = size
	return nil
}
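The in-memory store above coordinates concurrent access to a layer with one `sync.Cond` per layer: a writer holds the `writing` flag, readers block in `Wait` until the writer's `Close` broadcasts. Below is a minimal standalone sketch of that same condition-variable pattern; the `layer` type and its data here are hypothetical illustrations, not the package's API.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// layer mimics memoryLayer: cond guards contents and the writing flag.
type layer struct {
	cond     *sync.Cond
	contents []byte
	writing  bool
}

// write simulates a slow download, then publishes the result and
// wakes every goroutine blocked in wait().
func (l *layer) write(p []byte) {
	time.Sleep(10 * time.Millisecond) // stand-in for network transfer
	l.cond.L.Lock()
	l.contents = append(l.contents, p...)
	l.writing = false
	l.cond.Broadcast()
	l.cond.L.Unlock()
}

// wait blocks until no writer is active, then returns the contents.
func (l *layer) wait() []byte {
	l.cond.L.Lock()
	defer l.cond.L.Unlock()
	for l.writing {
		l.cond.Wait() // releases the lock while blocked
	}
	return l.contents
}

func main() {
	l := &layer{cond: sync.NewCond(new(sync.Mutex)), writing: true}
	go l.write([]byte("layer data"))
	fmt.Printf("%s\n", l.wait()) // prints "layer data" once the write completes
}
```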
151
client/pull.go
Normal file
@@ -0,0 +1,151 @@
package client

import (
	"fmt"
	"io"

	"github.com/docker/docker-registry/storage"

	log "github.com/Sirupsen/logrus"
)

// simultaneousLayerPullWindow is the size of the parallel layer pull window.
// A layer may not be pulled until the layer preceding it by the length of the
// pull window has been successfully pulled.
const simultaneousLayerPullWindow = 4

// Pull implements a client pull workflow for the image defined by the given
// name and tag pair, using the given ObjectStore for local manifest and layer
// storage
func Pull(c Client, objectStore ObjectStore, name, tag string) error {
	manifest, err := c.GetImageManifest(name, tag)
	if err != nil {
		return err
	}
	log.WithField("manifest", manifest).Info("Pulled manifest")

	if len(manifest.FSLayers) != len(manifest.History) {
		return fmt.Errorf("Length of history not equal to number of layers")
	}
	if len(manifest.FSLayers) == 0 {
		return fmt.Errorf("Image has no layers")
	}

	errChans := make([]chan error, len(manifest.FSLayers))
	for i := range manifest.FSLayers {
		errChans[i] = make(chan error)
	}

	// To avoid leaking goroutines we must notify the pullLayer goroutines
	// of a cancellation, otherwise they will block forever on their sends.
	cancelCh := make(chan struct{})

	// Iterate over each layer in the manifest, simultaneously pulling no more
	// than simultaneousLayerPullWindow layers at a time. If an error is
	// received from a layer pull, we abort the pull.
	for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPullWindow; i++ {
		dependentLayer := i - simultaneousLayerPullWindow
		if dependentLayer >= 0 {
			err := <-errChans[dependentLayer]
			if err != nil {
				log.WithField("error", err).Warn("Pull aborted")
				close(cancelCh)
				return err
			}
		}

		if i < len(manifest.FSLayers) {
			go func(i int) {
				select {
				case errChans[i] <- pullLayer(c, objectStore, name, manifest.FSLayers[i]):
				case <-cancelCh: // unblocks only once cancelCh is closed after an abort
				}
			}(i)
		}
	}

	err = objectStore.WriteManifest(name, tag, manifest)
	if err != nil {
		log.WithFields(log.Fields{
			"error":    err,
			"manifest": manifest,
		}).Warn("Unable to write image manifest")
		return err
	}

	return nil
}

func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer storage.FSLayer) error {
	log.WithField("layer", fsLayer).Info("Pulling layer")

	layer, err := objectStore.Layer(fsLayer.BlobSum)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to write local layer")
		return err
	}

	layerWriter, err := layer.Writer()
	if err == ErrLayerAlreadyExists {
		log.WithField("layer", fsLayer).Info("Layer already exists")
		return nil
	}
	if err == ErrLayerLocked {
		log.WithField("layer", fsLayer).Info("Layer download in progress, waiting")
		layer.Wait()
		return nil
	}
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to write local layer")
		return err
	}
	defer layerWriter.Close()

	if layerWriter.CurrentSize() > 0 {
		log.WithFields(log.Fields{
			"layer":       fsLayer,
			"currentSize": layerWriter.CurrentSize(),
			"size":        layerWriter.Size(),
		}).Info("Layer partially downloaded, resuming")
	}

	layerReader, length, err := c.GetBlob(name, fsLayer.BlobSum, layerWriter.CurrentSize())
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to download layer")
		return err
	}
	defer layerReader.Close()

	layerWriter.SetSize(layerWriter.CurrentSize() + length)

	_, err = io.Copy(layerWriter, layerReader)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to download layer")
		return err
	}
	if layerWriter.CurrentSize() != layerWriter.Size() {
		log.WithFields(log.Fields{
			"size":        layerWriter.Size(),
			"currentSize": layerWriter.CurrentSize(),
			"layer":       fsLayer,
		}).Warn("Layer invalid size")
		return fmt.Errorf(
			"Wrote incorrect number of bytes for layer %v. Expected %d, Wrote %d",
			fsLayer, layerWriter.Size(), layerWriter.CurrentSize(),
		)
	}
	return nil
}
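`Pull` (and `Push` below) schedules layers with a sliding window: task i may only start once task i - window has reported success on its error channel, so at most `window` transfers run at a time while ordering is preserved. Here is a standalone sketch of that scheduling idea; `runWindowed` and the task list are hypothetical names, and buffered channels stand in for the `cancelCh` used above to avoid leaking goroutines on abort.

```go
package main

import "fmt"

// runWindowed runs tasks with at most `window` in flight; task i starts
// only after task i-window has completed successfully.
func runWindowed(tasks []func() error, window int) error {
	errChans := make([]chan error, len(tasks))
	for i := range tasks {
		errChans[i] = make(chan error, 1) // buffered: senders never block
	}
	for i := 0; i < len(tasks)+window; i++ {
		if prev := i - window; prev >= 0 {
			if err := <-errChans[prev]; err != nil {
				return err // abort; in-flight goroutines finish into their buffers
			}
		}
		if i < len(tasks) {
			go func(i int) { errChans[i] <- tasks[i]() }(i)
		}
	}
	return nil
}

func main() {
	var tasks []func() error
	for i := 0; i < 10; i++ {
		i := i
		tasks = append(tasks, func() error { fmt.Println("task", i); return nil })
	}
	fmt.Println("err:", runWindowed(tasks, 4))
}
```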
137
client/push.go
Normal file
@@ -0,0 +1,137 @@
package client

import (
	"fmt"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/docker-registry/storage"
)

// simultaneousLayerPushWindow is the size of the parallel layer push window.
// A layer may not be pushed until the layer preceding it by the length of the
// push window has been successfully pushed.
const simultaneousLayerPushWindow = 4

type pushFunction func(fsLayer storage.FSLayer) error

// Push implements a client push workflow for the image defined by the given
// name and tag pair, using the given ObjectStore for local manifest and layer
// storage
func Push(c Client, objectStore ObjectStore, name, tag string) error {
	manifest, err := objectStore.Manifest(name, tag)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"name":  name,
			"tag":   tag,
		}).Info("No image found")
		return err
	}

	errChans := make([]chan error, len(manifest.FSLayers))
	for i := range manifest.FSLayers {
		errChans[i] = make(chan error)
	}

	cancelCh := make(chan struct{})

	// Iterate over each layer in the manifest, simultaneously pushing no more
	// than simultaneousLayerPushWindow layers at a time. If an error is
	// received from a layer push, we abort the push.
	for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPushWindow; i++ {
		dependentLayer := i - simultaneousLayerPushWindow
		if dependentLayer >= 0 {
			err := <-errChans[dependentLayer]
			if err != nil {
				log.WithField("error", err).Warn("Push aborted")
				close(cancelCh)
				return err
			}
		}

		if i < len(manifest.FSLayers) {
			go func(i int) {
				select {
				case errChans[i] <- pushLayer(c, objectStore, name, manifest.FSLayers[i]):
				case <-cancelCh: // receives the broadcast cancellation after an abort
				}
			}(i)
		}
	}

	err = c.PutImageManifest(name, tag, manifest)
	if err != nil {
		log.WithFields(log.Fields{
			"error":    err,
			"manifest": manifest,
		}).Warn("Unable to upload manifest")
		return err
	}

	return nil
}

func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer storage.FSLayer) error {
	log.WithField("layer", fsLayer).Info("Pushing layer")

	layer, err := objectStore.Layer(fsLayer.BlobSum)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to read local layer")
		return err
	}

	layerReader, err := layer.Reader()
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to read local layer")
		return err
	}
	defer layerReader.Close()

	if layerReader.CurrentSize() != layerReader.Size() {
		log.WithFields(log.Fields{
			"layer":       fsLayer,
			"currentSize": layerReader.CurrentSize(),
			"size":        layerReader.Size(),
		}).Warn("Local layer incomplete")
		return fmt.Errorf("Local layer incomplete")
	}

	length, err := c.BlobLength(name, fsLayer.BlobSum)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to check existence of remote layer")
		return err
	}
	if length >= 0 {
		log.WithField("layer", fsLayer).Info("Layer already exists")
		return nil
	}

	location, err := c.InitiateBlobUpload(name)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to upload layer")
		return err
	}

	err = c.UploadBlob(location, layerReader, int(layerReader.CurrentSize()), fsLayer.BlobSum)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to upload layer")
		return err
	}

	return nil
}
95
cmd/registry-api-doctable-gen/main.go
Normal file
@@ -0,0 +1,95 @@
// registry-api-doctable-gen uses various descriptors within the registry code
// base to generate markdown tables for use in documentation. This is only
// meant to facilitate updates to documentation and not as an automated tool.
//
// For now, this only includes support for error codes:
//
//	$ registry-api-doctable-gen errors
//
package main

import (
	"fmt"
	"io"
	"log"
	"os"
	"reflect"
	"strings"
	"text/tabwriter"

	"github.com/docker/docker-registry/api/v2"
)

func main() {
	if len(os.Args) < 2 {
		log.Fatalln("please specify a table to generate: (errors)")
	}

	switch os.Args[1] {
	case "errors":
		dumpErrors(os.Stdout)
	default:
		log.Fatalln("unknown descriptor table:", os.Args[1])
	}
}

func dumpErrors(wr io.Writer) {
	writer := tabwriter.NewWriter(wr, 8, 8, 0, '\t', 0) // write to wr, not os.Stdout directly
	defer writer.Flush()

	fmt.Fprint(writer, "|")
	dtype := reflect.TypeOf(v2.ErrorDescriptor{})
	var fieldsPrinted int
	for i := 0; i < dtype.NumField(); i++ {
		field := dtype.Field(i)
		if field.Name == "Value" {
			continue
		}

		fmt.Fprint(writer, field.Name, "|")
		fieldsPrinted++
	}

	divider := strings.Repeat("-", 8)
	var parts []string
	for i := 0; i < fieldsPrinted; i++ {
		parts = append(parts, divider)
	}
	divider = strings.Join(parts, "|")

	fmt.Fprintln(writer, "\n"+divider)

	for _, descriptor := range v2.ErrorDescriptors {
		fmt.Fprint(writer, "|")

		v := reflect.ValueOf(descriptor)
		for i := 0; i < dtype.NumField(); i++ {
			value := v.Field(i).Interface()
			field := v.Type().Field(i)
			if field.Name == "Value" {
				continue
			} else if field.Name == "Description" {
				value = strings.Replace(value.(string), "\n", " ", -1)
			} else if field.Name == "Code" {
				value = fmt.Sprintf("`%s`", value)
			} else if field.Name == "HTTPStatusCodes" {
				if len(value.([]int)) > 0 {
					var codes []string
					for _, code := range value.([]int) {
						codes = append(codes, fmt.Sprint(code))
					}
					value = strings.Join(codes, ", ")
				} else {
					value = "Any"
				}
			}

			fmt.Fprint(writer, value, "|")
		}

		fmt.Fprint(writer, "\n")
	}
}
31
cmd/registry-storagedriver-azure/main.go
Normal file
@@ -0,0 +1,31 @@
// +build ignore

package main

import (
	"encoding/json"
	"os"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/docker-registry/storagedriver/azure"
	"github.com/docker/docker-registry/storagedriver/ipc"
)

// An out-of-process Azure Storage driver, intended to be run by ipc.NewDriverClient
func main() {
	parametersBytes := []byte(os.Args[1])
	var parameters map[string]string
	err := json.Unmarshal(parametersBytes, &parameters)
	if err != nil {
		panic(err)
	}

	driver, err := azure.FromParameters(parameters)
	if err != nil {
		panic(err)
	}

	if err := ipc.StorageDriverServer(driver); err != nil {
		log.Fatalln("driver error:", err)
	}
}
27
cmd/registry-storagedriver-filesystem/main.go
Normal file
@@ -0,0 +1,27 @@
// +build ignore

package main

import (
	"encoding/json"
	"os"

	"github.com/Sirupsen/logrus"

	"github.com/docker/docker-registry/storagedriver/filesystem"
	"github.com/docker/docker-registry/storagedriver/ipc"
)

// An out-of-process filesystem driver, intended to be run by ipc.NewDriverClient
func main() {
	parametersBytes := []byte(os.Args[1])
	var parameters map[string]string
	err := json.Unmarshal(parametersBytes, &parameters)
	if err != nil {
		panic(err)
	}

	if err := ipc.StorageDriverServer(filesystem.FromParameters(parameters)); err != nil {
		logrus.Fatalln(err)
	}
}
17
cmd/registry-storagedriver-inmemory/main.go
Normal file
@@ -0,0 +1,17 @@
// +build ignore

package main

import (
	"github.com/Sirupsen/logrus"
	"github.com/docker/docker-registry/storagedriver/inmemory"
	"github.com/docker/docker-registry/storagedriver/ipc"
)

// An out-of-process inmemory driver, intended to be run by ipc.NewDriverClient
// This exists primarily for example and testing purposes
func main() {
	if err := ipc.StorageDriverServer(inmemory.New()); err != nil {
		logrus.Fatalln(err)
	}
}
32
cmd/registry-storagedriver-s3/main.go
Normal file
@@ -0,0 +1,32 @@
// +build ignore

package main

import (
	"encoding/json"
	"os"

	"github.com/Sirupsen/logrus"

	"github.com/docker/docker-registry/storagedriver/ipc"
	"github.com/docker/docker-registry/storagedriver/s3"
)

// An out-of-process S3 driver, intended to be run by ipc.NewDriverClient
func main() {
	parametersBytes := []byte(os.Args[1])
	var parameters map[string]string
	err := json.Unmarshal(parametersBytes, &parameters)
	if err != nil {
		panic(err)
	}

	driver, err := s3.FromParameters(parameters)
	if err != nil {
		panic(err)
	}

	if err := ipc.StorageDriverServer(driver); err != nil {
		logrus.Fatalln(err)
	}
}
7
cmd/registry/config.yml
Normal file
@@ -0,0 +1,7 @@
version: 0.1
loglevel: debug
storage:
  filesystem:
    rootdirectory: /tmp/registry-dev
http:
  addr: :5000
124
cmd/registry/main.go
Normal file
@@ -0,0 +1,124 @@
package main

import (
	"flag"
	"fmt"
	"net/http"
	_ "net/http/pprof"
	"os"

	log "github.com/Sirupsen/logrus"
	"github.com/bugsnag/bugsnag-go"
	"github.com/gorilla/handlers"
	"github.com/yvasiyarov/gorelic"

	"github.com/docker/docker-registry"
	_ "github.com/docker/docker-registry/auth/silly"
	_ "github.com/docker/docker-registry/auth/token"
	"github.com/docker/docker-registry/configuration"
	_ "github.com/docker/docker-registry/storagedriver/filesystem"
	_ "github.com/docker/docker-registry/storagedriver/inmemory"
)

func main() {
	flag.Usage = usage
	flag.Parse()

	config, err := resolveConfiguration()
	if err != nil {
		fatalf("configuration error: %v", err)
	}

	app := registry.NewApp(*config)
	handler := configureReporting(app)
	handler = handlers.CombinedLoggingHandler(os.Stdout, handler)
	log.SetLevel(logLevel(config.Loglevel))

	log.Infof("listening on %v", config.HTTP.Addr)
	if err := http.ListenAndServe(config.HTTP.Addr, handler); err != nil {
		log.Fatalln(err)
	}
}

func usage() {
	fmt.Fprintln(os.Stderr, "usage:", os.Args[0], "<config>")
	flag.PrintDefaults()
}

func fatalf(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, format+"\n", args...)
	usage()
	os.Exit(1)
}

func resolveConfiguration() (*configuration.Configuration, error) {
	var configurationPath string

	if flag.NArg() > 0 {
		configurationPath = flag.Arg(0)
	} else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" {
		configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH")
	}

	if configurationPath == "" {
		return nil, fmt.Errorf("configuration path unspecified")
	}

	fp, err := os.Open(configurationPath)
	if err != nil {
		return nil, err
	}

	config, err := configuration.Parse(fp)
	if err != nil {
		return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err)
	}

	return config, nil
}

func logLevel(level configuration.Loglevel) log.Level {
	l, err := log.ParseLevel(string(level))
	if err != nil {
		log.Warnf("error parsing level %q: %v", level, err)
		l = log.InfoLevel
	}

	return l
}

func configureReporting(app *registry.App) http.Handler {
	var handler http.Handler = app

	if app.Config.Reporting.Bugsnag.APIKey != "" {
		bugsnagConfig := bugsnag.Configuration{
			APIKey: app.Config.Reporting.Bugsnag.APIKey,
			// TODO(brianbland): provide the registry version here
			// AppVersion: "2.0",
		}
		if app.Config.Reporting.Bugsnag.ReleaseStage != "" {
			bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage
		}
		if app.Config.Reporting.Bugsnag.Endpoint != "" {
			bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint
		}
		bugsnag.Configure(bugsnagConfig)

		handler = bugsnag.Handler(handler)
	}

	if app.Config.Reporting.NewRelic.LicenseKey != "" {
		agent := gorelic.NewAgent()
		agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey
		if app.Config.Reporting.NewRelic.Name != "" {
			agent.NewrelicName = app.Config.Reporting.NewRelic.Name
		}
		agent.CollectHTTPStat = true
		agent.Verbose = true
		agent.Run()

		handler = agent.WrapHTTPHandler(handler)
	}

	return handler
}
115
common/names.go
Normal file
@@ -0,0 +1,115 @@
package common

import (
	"fmt"
	"regexp"
	"strings"
)

const (
	// RepositoryNameComponentMinLength is the minimum number of characters in a
	// single repository name slash-delimited component
	RepositoryNameComponentMinLength = 2

	// RepositoryNameComponentMaxLength is the maximum number of characters in a
	// single repository name slash-delimited component
	RepositoryNameComponentMaxLength = 30

	// RepositoryNameMinComponents is the minimum number of slash-delimited
	// components that a repository name must have
	RepositoryNameMinComponents = 2

	// RepositoryNameMaxComponents is the maximum number of slash-delimited
	// components that a repository name may have
	RepositoryNameMaxComponents = 5

	// RepositoryNameTotalLengthMax is the maximum total number of characters in
	// a repository name
	RepositoryNameTotalLengthMax = 255
)

// RepositoryNameComponentRegexp restricts registry path component names to
// start with at least two letters or numbers, with following parts able to be
// separated by one period, dash or underscore.
var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`)

// RepositoryNameComponentAnchoredRegexp is the version of
// RepositoryNameComponentRegexp which must completely match the content
var RepositoryNameComponentAnchoredRegexp = regexp.MustCompile(`^` + RepositoryNameComponentRegexp.String() + `$`)

// TODO(stevvooe): RepositoryName needs to be limited to some fixed length.
// Looking at path prefixes and the S3 limitation of 1024, this should likely
// be around 512 bytes. 256 bytes might be more manageable.

// RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow 2 to
// 5 path components, separated by a forward slash.
var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/){1,4}` + RepositoryNameComponentRegexp.String())

// TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go.
var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`)

// TODO(stevvooe): Contribute these exports back to core, so they are shared.

var (
	// ErrRepositoryNameComponentShort is returned when a repository name
	// contains a component which is shorter than
	// RepositoryNameComponentMinLength
	ErrRepositoryNameComponentShort = fmt.Errorf("repository name component must be %v or more characters", RepositoryNameComponentMinLength)

	// ErrRepositoryNameComponentLong is returned when a repository name
	// contains a component which is longer than
	// RepositoryNameComponentMaxLength
	ErrRepositoryNameComponentLong = fmt.Errorf("repository name component must be %v characters or less", RepositoryNameComponentMaxLength)

	// ErrRepositoryNameMissingComponents is returned when a repository name
	// contains fewer than RepositoryNameMinComponents components
	ErrRepositoryNameMissingComponents = fmt.Errorf("repository name must have at least %v components", RepositoryNameMinComponents)

	// ErrRepositoryNameTooManyComponents is returned when a repository name
	// contains more than RepositoryNameMaxComponents components
	ErrRepositoryNameTooManyComponents = fmt.Errorf("repository name must have %v or fewer components", RepositoryNameMaxComponents)

	// ErrRepositoryNameLong is returned when a repository name is longer than
	// RepositoryNameTotalLengthMax
	ErrRepositoryNameLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax)

	// ErrRepositoryNameComponentInvalid is returned when a repository name does
	// not match RepositoryNameComponentRegexp
	ErrRepositoryNameComponentInvalid = fmt.Errorf("repository name component must match %q", RepositoryNameComponentRegexp.String())
)

// ValidateRespositoryName ensures the repository name is valid for use in the
// registry. This function accepts a superset of what might be accepted by
// docker core or docker hub. If the name does not pass validation, an error,
// describing the conditions, is returned.
func ValidateRespositoryName(name string) error {
	if len(name) > RepositoryNameTotalLengthMax {
		return ErrRepositoryNameLong
	}

	components := strings.Split(name, "/")

	if len(components) < RepositoryNameMinComponents {
		return ErrRepositoryNameMissingComponents
	}

	if len(components) > RepositoryNameMaxComponents {
		return ErrRepositoryNameTooManyComponents
	}

	for _, component := range components {
		if len(component) < RepositoryNameComponentMinLength {
			return ErrRepositoryNameComponentShort
		}

		if len(component) > RepositoryNameComponentMaxLength {
			return ErrRepositoryNameComponentLong
		}

		if !RepositoryNameComponentAnchoredRegexp.MatchString(component) {
			return ErrRepositoryNameComponentInvalid
		}
	}

	return nil
}
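A quick usage sketch of the validation rules above; the import path matches this repository, but the sample names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/docker-registry/common"
)

func main() {
	for _, name := range []string{
		"library/ubuntu", // ok: two valid components
		"a/b",            // components shorter than the 2-character minimum
		"a/b/c/d/e/f",    // more than 5 components
		"UPPER/case",     // characters outside [a-z0-9._-]
	} {
		// Prints nil for valid names, or one of the Err... values above.
		fmt.Printf("%-14s -> %v\n", name, common.ValidateRespositoryName(name))
	}
}
```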
91
common/names_test.go
Normal file
@@ -0,0 +1,91 @@
package common

import (
	"testing"
)

func TestRepositoryNameRegexp(t *testing.T) {
	for _, testcase := range []struct {
		input string
		err   error
	}{
		{
			input: "simple/name",
		},
		{
			input: "library/ubuntu",
		},
		{
			input: "docker/stevvooe/app",
		},
		{
			input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb",
			err:   ErrRepositoryNameTooManyComponents,
		},
		{
			input: "aa/aa/bb/bb/bb",
		},
		{
			input: "a/a/a/b/b",
			err:   ErrRepositoryNameComponentShort,
		},
		{
			input: "a/a/a/a/",
			err:   ErrRepositoryNameComponentShort,
		},
		{
			input: "foo.com/bar/baz",
		},
		{
			input: "blog.foo.com/bar/baz",
		},
		{
			input: "asdf",
			err:   ErrRepositoryNameMissingComponents,
		},
		{
			input: "asdf$$^/aa",
			err:   ErrRepositoryNameComponentInvalid,
		},
		{
			input: "aa-a/aa",
		},
		{
			input: "aa/aa",
		},
		{
			input: "a-a/a-a",
		},
		{
			input: "a",
			err:   ErrRepositoryNameMissingComponents,
		},
		{
			input: "a-/a/a/a",
			err:   ErrRepositoryNameComponentInvalid,
		},
	} {

		failf := func(format string, v ...interface{}) {
			t.Logf(testcase.input+": "+format, v...)
			t.Fail()
		}

		if err := ValidateRespositoryName(testcase.input); err != testcase.err {
			if testcase.err != nil {
				if err != nil {
					failf("unexpected error for invalid repository: got %v, expected %v", err, testcase.err)
				} else {
					failf("expected invalid repository: %v", testcase.err)
				}
			} else {
				if err != nil {
					// Wrong error returned.
					failf("unexpected error validating repository name: %v, expected %v", err, testcase.err)
				} else {
					failf("unexpected error validating repository name: %v", err)
				}
			}
		}
	}
}
35
common/stringset.go
Normal file
@@ -0,0 +1,35 @@
package common

// StringSet is a useful type for looking up strings.
type StringSet map[string]struct{}

// NewStringSet creates a new StringSet with the given strings.
func NewStringSet(keys ...string) StringSet {
	ss := make(StringSet, len(keys))
	ss.Add(keys...)
	return ss
}

// Add inserts the given keys into this StringSet.
func (ss StringSet) Add(keys ...string) {
	for _, key := range keys {
		ss[key] = struct{}{}
	}
}

// Contains returns whether the given key is in this StringSet.
func (ss StringSet) Contains(key string) bool {
	_, ok := ss[key]
	return ok
}

// Keys returns a slice of all keys in this StringSet.
func (ss StringSet) Keys() []string {
	keys := make([]string, 0, len(ss))

	for key := range ss {
		keys = append(keys, key)
	}

	return keys
}
72
common/tarsum.go
Normal file
@@ -0,0 +1,72 @@
package common

import (
	"fmt"

	"regexp"
)

// TarsumRegexp defines a regular expression to match tarsum identifiers.
var TarsumRegexp = regexp.MustCompile("tarsum(?:.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+")

// TarsumRegexpCapturing defines a regular expression to match tarsum identifiers with
// capture groups corresponding to each component.
var TarsumRegexpCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)")

// TarSumInfo contains information about a parsed tarsum.
type TarSumInfo struct {
	// Version contains the version of the tarsum.
	Version string

	// Algorithm contains the algorithm for the final digest
	Algorithm string

	// Digest contains the hex-encoded digest.
	Digest string
}

// InvalidTarSumError provides information about a TarSum that cannot be parsed
// by ParseTarSum.
type InvalidTarSumError struct {
	TarSum string
}

func (e InvalidTarSumError) Error() string {
	return fmt.Sprintf("invalid tarsum: %q", e.TarSum)
}

// ParseTarSum parses a tarsum string into its components of interest. For
// example, this method may receive the tarsum in the following format:
//
//	tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e
//
// The function will return the following:
//
//	TarSumInfo{
//		Version: "v1",
//		Algorithm: "sha256",
//		Digest: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
//	}
//
func ParseTarSum(tarSum string) (tsi TarSumInfo, err error) {
	components := TarsumRegexpCapturing.FindStringSubmatch(tarSum)

	if len(components) != 1+TarsumRegexpCapturing.NumSubexp() {
		return TarSumInfo{}, InvalidTarSumError{TarSum: tarSum}
	}

	return TarSumInfo{
		Version:   components[3],
		Algorithm: components[4],
		Digest:    components[5],
	}, nil
}

// String returns the valid, string representation of the tarsum info.
func (tsi TarSumInfo) String() string {
	if tsi.Version == "" {
		return fmt.Sprintf("tarsum+%s:%s", tsi.Algorithm, tsi.Digest)
	}

	return fmt.Sprintf("tarsum.%s+%s:%s", tsi.Version, tsi.Algorithm, tsi.Digest)
}
79
common/tarsum_test.go
Normal file
@@ -0,0 +1,79 @@
package common

import (
	"reflect"
	"testing"
)

func TestParseTarSumComponents(t *testing.T) {
	for _, testcase := range []struct {
		input    string
		expected TarSumInfo
		err      error
	}{
		{
			input: "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
			expected: TarSumInfo{
				Version:   "v1",
				Algorithm: "sha256",
				Digest:    "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
			},
		},
		{
			input: "",
			err:   InvalidTarSumError{},
		},
		{
			input: "purejunk",
			err:   InvalidTarSumError{TarSum: "purejunk"},
		},
		{
			input: "tarsum.v23+test:12341234123412341effefefe",
			expected: TarSumInfo{
				Version:   "v23",
				Algorithm: "test",
				Digest:    "12341234123412341effefefe",
			},
		},

		// The following test cases are ported from docker core
		{
			// Version 0 tarsum
			input: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			expected: TarSumInfo{
				Algorithm: "sha256",
				Digest:    "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			},
		},
		{
			// Dev version tarsum
			input: "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			expected: TarSumInfo{
				Version:   "dev",
				Algorithm: "sha256",
				Digest:    "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			},
		},
	} {
		tsi, err := ParseTarSum(testcase.input)
		if err != nil {
			if testcase.err != nil && err == testcase.err {
				continue // passes
			}

			t.Fatalf("unexpected error parsing tarsum: %v", err)
		}

		if testcase.err != nil {
			t.Fatalf("expected error not encountered on %q: %v", testcase.input, testcase.err)
		}

		if !reflect.DeepEqual(tsi, testcase.expected) {
			t.Fatalf("expected tarsum info: %v != %v", tsi, testcase.expected)
		}

		if testcase.input != tsi.String() {
			t.Fatalf("input should equal output: %q != %q", tsi.String(), testcase.input)
		}
	}
}
121
common/testutil/handler.go
Normal file
@@ -0,0 +1,121 @@
package testutil

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"sort"
	"strings"
)

// RequestResponseMap is an ordered mapping from Requests to Responses
type RequestResponseMap []RequestResponseMapping

// RequestResponseMapping defines a Response to be sent in response to a given
// Request
type RequestResponseMapping struct {
	Request  Request
	Response Response
}

// TODO(bbland): add support for request headers

// Request is a simplified http.Request object
type Request struct {
	// Method is the http method of the request, for example GET
	Method string

	// Route is the http route of this request
	Route string

	// QueryParams are the query parameters of this request
	QueryParams map[string][]string

	// Body is the byte contents of the http request
	Body []byte
}

func (r Request) String() string {
	queryString := ""
	if len(r.QueryParams) > 0 {
		queryString = "?"
		keys := make([]string, 0, len(r.QueryParams))
		for k := range r.QueryParams {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			queryString += strings.Join(r.QueryParams[k], "&") + "&"
		}
		queryString = queryString[:len(queryString)-1]
	}
	return fmt.Sprintf("%s %s%s\n%s", r.Method, r.Route, queryString, r.Body)
}

// Response is a simplified http.Response object
type Response struct {
	// StatusCode is the http status code of the Response
	StatusCode int

	// Headers are the http headers of this Response
	Headers http.Header

	// Body is the response body
	Body []byte
}

// testHandler is an http.Handler with a defined mapping from Request to an
// ordered list of Response objects
type testHandler struct {
	responseMap map[string][]Response
}

// NewHandler returns a new test handler that responds to defined requests
// with specified responses.
// Each time a Request is received, the next Response is returned in the
// mapping, until no Responses are defined, at which point a 404 is sent back
func NewHandler(requestResponseMap RequestResponseMap) http.Handler {
	responseMap := make(map[string][]Response)
	for _, mapping := range requestResponseMap {
		responses, ok := responseMap[mapping.Request.String()]
		if ok {
			responseMap[mapping.Request.String()] = append(responses, mapping.Response)
		} else {
			responseMap[mapping.Request.String()] = []Response{mapping.Response}
		}
	}
	return &testHandler{responseMap: responseMap}
}

func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	requestBody, _ := ioutil.ReadAll(r.Body)
	request := Request{
		Method:      r.Method,
		Route:       r.URL.Path,
		QueryParams: r.URL.Query(),
		Body:        requestBody,
	}

	responses, ok := app.responseMap[request.String()]

	if !ok || len(responses) == 0 {
		http.NotFound(w, r)
		return
	}

	response := responses[0]
	app.responseMap[request.String()] = responses[1:]

	responseHeader := w.Header()
	for k, v := range response.Headers {
		responseHeader[k] = v
	}

	w.WriteHeader(response.StatusCode)

	io.Copy(w, bytes.NewReader(response.Body))
}
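A hypothetical usage sketch of the handler above: each canned Response is consumed in order, after which the handler falls back to 404. The route and body values here are illustrative only.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"

	"github.com/docker/docker-registry/common/testutil"
)

func main() {
	// One mapping: a single 200 response for GET /v2/.
	handler := testutil.NewHandler(testutil.RequestResponseMap{
		{
			Request:  testutil.Request{Method: "GET", Route: "/v2/"},
			Response: testutil.Response{StatusCode: http.StatusOK, Body: []byte("hello")},
		},
	})
	server := httptest.NewServer(handler)
	defer server.Close()

	for i := 0; i < 2; i++ {
		resp, err := http.Get(server.URL + "/v2/")
		if err != nil {
			panic(err)
		}
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Println(resp.StatusCode, string(body)) // 200 "hello", then 404
	}
}
```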
95
common/testutil/tarfile.go
Normal file
@@ -0,0 +1,95 @@
package testutil

import (
	"archive/tar"
	"bytes"
	"crypto/rand"
	"fmt"
	"io"
	"io/ioutil"
	mrand "math/rand"
	"time"

	"github.com/docker/docker/pkg/tarsum"
)

// CreateRandomTarFile creates a random tarfile, returning it as an
// io.ReadSeeker along with its tarsum. An error is returned if there is a
// problem generating valid content.
func CreateRandomTarFile() (rs io.ReadSeeker, tarSum string, err error) {
	nFiles := mrand.Intn(10) + 10
	target := &bytes.Buffer{}
	wr := tar.NewWriter(target)

	// Perturb this on each iteration of the loop below.
	header := &tar.Header{
		Mode:       0644,
		ModTime:    time.Now(),
		Typeflag:   tar.TypeReg,
		Uname:      "randocalrissian",
		Gname:      "cloudcity",
		AccessTime: time.Now(),
		ChangeTime: time.Now(),
	}

	for fileNumber := 0; fileNumber < nFiles; fileNumber++ {
		fileSize := mrand.Int63n(1<<20) + 1<<20

		header.Name = fmt.Sprint(fileNumber)
		header.Size = fileSize

		if err := wr.WriteHeader(header); err != nil {
			return nil, "", err
		}

		randomData := make([]byte, fileSize)

		// Fill up the buffer with some random data.
		n, err := rand.Read(randomData)

		if n != len(randomData) {
			return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData))
		}

		if err != nil {
			return nil, "", err
		}

		nn, err := io.Copy(wr, bytes.NewReader(randomData))
		if nn != fileSize {
			return nil, "", fmt.Errorf("short copy writing random file to tar")
		}

		if err != nil {
			return nil, "", err
		}

		if err := wr.Flush(); err != nil {
			return nil, "", err
		}
	}

	if err := wr.Close(); err != nil {
		return nil, "", err
	}

	reader := bytes.NewReader(target.Bytes())

	// A tar builder that supports tarsum inline calculation would be awesome
	// here.
	ts, err := tarsum.NewTarSum(reader, true, tarsum.Version1)
	if err != nil {
		return nil, "", err
	}

	nn, err := io.Copy(ioutil.Discard, ts)
	if nn != int64(len(target.Bytes())) {
		return nil, "", fmt.Errorf("short copy when getting tarsum of random layer: %v != %v", nn, len(target.Bytes()))
	}

	if err != nil {
		return nil, "", err
	}

	return bytes.NewReader(target.Bytes()), ts.Sum(nil), nil
}
73
configuration/README.md
Normal file
@@ -0,0 +1,73 @@
Docker-Registry Configuration
=============================

This document describes the registry configuration model and how to specify a custom configuration with a configuration file and/or environment variables.

Semantic-ish Versioning
-----------------------

The configuration file is designed with versioning in mind, such that most upgrades will not require a change in configuration files, and such that configuration files can be "upgraded" from one version to another.

The version is specified as a string of the form `MajorVersion.MinorVersion`, where MajorVersion and MinorVersion are both non-negative integer values. Much like [semantic versioning](http://semver.org/), minor version increases denote inherently backwards-compatible changes, such as the addition of optional fields, whereas major version increases denote a restructuring, such as renaming fields or adding required fields. Because of the explicit version definition in the configuration file, it should be possible to parse old configuration files and port them to the current configuration version, although this is not guaranteed for all future versions.

File Structure (as of Version 0.1)
----------------------------------

The configuration structure is defined by the `Configuration` struct in `configuration.go`, and is best described by the following two examples:

```yaml
version: 0.1
loglevel: info
storage:
  s3:
    region: us-east-1
    bucket: my-bucket
    rootpath: /registry
    encrypt: true
    secure: false
    accesskey: SAMPLEACCESSKEY
    secretkey: SUPERSECRET
    host: ~
    port: ~
```

```yaml
version: 0.1
loglevel: debug
storage: inmemory
```

### version

The version is expected to remain a top-level field, so as to allow for a consistent version check before parsing the remainder of the configuration file.

### loglevel

This specifies the log level of the registry.

Supported values:
* `error`
* `warn`
* `info`
* `debug`

### storage

This specifies the storage driver, and may be provided either as a string (only the driver type) or as a driver name with a parameters map, as seen in the first example above.

The parameters map will be passed into the factory constructor of the given storage driver type.

### Notes

All keys in the configuration file **must** be provided as a string of lowercase letters and numbers only, and values must be string-like (booleans and numerical values are fine to parse as strings).

Environment Variables
---------------------

To support the workflow of running a docker registry from a standard container without having to modify configuration files, the registry configuration also supports environment variables for overriding fields.

Any configuration field other than version can be replaced by providing an environment variable of the following form: `REGISTRY_<uppercase key>[_<uppercase key>]...`.

For example, to change the loglevel to `error`, one can provide `REGISTRY_LOGLEVEL=error`, and to change the s3 storage driver's region parameter to `us-west-1`, one can provide `REGISTRY_STORAGE_S3_REGION=us-west-1`.

### Notes

If an environment variable changes a map value into a string, such as replacing the storage driver type with `REGISTRY_STORAGE=filesystem`, then all sub-fields will be erased. As such, specifying the storage type in the environment will remove all parameters related to the old storage configuration.

By restricting all keys in the configuration file to lowercase letters and numbers, we can avoid any potential environment variable mapping ambiguity.
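To make the override rules concrete, here is a minimal, hypothetical Go program (not part of this change) exercising them; it mirrors the patterns used in `configuration_test.go` below:

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/docker/docker-registry/configuration"
)

func main() {
	yml := []byte("version: 0.1\nloglevel: info\nstorage: inmemory\n")

	// Replacing the storage type wholesale: any old storage parameters
	// would be erased, as described in the Notes above.
	os.Setenv("REGISTRY_STORAGE", "filesystem")
	os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/registry")

	config, err := configuration.Parse(bytes.NewReader(yml))
	if err != nil {
		panic(err)
	}

	fmt.Println(config.Storage.Type())       // filesystem
	fmt.Println(config.Storage.Parameters()) // map[rootdirectory:/tmp/registry]
}
```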
279
configuration/configuration.go
Normal file
@@ -0,0 +1,279 @@
package configuration

import (
	"fmt"
	"io"
	"io/ioutil"
	"reflect"
	"strings"
)

// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and
// optionally modified by environment variables
type Configuration struct {
	// Version is the version which defines the format of the rest of the configuration
	Version Version `yaml:"version"`

	// Loglevel is the level at which registry operations are logged
	Loglevel Loglevel `yaml:"loglevel"`

	// Storage is the configuration for the registry's storage driver
	Storage Storage `yaml:"storage"`

	// Auth allows configuration of various authorization methods that may be
	// used to gate requests.
	Auth Auth `yaml:"auth"`

	// Reporting is the configuration for error reporting
	Reporting Reporting `yaml:"reporting"`

	// HTTP contains configuration parameters for the registry's http
	// interface.
	HTTP struct {
		// Addr specifies the bind address for the registry instance.
		Addr string `yaml:"addr"`
	} `yaml:"http"`
}

// v0_1Configuration is a Version 0.1 Configuration struct
// This is currently aliased to Configuration, as it is the current version
type v0_1Configuration Configuration

// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints
func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var versionString string
	err := unmarshal(&versionString)
	if err != nil {
		return err
	}

	newVersion := Version(versionString)
	if _, err := newVersion.major(); err != nil {
		return err
	}

	if _, err := newVersion.minor(); err != nil {
		return err
	}

	*version = newVersion
	return nil
}

// CurrentVersion is the most recent Version that can be parsed
var CurrentVersion = MajorMinorVersion(0, 1)

// Loglevel is the level at which operations are logged
// This can be error, warn, info, or debug
type Loglevel string

// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a
// valid loglevel
func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var loglevelString string
	err := unmarshal(&loglevelString)
	if err != nil {
		return err
	}

	loglevelString = strings.ToLower(loglevelString)
	switch loglevelString {
	case "error", "warn", "info", "debug":
	default:
		return fmt.Errorf("Invalid loglevel %s, must be one of [error, warn, info, debug]", loglevelString)
	}

	*loglevel = Loglevel(loglevelString)
	return nil
}

// Parameters defines a key-value parameters mapping
type Parameters map[string]interface{}

// Storage defines the configuration for registry object storage
type Storage map[string]Parameters

// Type returns the storage driver type, such as filesystem or s3
func (storage Storage) Type() string {
	// Return only key in this map
	for k := range storage {
		return k
	}
	return ""
}

// Parameters returns the Parameters map for a Storage configuration
func (storage Storage) Parameters() Parameters {
	return storage[storage.Type()]
}

// setParameter changes the parameter at the provided key to the new value
func (storage Storage) setParameter(key string, value interface{}) {
	storage[storage.Type()][key] = value
}

// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters
func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var storageMap map[string]Parameters
	err := unmarshal(&storageMap)
	if err == nil {
		if len(storageMap) > 1 {
			types := make([]string, 0, len(storageMap))
			for k := range storageMap {
				types = append(types, k)
			}
			return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types)
		}
		*storage = storageMap
		return nil
	}

	var storageType string
	err = unmarshal(&storageType)
	if err == nil {
		*storage = Storage{storageType: Parameters{}}
		return nil
	}

	return err
}

// MarshalYAML implements the yaml.Marshaler interface
func (storage Storage) MarshalYAML() (interface{}, error) {
	if storage.Parameters() == nil {
		return storage.Type(), nil
	}
	return map[string]Parameters(storage), nil
}

// Auth defines the configuration for registry authorization.
type Auth map[string]Parameters

// Type returns the auth type, such as silly
func (auth Auth) Type() string {
	// Return only key in this map
	for k := range auth {
		return k
	}
	return ""
}

// Parameters returns the Parameters map for an Auth configuration
func (auth Auth) Parameters() Parameters {
	return auth[auth.Type()]
}

// setParameter changes the parameter at the provided key to the new value
func (auth Auth) setParameter(key string, value interface{}) {
	auth[auth.Type()][key] = value
}

// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a single item map into an Auth or a string into an Auth type with no parameters
func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var m map[string]Parameters
	err := unmarshal(&m)
	if err == nil {
		if len(m) > 1 {
			types := make([]string, 0, len(m))
			for k := range m {
				types = append(types, k)
			}

			// TODO(stevvooe): May want to change this slightly for
			// authorization to allow multiple challenges.
			return fmt.Errorf("must provide exactly one type. Provided: %v", types)
		}
		*auth = m
		return nil
	}

	var authType string
	err = unmarshal(&authType)
	if err == nil {
		*auth = Auth{authType: Parameters{}}
		return nil
	}

	return err
}

// MarshalYAML implements the yaml.Marshaler interface
func (auth Auth) MarshalYAML() (interface{}, error) {
	if auth.Parameters() == nil {
		return auth.Type(), nil
	}
	return map[string]Parameters(auth), nil
}

// Reporting defines error reporting methods.
type Reporting struct {
	// Bugsnag configures error reporting for Bugsnag (bugsnag.com).
	Bugsnag BugsnagReporting `yaml:"bugsnag"`
	// NewRelic configures error reporting for NewRelic (newrelic.com)
	NewRelic NewRelicReporting `yaml:"newrelic"`
}

// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com).
type BugsnagReporting struct {
	// APIKey is the Bugsnag api key.
	APIKey string `yaml:"apikey"`
	// ReleaseStage tracks where the registry is deployed.
	// Examples: production, staging, development
	ReleaseStage string `yaml:"releasestage"`
	// Endpoint is used for specifying an enterprise Bugsnag endpoint.
	Endpoint string `yaml:"endpoint"`
}

// NewRelicReporting configures error reporting for NewRelic (newrelic.com)
type NewRelicReporting struct {
	// LicenseKey is the NewRelic user license key
	LicenseKey string `yaml:"licensekey"`
	// Name is the component name of the registry in NewRelic
	Name string `yaml:"name"`
}

// Parse parses an input configuration yaml document into a Configuration struct
// This should generally be capable of handling old configuration format versions
//
// Environment variables may be used to override configuration parameters other than version,
// following the scheme below:
// Configuration.Abc may be replaced by the value of REGISTRY_ABC,
// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth
func Parse(rd io.Reader) (*Configuration, error) {
	in, err := ioutil.ReadAll(rd)
	if err != nil {
		return nil, err
	}

	p := NewParser("registry", []VersionedParseInfo{
		{
			Version: MajorMinorVersion(0, 1),
			ParseAs: reflect.TypeOf(v0_1Configuration{}),
			ConversionFunc: func(c interface{}) (interface{}, error) {
				if v0_1, ok := c.(*v0_1Configuration); ok {
					if v0_1.Loglevel == Loglevel("") {
						v0_1.Loglevel = Loglevel("info")
					}
					if v0_1.Storage.Type() == "" {
						return nil, fmt.Errorf("No storage configuration provided")
					}
					return (*Configuration)(v0_1), nil
				}
				return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c)
			},
		},
	})

	config := new(Configuration)
	err = p.Parse(in, config)
	if err != nil {
		return nil, err
	}

	return config, nil
}
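As a usage sketch, a caller (for example the registry's main) might load and validate a configuration like this; the file path here is hypothetical, not something the package mandates:

```go
package main

import (
	"log"
	"os"

	"github.com/docker/docker-registry/configuration"
)

func main() {
	// Hypothetical configuration path, for illustration only.
	f, err := os.Open("/etc/docker/registry/config.yml")
	if err != nil {
		log.Fatalf("unable to open configuration: %v", err)
	}
	defer f.Close()

	config, err := configuration.Parse(f)
	if err != nil {
		log.Fatalf("invalid configuration: %v", err)
	}

	log.Printf("storage driver: %s", config.Storage.Type())
}
```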
285
configuration/configuration_test.go
Normal file
@@ -0,0 +1,285 @@
package configuration

import (
	"bytes"
	"os"
	"testing"

	. "gopkg.in/check.v1"
	"gopkg.in/yaml.v2"
)

// Hook up gocheck into the "go test" runner
func Test(t *testing.T) { TestingT(t) }

// configStruct is a canonical example configuration, which should map to configYamlV0_1
var configStruct = Configuration{
	Version:  "0.1",
	Loglevel: "info",
	Storage: Storage{
		"s3": Parameters{
			"region":    "us-east-1",
			"bucket":    "my-bucket",
			"rootpath":  "/registry",
			"encrypt":   true,
			"secure":    false,
			"accesskey": "SAMPLEACCESSKEY",
			"secretkey": "SUPERSECRET",
			"host":      nil,
			"port":      42,
		},
	},
	Auth: Auth{
		"silly": Parameters{
			"realm":   "silly",
			"service": "silly",
		},
	},
	Reporting: Reporting{
		Bugsnag: BugsnagReporting{
			APIKey: "BugsnagApiKey",
		},
	},
}

// configYamlV0_1 is a Version 0.1 yaml document representing configStruct
var configYamlV0_1 = `
version: 0.1
loglevel: info
storage:
  s3:
    region: us-east-1
    bucket: my-bucket
    rootpath: /registry
    encrypt: true
    secure: false
    accesskey: SAMPLEACCESSKEY
    secretkey: SUPERSECRET
    host: ~
    port: 42
auth:
  silly:
    realm: silly
    service: silly
reporting:
  bugsnag:
    apikey: BugsnagApiKey
`

// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory
// storage driver with no parameters
var inmemoryConfigYamlV0_1 = `
version: 0.1
loglevel: info
storage: inmemory
auth:
  silly:
    realm: silly
    service: silly
`

type ConfigSuite struct {
	expectedConfig *Configuration
}

var _ = Suite(new(ConfigSuite))

func (suite *ConfigSuite) SetUpTest(c *C) {
	os.Clearenv()
	suite.expectedConfig = copyConfig(configStruct)
}

// TestMarshalRoundtrip validates that configStruct can be marshaled and
// unmarshaled without changing any parameters
func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) {
	configBytes, err := yaml.Marshal(suite.expectedConfig)
	c.Assert(err, IsNil)
	config, err := Parse(bytes.NewReader(configBytes))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseSimple validates that configYamlV0_1 can be parsed into a struct
// matching configStruct
func (suite *ConfigSuite) TestParseSimple(c *C) {
	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseInmemory validates that configuration yaml with storage provided as
// a string can be parsed into a Configuration struct with no storage parameters
func (suite *ConfigSuite) TestParseInmemory(c *C) {
	suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}
	suite.expectedConfig.Reporting = Reporting{}

	config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseIncomplete validates that an incomplete yaml configuration cannot
// be parsed without providing environment variables to fill in the missing
// components.
func (suite *ConfigSuite) TestParseIncomplete(c *C) {
	incompleteConfigYaml := "version: 0.1"
	_, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml)))
	c.Assert(err, NotNil)

	suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}}
	suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}}
	suite.expectedConfig.Reporting = Reporting{}

	os.Setenv("REGISTRY_STORAGE", "filesystem")
	os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")
	os.Setenv("REGISTRY_AUTH", "silly")
	os.Setenv("REGISTRY_AUTH_SILLY_REALM", "silly")

	config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseWithSameEnvStorage validates that providing environment variables
// that match the given storage type will only include environment-defined
// parameters and remove yaml-defined parameters
func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) {
	suite.expectedConfig.Storage = Storage{"s3": Parameters{"region": "us-east-1"}}

	os.Setenv("REGISTRY_STORAGE", "s3")
	os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseWithDifferentEnvStorageParams validates that providing environment variables that change
// and add to the given storage parameters will change and add parameters to the parsed
// Configuration struct
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) {
	suite.expectedConfig.Storage.setParameter("region", "us-west-1")
	suite.expectedConfig.Storage.setParameter("secure", true)
	suite.expectedConfig.Storage.setParameter("newparam", "some Value")

	os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1")
	os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true")
	os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseWithDifferentEnvStorageType validates that providing an environment variable that
// changes the storage type will be reflected in the parsed Configuration struct
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) {
	suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}

	os.Setenv("REGISTRY_STORAGE", "inmemory")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable
// that changes the storage type will be reflected in the parsed Configuration struct and that
// environment storage parameters will also be included
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) {
	suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}}
	suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot")

	os.Setenv("REGISTRY_STORAGE", "filesystem")
	os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log
// level to the same as the one provided in the yaml will not change the parsed Configuration struct
func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) {
	os.Setenv("REGISTRY_LOGLEVEL", "info")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the
// log level will override the value provided in the yaml document
func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) {
	suite.expectedConfig.Loglevel = "error"

	os.Setenv("REGISTRY_LOGLEVEL", "error")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseInvalidLoglevel validates that the parser will fail to parse a
// configuration if the loglevel is malformed
func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) {
	invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory"
	_, err := Parse(bytes.NewReader([]byte(invalidConfigYaml)))
	c.Assert(err, NotNil)

	os.Setenv("REGISTRY_LOGLEVEL", "derp")

	_, err = Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, NotNil)
}

// TestParseWithDifferentEnvReporting validates that environment variables
// properly override reporting parameters
func (suite *ConfigSuite) TestParseWithDifferentEnvReporting(c *C) {
	suite.expectedConfig.Reporting.Bugsnag.APIKey = "anotherBugsnagApiKey"
	suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080"
	suite.expectedConfig.Reporting.NewRelic.LicenseKey = "NewRelicLicenseKey"
	suite.expectedConfig.Reporting.NewRelic.Name = "some NewRelic NAME"

	os.Setenv("REGISTRY_REPORTING_BUGSNAG_APIKEY", "anotherBugsnagApiKey")
	os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080")
	os.Setenv("REGISTRY_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey")
	os.Setenv("REGISTRY_REPORTING_NEWRELIC_NAME", "some NewRelic NAME")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseInvalidVersion validates that the parser will fail to parse a newer configuration
// version than the CurrentVersion
func (suite *ConfigSuite) TestParseInvalidVersion(c *C) {
	suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1)
	configBytes, err := yaml.Marshal(suite.expectedConfig)
	c.Assert(err, IsNil)
	_, err = Parse(bytes.NewReader(configBytes))
	c.Assert(err, NotNil)
}

func copyConfig(config Configuration) *Configuration {
	configCopy := new(Configuration)

	configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor())
	configCopy.Loglevel = config.Loglevel
	configCopy.Storage = Storage{config.Storage.Type(): Parameters{}}
	for k, v := range config.Storage.Parameters() {
		configCopy.Storage.setParameter(k, v)
	}
	configCopy.Reporting = Reporting{
		Bugsnag:  BugsnagReporting{config.Reporting.Bugsnag.APIKey, config.Reporting.Bugsnag.ReleaseStage, config.Reporting.Bugsnag.Endpoint},
		NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name},
	}

	configCopy.Auth = Auth{config.Auth.Type(): Parameters{}}
	for k, v := range config.Auth.Parameters() {
		configCopy.Auth.setParameter(k, v)
	}

	return configCopy
}
203
configuration/parser.go
Normal file
@@ -0,0 +1,203 @@
package configuration

import (
	"fmt"
	"os"
	"reflect"
	"regexp"
	"strconv"
	"strings"

	"gopkg.in/BrianBland/yaml.v2"
)

// Version is a major/minor version pair of the form Major.Minor
// Major version upgrades indicate structure or type changes
// Minor version upgrades should be strictly additive
type Version string

// MajorMinorVersion constructs a Version from its Major and Minor components
func MajorMinorVersion(major, minor uint) Version {
	return Version(fmt.Sprintf("%d.%d", major, minor))
}

func (version Version) major() (uint, error) {
	majorPart := strings.Split(string(version), ".")[0]
	major, err := strconv.ParseUint(majorPart, 10, 0)
	return uint(major), err
}

// Major returns the major version portion of a Version
func (version Version) Major() uint {
	major, _ := version.major()
	return major
}

func (version Version) minor() (uint, error) {
	minorPart := strings.Split(string(version), ".")[1]
	minor, err := strconv.ParseUint(minorPart, 10, 0)
	return uint(minor), err
}

// Minor returns the minor version portion of a Version
func (version Version) Minor() uint {
	minor, _ := version.minor()
	return minor
}

// VersionedParseInfo defines how a specific version of a configuration should
// be parsed into the current version
type VersionedParseInfo struct {
	// Version is the version which this parsing information relates to
	Version Version
	// ParseAs defines the type which a configuration file of this version
	// should be parsed into
	ParseAs reflect.Type
	// ConversionFunc defines a method for converting the parsed configuration
	// (of type ParseAs) into the current configuration version
	// Note: this method signature is very unclear with the absence of generics
	ConversionFunc func(interface{}) (interface{}, error)
}

// Parser can be used to parse a configuration file and environment of a defined
// version into a unified output structure
type Parser struct {
	prefix  string
	mapping map[Version]VersionedParseInfo
	env     map[string]string
}

// NewParser returns a *Parser with the given environment prefix which handles
// versioned configurations which match the given parseInfos
func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser {
	p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo), env: make(map[string]string)}

	for _, parseInfo := range parseInfos {
		p.mapping[parseInfo.Version] = parseInfo
	}

	for _, env := range os.Environ() {
		envParts := strings.SplitN(env, "=", 2)
		p.env[envParts[0]] = envParts[1]
	}

	return &p
}

// Parse reads in the given []byte and environment and writes the resulting
// configuration into the input v
//
// Environment variables may be used to override configuration parameters other
// than version, following the scheme below:
// v.Abc may be replaced by the value of PREFIX_ABC,
// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth
func (p *Parser) Parse(in []byte, v interface{}) error {
	var versionedStruct struct {
		Version Version
	}

	if err := yaml.Unmarshal(in, &versionedStruct); err != nil {
		return err
	}

	parseInfo, ok := p.mapping[versionedStruct.Version]
	if !ok {
		return fmt.Errorf("Unsupported version: %q", versionedStruct.Version)
	}

	parseAs := reflect.New(parseInfo.ParseAs)
	err := yaml.Unmarshal(in, parseAs.Interface())
	if err != nil {
		return err
	}

	err = p.overwriteFields(parseAs, p.prefix)
	if err != nil {
		return err
	}

	c, err := parseInfo.ConversionFunc(parseAs.Interface())
	if err != nil {
		return err
	}
	reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c)))
	return nil
}

func (p *Parser) overwriteFields(v reflect.Value, prefix string) error {
	for v.Kind() == reflect.Ptr {
		v = reflect.Indirect(v)
	}
	switch v.Kind() {
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			sf := v.Type().Field(i)
			fieldPrefix := strings.ToUpper(prefix + "_" + sf.Name)
			if e, ok := p.env[fieldPrefix]; ok {
				fieldVal := reflect.New(sf.Type)
				err := yaml.Unmarshal([]byte(e), fieldVal.Interface())
				if err != nil {
					return err
				}
				v.Field(i).Set(reflect.Indirect(fieldVal))
			}
			err := p.overwriteFields(v.Field(i), fieldPrefix)
			if err != nil {
				return err
			}
		}
	case reflect.Map:
		p.overwriteMap(v, prefix)
	}
	return nil
}

func (p *Parser) overwriteMap(m reflect.Value, prefix string) error {
	switch m.Type().Elem().Kind() {
	case reflect.Struct:
		for _, k := range m.MapKeys() {
			err := p.overwriteFields(m.MapIndex(k), strings.ToUpper(fmt.Sprintf("%s_%s", prefix, k)))
			if err != nil {
				return err
			}
		}
		envMapRegexp, err := regexp.Compile(fmt.Sprintf("^%s_([A-Z0-9]+)$", strings.ToUpper(prefix)))
		if err != nil {
			return err
		}
		for key, val := range p.env {
			if submatches := envMapRegexp.FindStringSubmatch(key); submatches != nil {
				mapValue := reflect.New(m.Type().Elem())
				err := yaml.Unmarshal([]byte(val), mapValue.Interface())
				if err != nil {
					return err
				}
				m.SetMapIndex(reflect.ValueOf(strings.ToLower(submatches[1])), reflect.Indirect(mapValue))
			}
		}
	case reflect.Map:
		for _, k := range m.MapKeys() {
			err := p.overwriteMap(m.MapIndex(k), strings.ToUpper(fmt.Sprintf("%s_%s", prefix, k)))
			if err != nil {
				return err
			}
		}
	default:
		envMapRegexp, err := regexp.Compile(fmt.Sprintf("^%s_([A-Z0-9]+)$", strings.ToUpper(prefix)))
		if err != nil {
			return err
		}

		for key, val := range p.env {
			if submatches := envMapRegexp.FindStringSubmatch(key); submatches != nil {
				mapValue := reflect.New(m.Type().Elem())
				err := yaml.Unmarshal([]byte(val), mapValue.Interface())
				if err != nil {
					return err
				}
				m.SetMapIndex(reflect.ValueOf(strings.ToLower(submatches[1])), reflect.Indirect(mapValue))
			}
		}
	}
	return nil
}
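The parser is not registry-specific: any package can register its own versioned structure under its own environment prefix. Below is a minimal sketch under that assumption; `appConfig` and the `myapp` prefix are hypothetical, used only for illustration:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/docker/docker-registry/configuration"
)

// appConfig is a hypothetical configuration type.
type appConfig struct {
	Version configuration.Version `yaml:"version"`
	Name    string                `yaml:"name"`
}

func main() {
	p := configuration.NewParser("myapp", []configuration.VersionedParseInfo{
		{
			Version: configuration.MajorMinorVersion(0, 1),
			ParseAs: reflect.TypeOf(appConfig{}),
			ConversionFunc: func(c interface{}) (interface{}, error) {
				// Already the current version; nothing to convert.
				return c, nil
			},
		},
	})

	config := new(appConfig)
	// With MYAPP_NAME set in the environment, the parsed Name is overridden.
	if err := p.Parse([]byte("version: 0.1\nname: example\n"), config); err != nil {
		panic(err)
	}
	fmt.Println(config.Name)
}
```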
32
context.go
Normal file
@@ -0,0 +1,32 @@
package registry

import (
	"github.com/Sirupsen/logrus"
	"github.com/docker/docker-registry/api/v2"
)

// Context should contain the request specific context for use across
// handlers. Resources that don't need to be shared across handlers should not
// be on this object.
type Context struct {
	// App points to the application structure that created this context.
	*App

	// Name is the prefix for the current request. Corresponds to the
	// namespace/repository associated with the image.
	Name string

	// Errors is a collection of errors encountered during the request to be
	// returned to the client API. If errors are added to the collection, the
	// handler *must not* start the response via http.ResponseWriter.
	Errors v2.Errors

	// vars contains the extracted gorilla/mux variables that can be used for
	// assignment.
	vars map[string]string

	// log provides a context specific logger.
	log *logrus.Entry

	urlBuilder *v2.URLBuilder
}
155
digest/digest.go
Normal file
@@ -0,0 +1,155 @@
package digest

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"strings"

	"github.com/docker/docker-registry/common"
	"github.com/docker/docker/pkg/tarsum"
)

// Digest allows simple protection of hex formatted digest strings, prefixed
// by their algorithm. Strings of type Digest have some guarantee of being in
// the correct format and it provides quick access to the components of a
// digest string.
//
// The following is an example of the contents of Digest types:
//
//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// More important for this code base, this type is compatible with tarsum
// digests. For example, the following would be a valid Digest:
//
//	tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b
//
// This allows us to abstract the digest behind this type and work only in
// those terms.
type Digest string

// NewDigest returns a Digest from alg and a hash.Hash object.
func NewDigest(alg string, h hash.Hash) Digest {
	return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil)))
}

var (
	// ErrDigestInvalidFormat returned when digest format invalid.
	ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")

	// ErrDigestUnsupported returned when the digest algorithm is unsupported by registry.
	ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
)

// ParseDigest parses s and returns the validated digest object. An error will
// be returned if the format is invalid.
func ParseDigest(s string) (Digest, error) {
	d := Digest(s)

	return d, d.Validate()
}

// FromReader returns the most valid digest for the underlying content.
func FromReader(rd io.Reader) (Digest, error) {
	// TODO(stevvooe): This is pretty inefficient to always be calculating a
	// sha256 hash to provide fallback, but it provides some nice semantics in
	// that we never worry about getting the right digest for a given reader.
	// For the most part, we can detect tar vs non-tar with only a few bytes,
	// so a scheme that saves those bytes would probably be better here.

	h := sha256.New()
	tr := io.TeeReader(rd, h)

	ts, err := tarsum.NewTarSum(tr, true, tarsum.Version1)
	if err != nil {
		return "", err
	}

	// Try to copy from the tarsum, if we fail, copy the remaining bytes into
	// hash directly.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		if err.Error() != "archive/tar: invalid tar header" {
			return "", err
		}

		if _, err := io.Copy(h, rd); err != nil {
			return "", err
		}

		return NewDigest("sha256", h), nil
	}

	d, err := ParseDigest(ts.Sum(nil))
	if err != nil {
		return "", err
	}

	return d, nil
}

// FromBytes digests the input and returns a Digest.
func FromBytes(p []byte) (Digest, error) {
	return FromReader(bytes.NewReader(p))
}

// Validate checks that the contents of d is a valid digest, returning an
// error if not.
func (d Digest) Validate() error {
	s := string(d)
	// Common case will be tarsum
	_, err := common.ParseTarSum(s)
	if err == nil {
		return nil
	}

	// Continue on for general parser

	i := strings.Index(s, ":")
	if i < 0 {
		return ErrDigestInvalidFormat
	}

	// case: "sha256:" with no hex.
	if i+1 == len(s) {
		return ErrDigestInvalidFormat
	}

	switch s[:i] {
	case "md5", "sha1", "sha256":
		break
	default:
		return ErrDigestUnsupported
	}

	return nil
}

// Algorithm returns the algorithm portion of the digest. This will panic if
// the underlying digest is not in a valid format.
func (d Digest) Algorithm() string {
	return string(d[:d.sepIndex()])
}

// Hex returns the hex digest portion of the digest. This will panic if the
// underlying digest is not in a valid format.
func (d Digest) Hex() string {
	return string(d[d.sepIndex()+1:])
}

func (d Digest) String() string {
	return string(d)
}

func (d Digest) sepIndex() int {
	i := strings.Index(string(d), ":")

	if i < 0 {
		panic("invalid digest: " + d)
	}

	return i
}
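A short usage sketch (not part of the change): digesting non-tar content falls back to sha256, and untrusted strings should pass through `ParseDigest` rather than a type cast. The random-buffer setup mirrors the package's own test below:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/docker/docker-registry/digest"
)

func main() {
	// Mirrors the package test: random (non-tar) content falls back to sha256.
	p := make([]byte, 1<<20)
	rand.Read(p)

	d, err := digest.FromBytes(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Algorithm()) // sha256
	fmt.Println(d.Hex())       // 64 hex characters

	// Untrusted strings should go through ParseDigest rather than a cast.
	if _, err := digest.ParseDigest("bogus"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```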
80
digest/digest_test.go
Normal file
@@ -0,0 +1,80 @@
package digest

import "testing"

func TestParseDigest(t *testing.T) {
	for _, testcase := range []struct {
		input     string
		err       error
		algorithm string
		hex       string
	}{
		{
			input:     "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			algorithm: "tarsum+sha256",
			hex:       "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
		},
		{
			input:     "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			algorithm: "tarsum.dev+sha256",
			hex:       "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
		},
		{
			input:     "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
			algorithm: "tarsum.v1+sha256",
			hex:       "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
		},
		{
			input:     "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			algorithm: "sha256",
			hex:       "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
		},
		{
			input:     "md5:d41d8cd98f00b204e9800998ecf8427e",
			algorithm: "md5",
			hex:       "d41d8cd98f00b204e9800998ecf8427e",
		},
		{
			// empty hex
			input: "sha256:",
			err:   ErrDigestInvalidFormat,
		},
		{
			// just hex
			input: "d41d8cd98f00b204e9800998ecf8427e",
			err:   ErrDigestInvalidFormat,
		},
		{
			input: "foo:d41d8cd98f00b204e9800998ecf8427e",
			err:   ErrDigestUnsupported,
		},
	} {
		digest, err := ParseDigest(testcase.input)
		if err != testcase.err {
			t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err)
		}

		if testcase.err != nil {
			continue
		}

		if digest.Algorithm() != testcase.algorithm {
			t.Fatalf("incorrect algorithm for parsed digest: %q != %q", digest.Algorithm(), testcase.algorithm)
		}

		if digest.Hex() != testcase.hex {
			t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Hex(), testcase.hex)
		}

		// Parse string return value and check equality
		newParsed, err := ParseDigest(digest.String())

		if err != nil {
			t.Fatalf("unexpected error parsing input %q: %v", testcase.input, err)
		}

		if newParsed != digest {
			t.Fatalf("expected equal: %q != %q", newParsed, digest)
		}
	}
}
52
digest/doc.go
Normal file
@@ -0,0 +1,52 @@
// Package digest provides a generalized type to opaquely represent message
// digests and their operations within the registry. The Digest type is
// designed to serve as a flexible identifier in a content-addressable system.
// More importantly, it provides tools and wrappers to work with tarsums and
// hash.Hash-based digests with little effort.
//
// Basics
//
// The format of a digest is simply a string with two parts, dubbed the
// "algorithm" and the "digest", separated by a colon:
//
//	<algorithm>:<digest>
//
// An example of a sha256 digest representation follows:
//
//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// In this case, the string "sha256" is the algorithm and the hex bytes are
// the "digest". A tarsum example will be more illustrative of the use case
// involved in the registry:
//
//	tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b
//
// For this, we consider the algorithm to be "tarsum+sha256". Prudent
// applications will favor the ParseDigest function to verify the format over
// using simple type casts. However, a normal string can be cast as a digest
// with a simple type conversion:
//
//	Digest("tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b")
//
// Because the Digest type is simply a string, once a valid Digest is
// obtained, comparisons are cheap, quick and simple to express with the
// standard equality operator.
//
// Verification
//
// The main benefit of using the Digest type is simple verification against a
// given digest. The Verifier interface, modeled after the stdlib hash.Hash
// interface, provides a common write sink for digest verification. After
// writing is complete, calling the Verifier.Verified method will indicate
// whether or not the stream of bytes matches the target digest.
//
// Missing Features
//
// In addition to the above, we intend to add the following features to this
// package:
//
// 1. A Digester type that supports write sink digest calculation.
//
// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
//
package digest
135
digest/verifiers.go
Normal file
@@ -0,0 +1,135 @@
package digest

import (
	"crypto/md5"
	"crypto/sha1"
	"crypto/sha256"
	"hash"
	"io"
	"io/ioutil"

	"github.com/docker/docker/pkg/tarsum"
)

// Verifier presents a general verification interface to be used with message
// digests and other byte stream verifications. Users instantiate a Verifier
// from one of the various methods, write the data under test to it then check
// the result with the Verified method.
type Verifier interface {
	io.Writer

	// Verified will return true if the content written to Verifier matches
	// the digest.
	Verified() bool

	// Planned methods:
	// Err() error
	// Reset()
}

// NewDigestVerifier returns a verifier that compares the written bytes
// against a passed in digest.
func NewDigestVerifier(d Digest) Verifier {
	alg := d.Algorithm()
	switch alg {
	case "md5", "sha1", "sha256":
		return hashVerifier{
			hash:   newHash(alg),
			digest: d,
		}
	default:
		// Assume we have a tarsum.
		version, err := tarsum.GetVersionFromTarsum(string(d))
		if err != nil {
			panic(err) // Always assume valid tarsum at this point.
		}

		pr, pw := io.Pipe()

		// TODO(stevvooe): We may actually want to ban the earlier versions of
		// tarsum. That decision may not be the place of the verifier.

		ts, err := tarsum.NewTarSum(pr, true, version)
		if err != nil {
			panic(err)
		}

		// TODO(sday): Ick! A goroutine per digest verification? We'll have to
		// get the tarsum library to export an io.Writer variant.
		go func() {
			io.Copy(ioutil.Discard, ts)
			pw.Close()
		}()

		return &tarsumVerifier{
			digest: d,
			ts:     ts,
			pr:     pr,
			pw:     pw,
		}
	}
}

// NewLengthVerifier returns a verifier that returns true when the number of
// read bytes equals the expected parameter.
func NewLengthVerifier(expected int64) Verifier {
	return &lengthVerifier{
		expected: expected,
	}
}

type lengthVerifier struct {
	expected int64 // expected bytes read
	len      int64 // bytes read
}

func (lv *lengthVerifier) Write(p []byte) (n int, err error) {
	n = len(p)
	lv.len += int64(n)
	return n, err
}

func (lv *lengthVerifier) Verified() bool {
	return lv.expected == lv.len
}

func newHash(name string) hash.Hash {
	switch name {
	case "sha256":
		return sha256.New()
	case "sha1":
		return sha1.New()
	case "md5":
		return md5.New()
	default:
		panic("unsupported algorithm: " + name)
	}
}

type hashVerifier struct {
	digest Digest
	hash   hash.Hash
}

func (hv hashVerifier) Write(p []byte) (n int, err error) {
	return hv.hash.Write(p)
}

func (hv hashVerifier) Verified() bool {
	return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
}

type tarsumVerifier struct {
	digest Digest
	ts     tarsum.TarSum
	pr     *io.PipeReader
	pw     *io.PipeWriter
}

func (tv *tarsumVerifier) Write(p []byte) (n int, err error) {
	return tv.pw.Write(p)
}

func (tv *tarsumVerifier) Verified() bool {
	return tv.digest == Digest(tv.ts.Sum(nil))
}
71
digest/verifiers_test.go
Normal file
@@ -0,0 +1,71 @@
package digest

import (
	"bytes"
	"crypto/rand"
	"io"
	"os"
	"testing"

	"github.com/docker/docker-registry/common/testutil"
)

func TestDigestVerifier(t *testing.T) {
	p := make([]byte, 1<<20)
	rand.Read(p)
	digest, err := FromBytes(p)
	if err != nil {
		t.Fatalf("unexpected error digesting bytes: %#v", err)
	}

	verifier := NewDigestVerifier(digest)
	io.Copy(verifier, bytes.NewReader(p))

	if !verifier.Verified() {
		t.Fatalf("bytes not verified")
	}

	tf, tarSum, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating tarfile: %v", err)
	}

	digest, err = FromReader(tf)
	if err != nil {
		t.Fatalf("error digesting tarsum: %v", err)
	}

	if digest.String() != tarSum {
		t.Fatalf("unexpected digest: %q != %q", digest.String(), tarSum)
	}

	expectedSize, _ := tf.Seek(0, os.SEEK_END) // Get tar file size
	tf.Seek(0, os.SEEK_SET)                    // seek back

	// This is the most relevant example for the registry application. It's
	// effectively a read through pipeline, where the final sink is the digest
	// verifier.
	verifier = NewDigestVerifier(digest)
	lengthVerifier := NewLengthVerifier(expectedSize)
	rd := io.TeeReader(tf, lengthVerifier)
	io.Copy(verifier, rd)

	if !lengthVerifier.Verified() {
		t.Fatalf("verifier detected incorrect length")
	}

	if !verifier.Verified() {
		t.Fatalf("bytes not verified")
	}
}

// TODO(stevvooe): Add benchmarks to measure bytes/second throughput for
// DigestVerifier. We should be tarsum/gzip limited for common cases but we
// want to verify this.
//
// The relevant benchmarks for comparison can be run with the following
// commands:
//
//	go test -bench . crypto/sha1
//	go test -bench . github.com/docker/docker/pkg/tarsum
//
32
helpers.go
Normal file
@@ -0,0 +1,32 @@
|
||||||
|
package registry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// serveJSON marshals v and sets the content-type header to
|
||||||
|
// 'application/json'. If a different status code is required, call
|
||||||
|
// ResponseWriter.WriteHeader before this function.
|
||||||
|
func serveJSON(w http.ResponseWriter, v interface{}) error {
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
enc := json.NewEncoder(w)
|
||||||
|
|
||||||
|
if err := enc.Encode(v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// closeResources closes all the provided resources after running the target
|
||||||
|
// handler.
|
||||||
|
func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
for _, closer := range closers {
|
||||||
|
defer closer.Close()
|
||||||
|
}
|
||||||
|
handler.ServeHTTP(w, r)
|
||||||
|
})
|
||||||
|
}
|
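A quick in-package sketch of how these two helpers are meant to compose may be useful; the handler and resource names below are hypothetical and not part of this change. serveJSON runs after any explicit WriteHeader, and closeResources wraps the final handler so the backing reader is released once the response has been written.

// Hypothetical in-package sketch, not part of this change.
func exampleStatusHandler(resource io.ReadCloser) http.Handler {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Per serveJSON's contract, set a non-200 status before encoding.
		w.WriteHeader(http.StatusAccepted)
		serveJSON(w, map[string]string{"status": "accepted"})
	})

	// resource is closed after inner has written the response.
	return closeResources(inner, resource)
}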
113
images.go
Normal file
@@ -0,0 +1,113 @@
package registry

import (
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/docker/docker-registry/api/v2"
	"github.com/docker/docker-registry/digest"
	"github.com/docker/docker-registry/storage"
	"github.com/gorilla/handlers"
)

// imageManifestDispatcher takes the request context and builds the
// appropriate handler for handling image manifest requests.
func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler {
	imageManifestHandler := &imageManifestHandler{
		Context: ctx,
		Tag:     ctx.vars["tag"],
	}

	imageManifestHandler.log = imageManifestHandler.log.WithField("tag", imageManifestHandler.Tag)

	return handlers.MethodHandler{
		"GET":    http.HandlerFunc(imageManifestHandler.GetImageManifest),
		"PUT":    http.HandlerFunc(imageManifestHandler.PutImageManifest),
		"DELETE": http.HandlerFunc(imageManifestHandler.DeleteImageManifest),
	}
}

// imageManifestHandler handles http operations on image manifests.
type imageManifestHandler struct {
	*Context

	Tag string
}

// GetImageManifest fetches the image manifest from the storage backend, if it exists.
func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) {
	manifests := imh.services.Manifests()
	manifest, err := manifests.Get(imh.Name, imh.Tag)

	if err != nil {
		imh.Errors.Push(v2.ErrorCodeManifestUnknown, err)
		w.WriteHeader(http.StatusNotFound)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", fmt.Sprint(len(manifest.Raw)))
	w.Write(manifest.Raw)
}

// PutImageManifest validates and stores an image manifest in the registry.
func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) {
	manifests := imh.services.Manifests()
	dec := json.NewDecoder(r.Body)

	var manifest storage.SignedManifest
	if err := dec.Decode(&manifest); err != nil {
		imh.Errors.Push(v2.ErrorCodeManifestInvalid, err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	if err := manifests.Put(imh.Name, imh.Tag, &manifest); err != nil {
		// TODO(stevvooe): These error handling switches really need to be
		// handled by an app global mapper.
		switch err := err.(type) {
		case storage.ErrManifestVerification:
			for _, verificationError := range err {
				switch verificationError := verificationError.(type) {
				case storage.ErrUnknownLayer:
					imh.Errors.Push(v2.ErrorCodeBlobUnknown, verificationError.FSLayer)
				case storage.ErrManifestUnverified:
					imh.Errors.Push(v2.ErrorCodeManifestUnverified)
				default:
					if verificationError == digest.ErrDigestInvalidFormat {
						// TODO(stevvooe): We really need to move all errors
						// to types. It's much more straightforward.
						imh.Errors.Push(v2.ErrorCodeDigestInvalid)
					} else {
						imh.Errors.PushErr(verificationError)
					}
				}
			}
		default:
			imh.Errors.PushErr(err)
		}

		w.WriteHeader(http.StatusBadRequest)
		return
	}
}

// DeleteImageManifest removes the image with the given tag from the registry.
func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) {
	manifests := imh.services.Manifests()
	if err := manifests.Delete(imh.Name, imh.Tag); err != nil {
		switch err := err.(type) {
		case storage.ErrUnknownManifest:
			imh.Errors.Push(v2.ErrorCodeManifestUnknown, err)
			w.WriteHeader(http.StatusNotFound)
		default:
			imh.Errors.Push(v2.ErrorCodeUnknown, err)
			w.WriteHeader(http.StatusBadRequest)
		}
		return
	}

	w.Header().Set("Content-Length", "0")
	w.WriteHeader(http.StatusAccepted)
}
62
layer.go
Normal file
@@ -0,0 +1,62 @@
package registry

import (
	"net/http"

	"github.com/docker/docker-registry/api/v2"
	"github.com/docker/docker-registry/digest"
	"github.com/docker/docker-registry/storage"
	"github.com/gorilla/handlers"
)

// layerDispatcher uses the request context to build a layerHandler.
func layerDispatcher(ctx *Context, r *http.Request) http.Handler {
	dgst, err := digest.ParseDigest(ctx.vars["digest"])

	if err != nil {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err)
		})
	}

	layerHandler := &layerHandler{
		Context: ctx,
		Digest:  dgst,
	}

	layerHandler.log = layerHandler.log.WithField("digest", dgst)

	return handlers.MethodHandler{
		"GET":  http.HandlerFunc(layerHandler.GetLayer),
		"HEAD": http.HandlerFunc(layerHandler.GetLayer),
	}
}

// layerHandler serves http layer requests.
type layerHandler struct {
	*Context

	Digest digest.Digest
}

// GetLayer fetches the binary data from backend storage and returns it in
// the response.
func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) {
	layers := lh.services.Layers()

	layer, err := layers.Fetch(lh.Name, lh.Digest)

	if err != nil {
		switch err := err.(type) {
		case storage.ErrUnknownLayer:
			w.WriteHeader(http.StatusNotFound)
			lh.Errors.Push(v2.ErrorCodeBlobUnknown, err.FSLayer)
		default:
			lh.Errors.Push(v2.ErrorCodeUnknown, err)
		}
		return
	}
	defer layer.Close()

	http.ServeContent(w, r, layer.Digest().String(), layer.CreatedAt(), layer)
}
229
layerupload.go
Normal file
@@ -0,0 +1,229 @@
package registry

import (
	"fmt"
	"io"
	"net/http"
	"strconv"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker-registry/api/v2"
	"github.com/docker/docker-registry/digest"
	"github.com/docker/docker-registry/storage"
	"github.com/gorilla/handlers"
)

// layerUploadDispatcher constructs and returns the layer upload handler for
// the given request context.
func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
	luh := &layerUploadHandler{
		Context: ctx,
		UUID:    ctx.vars["uuid"],
	}

	handler := http.Handler(handlers.MethodHandler{
		"POST":   http.HandlerFunc(luh.StartLayerUpload),
		"GET":    http.HandlerFunc(luh.GetUploadStatus),
		"HEAD":   http.HandlerFunc(luh.GetUploadStatus),
		"PUT":    http.HandlerFunc(luh.PutLayerChunk),
		"DELETE": http.HandlerFunc(luh.CancelLayerUpload),
	})

	if luh.UUID != "" {
		luh.log = luh.log.WithField("uuid", luh.UUID)

		layers := ctx.services.Layers()
		upload, err := layers.Resume(luh.UUID)

		if err != nil && err != storage.ErrLayerUploadUnknown {
			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				logrus.Infof("error resolving upload: %v", err)
				w.WriteHeader(http.StatusInternalServerError)
				luh.Errors.Push(v2.ErrorCodeUnknown, err)
			})
		}

		luh.Upload = upload
		handler = closeResources(handler, luh.Upload)
	}

	return handler
}

// layerUploadHandler handles the http layer upload process.
type layerUploadHandler struct {
	*Context

	// UUID identifies the upload instance for the current request.
	UUID string

	Upload storage.LayerUpload
}

// StartLayerUpload begins the layer upload process and allocates a server-
// side upload session.
func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.Request) {
	layers := luh.services.Layers()
	upload, err := layers.Upload(luh.Name)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
		luh.Errors.Push(v2.ErrorCodeUnknown, err)
		return
	}

	luh.Upload = upload
	defer luh.Upload.Close()

	if err := luh.layerUploadResponse(w, r); err != nil {
		w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
		luh.Errors.Push(v2.ErrorCodeUnknown, err)
		return
	}
	w.WriteHeader(http.StatusAccepted)
}

// GetUploadStatus returns the status of a given upload, identified by uuid.
func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) {
	if luh.Upload == nil {
		w.WriteHeader(http.StatusNotFound)
		luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown)
		return
	}

	if err := luh.layerUploadResponse(w, r); err != nil {
		w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
		luh.Errors.Push(v2.ErrorCodeUnknown, err)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}

// PutLayerChunk receives a layer chunk during the layer upload process,
// possibly completing the upload with a checksum and length.
func (luh *layerUploadHandler) PutLayerChunk(w http.ResponseWriter, r *http.Request) {
	if luh.Upload == nil {
		w.WriteHeader(http.StatusNotFound)
		luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown)
		return
	}

	var finished bool

	// TODO(stevvooe): This is woefully incomplete. Missing stuff:
	//
	// 1. Extract information from range header, if present.
	// 2. Check offset of current layer.
	// 3. Emit correct error responses.

	// Read in the chunk
	io.Copy(luh.Upload, r.Body)

	if err := luh.maybeCompleteUpload(w, r); err != nil {
		if err != errNotReadyToComplete {
			switch err := err.(type) {
			case storage.ErrLayerInvalidSize:
				w.WriteHeader(http.StatusBadRequest)
				luh.Errors.Push(v2.ErrorCodeSizeInvalid, err)
				return
			case storage.ErrLayerInvalidDigest:
				w.WriteHeader(http.StatusBadRequest)
				luh.Errors.Push(v2.ErrorCodeDigestInvalid, err)
				return
			default:
				w.WriteHeader(http.StatusInternalServerError)
				luh.Errors.Push(v2.ErrorCodeUnknown, err)
				return
			}
		}
	} else {
		// completeUpload has already written the full response, including
		// the 201 Created status.
		finished = true
	}

	if finished {
		return
	}

	if err := luh.layerUploadResponse(w, r); err != nil {
		w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
		luh.Errors.Push(v2.ErrorCodeUnknown, err)
		return
	}

	w.WriteHeader(http.StatusAccepted)
}

// CancelLayerUpload cancels an in-progress upload of a layer.
func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http.Request) {
	if luh.Upload == nil {
		w.WriteHeader(http.StatusNotFound)
		luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown)
		return
	}

}

// layerUploadResponse provides a standard response for layer upload and
// chunk requests. This sets the correct headers but the response status is
// left to the caller.
func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request) error {
	uploadURL, err := luh.urlBuilder.BuildBlobUploadChunkURL(luh.Upload.Name(), luh.Upload.UUID())
	if err != nil {
		logrus.Infof("error building upload url: %s", err)
		return err
	}

	w.Header().Set("Location", uploadURL)
	w.Header().Set("Content-Length", "0")
	w.Header().Set("Range", fmt.Sprintf("0-%d", luh.Upload.Offset()))

	return nil
}

var errNotReadyToComplete = fmt.Errorf("not ready to complete upload")

// maybeCompleteUpload tries to complete the upload if the correct parameters
// are available. Returns errNotReadyToComplete if not ready to complete.
func (luh *layerUploadHandler) maybeCompleteUpload(w http.ResponseWriter, r *http.Request) error {
	// If we get a digest and length, we can finish the upload.
	dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters!
	sizeStr := r.FormValue("size")

	if dgstStr == "" {
		return errNotReadyToComplete
	}

	dgst, err := digest.ParseDigest(dgstStr)
	if err != nil {
		return err
	}

	var size int64
	if sizeStr != "" {
		size, err = strconv.ParseInt(sizeStr, 10, 64)
		if err != nil {
			return err
		}
	} else {
		size = -1
	}

	luh.completeUpload(w, r, size, dgst)
	return nil
}

// completeUpload finishes out the upload with the correct response.
func (luh *layerUploadHandler) completeUpload(w http.ResponseWriter, r *http.Request, size int64, dgst digest.Digest) {
	layer, err := luh.Upload.Finish(size, dgst)
	if err != nil {
		luh.Errors.Push(v2.ErrorCodeUnknown, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	layerURL, err := luh.urlBuilder.BuildBlobURL(layer.Name(), layer.Digest())
	if err != nil {
		luh.Errors.Push(v2.ErrorCodeUnknown, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	w.Header().Set("Location", layerURL)
	w.Header().Set("Content-Length", "0")
	w.WriteHeader(http.StatusCreated)
}
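To make the completion protocol concrete, here is a minimal client-side sketch, assuming the upload URL returned in the Location header above. The final PUT carries the digest (and optionally the size) as query parameters, which are the values maybeCompleteUpload reads via FormValue; the helper name and package are hypothetical, not part of this change.

// Hypothetical client-side sketch, not part of this change.
package client // illustrative package name

import (
	"io"
	"net/http"
	"net/url"
	"strconv"

	"github.com/docker/docker-registry/digest"
)

// completeLayerUpload PUTs the final chunk with "digest" (and optionally
// "size") query parameters. uploadURL is assumed to be the Location value
// returned when the upload session was created.
func completeLayerUpload(uploadURL string, dgst digest.Digest, size int64, body io.Reader) (*http.Response, error) {
	u, err := url.Parse(uploadURL)
	if err != nil {
		return nil, err
	}

	q := u.Query()
	q.Set("digest", dgst.String())
	if size >= 0 {
		q.Set("size", strconv.FormatInt(size, 10))
	}
	u.RawQuery = q.Encode()

	req, err := http.NewRequest("PUT", u.String(), body)
	if err != nil {
		return nil, err
	}

	return http.DefaultClient.Do(req)
}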
20
open-design/MANIFESTO.md
Normal file
@@ -0,0 +1,20 @@
# The "Distribution" project

## What is this

This is a part of the Docker project, or "primitive", that handles the "distribution" of images.

### Punchline

Pack. Sign. Ship. Store. Deliver. Verify.

### Technical scope

Distribution has tight relations with:

* libtrust, providing cryptographic primitives to handle image signing and verification
* image format, as transferred over the wire
* docker-registry, the server-side component that allows storage and retrieval of packed images
* authentication and key management APIs, that are used to verify images and access storage services
* PKI infrastructure
* docker "pull/push client" code gluing all this together - network communication code, tarsum, etc
41
open-design/ROADMAP.md
Normal file
@@ -0,0 +1,41 @@
# Roadmap

## 11/24/2014: alpha

Design and code:

- implements a basic configuration loading mechanism: https://github.com/docker/docker-registry/issues/646
- storage API is frozen, implemented and used: https://github.com/docker/docker-registry/issues/616
- REST API defined and partly implemented: https://github.com/docker/docker-registry/issues/634
- basic logging: https://github.com/docker/docker-registry/issues/635
- auth design is frozen: https://github.com/docker/docker-registry/issues/623

Environment:

- some good practices are in place and documented: https://github.com/docker/docker-registry/issues/657

## 12/22/2014: beta

Design and code:

- feature freeze
- mirroring defined: https://github.com/docker/docker-registry/issues/658
- extension model defined: https://github.com/docker/docker-registry/issues/613

Environment:

- doc-driven approach: https://github.com/docker/docker-registry/issues/627

## 01/12/2015: RC

Design and code:

- third-party drivers and extensions
- basic search extension
- third-party layers garbage collection scripts
- healthcheck endpoints: https://github.com/docker/docker-registry/issues/656
- bugsnag/new-relic support: https://github.com/docker/docker-registry/issues/680

Environment:

- exhaustive test-cases
52
open-design/specs/TEMPLATE.md
Normal file
@@ -0,0 +1,52 @@
# DEP #X: Awesome proposal

## Scope

This is related to "Foo" (eg: authentication/storage/extension/...).

## Abstract

This proposal suggests adding support for "bar".

## User stories

"I'm a Hub user, and 'bar' allows me to do baz1"

"I'm a FOSS user running my private registry and 'bar' allows me to do baz2"

"I'm a company running the registry and 'bar' allows me to do baz3"

## Technology pre-requisites

'bar' can be implemented using:

* foobar approach
* barfoo concurrent approach

## Dependencies

Project depends on baz to be completed (eg: docker engine support, or another registry proposal).

## Technical proposal

We are going to do foofoo alongside some chunks of barbaz.

## Roadmap

* YYYY-MM-DD: proposal submitted
* YYYY-MM-DD: proposal reviewed and updated
* YYYY-MM-DD: implementation started (WIP PR)
* YYYY-MM-DD: implementation complete, ready for thorough review
* YYYY-MM-DD: final PR version
* YYYY-MM-DD: implementation merged

## Editors

Editors:

* my Company, or maybe just me

Implementors:

* me and my buddies
* another team working on a different approach
20
project/dev-image/Dockerfile
Normal file
@@ -0,0 +1,20 @@
FROM ubuntu:14.04

ENV GOLANG_VERSION 1.4rc1
ENV GOPATH /var/cache/drone
ENV GOROOT /usr/local/go
ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin

ENV LANG C
ENV LC_ALL C

RUN apt-get update && apt-get install -y \
	wget ca-certificates git mercurial bzr \
	--no-install-recommends \
	&& rm -rf /var/lib/apt/lists/*

RUN wget https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz --quiet && \
	tar -C /usr/local -xzf go$GOLANG_VERSION.linux-amd64.tar.gz && \
	rm go${GOLANG_VERSION}.linux-amd64.tar.gz

RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint
6
project/hooks/README.md
Normal file
@@ -0,0 +1,6 @@
Git Hooks
=========

To enforce valid and properly-formatted code, there is CI in place which runs `gofmt`, `golint`, and `go vet` against code in the repository.

As an aid to prevent committing invalid code in the first place, a git pre-commit hook has been added to the repository, found in [pre-commit](./pre-commit). As it is impossible to automatically add linked hooks to a git repository, this hook should be linked into your `.git/hooks/pre-commit`, which can be done by running the `configure-hooks.sh` script in this directory. This script is the preferred method of configuring hooks, as it will be updated as more are added.
18
project/hooks/configure-hooks.sh
Executable file
@@ -0,0 +1,18 @@
#!/bin/sh

cd $(dirname $0)

REPO_ROOT=$(git rev-parse --show-toplevel)
RESOLVE_REPO_ROOT_STATUS=$?
if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then
	printf "Unable to resolve repository root. Error:\n%s\n" "$REPO_ROOT" > /dev/stderr
	exit $RESOLVE_REPO_ROOT_STATUS
fi

set -e
set -x

# Just in case the directory doesn't exist
mkdir -p $REPO_ROOT/.git/hooks

ln -f -s $(pwd)/pre-commit $REPO_ROOT/.git/hooks/pre-commit
29
project/hooks/pre-commit
Executable file
@@ -0,0 +1,29 @@
#!/bin/sh

REPO_ROOT=$(git rev-parse --show-toplevel)
RESOLVE_REPO_ROOT_STATUS=$?
if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then
	printf "Unable to resolve repository root. Error:\n%s\n" "$RESOLVE_REPO_ROOT_STATUS" > /dev/stderr
	exit $RESOLVE_REPO_ROOT_STATUS
fi

cd $REPO_ROOT

GOFMT_ERRORS=$(gofmt -s -l . 2>&1)
if [ -n "$GOFMT_ERRORS" ]; then
	printf 'gofmt failed for the following files:\n%s\n\nPlease run "gofmt -s -l ." in the root of your repository before committing\n' "$GOFMT_ERRORS" > /dev/stderr
	exit 1
fi

GOLINT_ERRORS=$(golint ./... 2>&1)
if [ -n "$GOLINT_ERRORS" ]; then
	printf "golint failed with the following errors:\n%s\n" "$GOLINT_ERRORS" > /dev/stderr
	exit 1
fi

GOVET_ERRORS=$(go vet ./... 2>&1)
GOVET_STATUS=$?
if [ "$GOVET_STATUS" -ne "0" ]; then
	printf "govet failed with the following errors:\n%s\n" "$GOVET_ERRORS" > /dev/stderr
	exit $GOVET_STATUS
fi
3
storage/doc.go
Normal file
@@ -0,0 +1,3 @@
// Package storage contains storage services for use in the registry
// application. It should be considered an internal package, as of Go 1.4.
package storage
170
storage/filereader.go
Normal file
@@ -0,0 +1,170 @@
package storage

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"time"

	"github.com/docker/docker-registry/storagedriver"
)

// fileReader provides a read seeker interface to files stored in
// storagedriver. Used to implement part of layer interface and will be used
// to implement read side of LayerUpload.
type fileReader struct {
	driver storagedriver.StorageDriver

	// identifying fields
	path    string
	size    int64 // size is the total layer size, must be set.
	modtime time.Time

	// mutable fields
	rc     io.ReadCloser // remote read closer
	brd    *bufio.Reader // internal buffered io
	offset int64         // offset is the current read offset
	err    error         // terminal error, if set, reader is closed
}

func newFileReader(driver storagedriver.StorageDriver, path string) (*fileReader, error) {
	// Grab the size of the layer file, ensuring existence.
	fi, err := driver.Stat(path)

	if err != nil {
		return nil, err
	}

	if fi.IsDir() {
		return nil, fmt.Errorf("cannot read a directory")
	}

	return &fileReader{
		driver:  driver,
		path:    path,
		size:    fi.Size(),
		modtime: fi.ModTime(),
	}, nil
}

func (fr *fileReader) Read(p []byte) (n int, err error) {
	if fr.err != nil {
		return 0, fr.err
	}

	rd, err := fr.reader()
	if err != nil {
		return 0, err
	}

	n, err = rd.Read(p)
	fr.offset += int64(n)

	// Simulate io.EOF error if we reach filesize.
	if err == nil && fr.offset >= fr.size {
		err = io.EOF
	}

	return n, err
}

func (fr *fileReader) Seek(offset int64, whence int) (int64, error) {
	if fr.err != nil {
		return 0, fr.err
	}

	var err error
	newOffset := fr.offset

	switch whence {
	case os.SEEK_CUR:
		newOffset += int64(offset)
	case os.SEEK_END:
		newOffset = fr.size + int64(offset)
	case os.SEEK_SET:
		newOffset = int64(offset)
	}

	if newOffset < 0 {
		err = fmt.Errorf("cannot seek to negative position")
	} else if newOffset > fr.size {
		err = fmt.Errorf("cannot seek past end of file")
	} else {
		if fr.offset != newOffset {
			fr.reset()
		}

		// No problems, set the offset.
		fr.offset = newOffset
	}

	return fr.offset, err
}

// Close the layer. Should be called when the resource is no longer needed.
func (fr *fileReader) Close() error {
	if fr.err != nil {
		return fr.err
	}

	fr.err = ErrLayerClosed

	// close and release reader chain
	if fr.rc != nil {
		fr.rc.Close()
	}

	fr.rc = nil
	fr.brd = nil

	return fr.err
}

// reader prepares the current reader at the fileReader's offset, ensuring
// it's buffered and ready to go.
func (fr *fileReader) reader() (io.Reader, error) {
	if fr.err != nil {
		return nil, fr.err
	}

	if fr.rc != nil {
		return fr.brd, nil
	}

	// If we don't have a reader, open one up.
	rc, err := fr.driver.ReadStream(fr.path, fr.offset)

	if err != nil {
		return nil, err
	}

	fr.rc = rc

	if fr.brd == nil {
		// TODO(stevvooe): Set an optimal buffer size here. We'll have to
		// understand the latency characteristics of the underlying network to
		// set this correctly, so we may want to leave it to the driver. For
		// out of process drivers, we'll have to optimize this buffer size for
		// local communication.
		fr.brd = bufio.NewReader(fr.rc)
	} else {
		fr.brd.Reset(fr.rc)
	}

	return fr.brd, nil
}

// reset closes the remote reader, forcing the read method to open up a new
// connection and rebuild the buffered reader. This should be called when the
// offset and the reader will become out of sync, such as during a seek
// operation.
func (fr *fileReader) reset() {
	if fr.err != nil {
		return
	}
	if fr.rc != nil {
		fr.rc.Close()
		fr.rc = nil
	}
}
158
storage/filereader_test.go
Normal file
@@ -0,0 +1,158 @@
package storage

import (
	"bytes"
	"crypto/rand"
	"io"
	mrand "math/rand"
	"os"
	"testing"

	"github.com/docker/docker-registry/digest"

	"github.com/docker/docker-registry/storagedriver/inmemory"
)

func TestSimpleRead(t *testing.T) {
	content := make([]byte, 1<<20)
	n, err := rand.Read(content)
	if err != nil {
		t.Fatalf("unexpected error building random data: %v", err)
	}

	if n != len(content) {
		t.Fatalf("random read didn't fill buffer")
	}

	dgst, err := digest.FromReader(bytes.NewReader(content))
	if err != nil {
		t.Fatalf("unexpected error digesting random content: %v", err)
	}

	driver := inmemory.New()
	path := "/random"

	if err := driver.PutContent(path, content); err != nil {
		t.Fatalf("error putting patterned content: %v", err)
	}

	fr, err := newFileReader(driver, path)
	if err != nil {
		t.Fatalf("error allocating file reader: %v", err)
	}

	verifier := digest.NewDigestVerifier(dgst)
	io.Copy(verifier, fr)

	if !verifier.Verified() {
		t.Fatalf("unable to verify read data")
	}
}

func TestFileReaderSeek(t *testing.T) {
	driver := inmemory.New()
	pattern := "01234567890ab" // prime length block
	repetitions := 1024
	path := "/patterned"
	content := bytes.Repeat([]byte(pattern), repetitions)

	if err := driver.PutContent(path, content); err != nil {
		t.Fatalf("error putting patterned content: %v", err)
	}

	fr, err := newFileReader(driver, path)

	if err != nil {
		t.Fatalf("unexpected error creating file reader: %v", err)
	}

	// Seek all over the place, in blocks of pattern size and make sure we get
	// the right data.
	for _, repetition := range mrand.Perm(repetitions - 1) {
		targetOffset := int64(len(pattern) * repetition)
		// Seek to a multiple of pattern size and read pattern size bytes
		offset, err := fr.Seek(targetOffset, os.SEEK_SET)
		if err != nil {
			t.Fatalf("unexpected error seeking: %v", err)
		}

		if offset != targetOffset {
			t.Fatalf("did not seek to correct offset: %d != %d", offset, targetOffset)
		}

		p := make([]byte, len(pattern))

		n, err := fr.Read(p)
		if err != nil {
			t.Fatalf("error reading pattern: %v", err)
		}

		if n != len(pattern) {
			t.Fatalf("incorrect read length: %d != %d", n, len(pattern))
		}

		if string(p) != pattern {
			t.Fatalf("incorrect read content: %q != %q", p, pattern)
		}

		// Check offset
		current, err := fr.Seek(0, os.SEEK_CUR)
		if err != nil {
			t.Fatalf("error checking current offset: %v", err)
		}

		if current != targetOffset+int64(len(pattern)) {
			t.Fatalf("unexpected offset after read: %v", err)
		}
	}

	start, err := fr.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error seeking to start: %v", err)
	}

	if start != 0 {
		t.Fatalf("expected to seek to start: %v != 0", start)
	}

	end, err := fr.Seek(0, os.SEEK_END)
	if err != nil {
		t.Fatalf("error checking current offset: %v", err)
	}

	if end != int64(len(content)) {
		t.Fatalf("expected to seek to end: %v != %v", end, len(content))
	}

	// Seek past end and before start, ensuring an error is returned.

	// seek before start
	before, err := fr.Seek(-1, os.SEEK_SET)
	if err == nil {
		t.Fatalf("error expected, returned offset=%v", before)
	}

	after, err := fr.Seek(int64(len(content)+1), os.SEEK_END)
	if err == nil {
		t.Fatalf("error expected, returned offset=%v", after)
	}
}

// TestFileReaderErrors covers the various error return types for different
// conditions that can arise when reading a layer.
func TestFileReaderErrors(t *testing.T) {
	// TODO(stevvooe): We need to cover error return types, driven by the
	// errors returned via the HTTP API. For now, here is an incomplete list:
	//
	// 1. Layer Not Found: returned when layer is not found or access is
	//    denied.
	// 2. Layer Unavailable: returned when link references are unresolved,
	//    but layer is known to the registry.
	// 3. Layer Invalid: This may be split into more errors, but should be
	//    returned when name or tarsum does not reference a valid layer. We
	//    may also need something to communicate layer verification errors
	//    for the inline tarsum check.
	// 4. Timeout: timeouts to backend. Need to better understand these
	//    failure cases and how the storage driver propagates these errors
	//    up the stack.
}
100
storage/layer.go
Normal file
@@ -0,0 +1,100 @@
package storage

import (
	"fmt"
	"io"
	"time"

	"github.com/docker/docker-registry/digest"
)

// Layer provides a readable and seekable layer object. Typically,
// implementations are *not* goroutine safe.
type Layer interface {
	// http.ServeContent requires an efficient implementation of
	// ReadSeeker.Seek(0, os.SEEK_END).
	io.ReadSeeker
	io.Closer

	// Name returns the repository under which this layer is linked.
	Name() string // TODO(stevvooe): struggling with nomenclature: should this be "repo" or "name"?

	// Digest returns the unique digest of the blob, which is the tarsum for
	// layers.
	Digest() digest.Digest

	// CreatedAt returns the time this layer was created. Until we implement
	// Stat call on storagedriver, this just returns the zero time.
	CreatedAt() time.Time
}

// LayerUpload provides a handle for working with in-progress uploads.
// Instances can be obtained from the LayerService.Upload and
// LayerService.Resume.
type LayerUpload interface {
	io.WriteCloser

	// UUID returns the identifier for this upload.
	UUID() string

	// Name of the repository under which the layer will be linked.
	Name() string

	// Offset returns the position of the last byte written to this layer.
	Offset() int64

	// TODO(stevvooe): Consider completely removing the size check from this
	// interface. The digest check may be adequate and we are making it
	// optional in the HTTP API.

	// Finish marks the upload as completed, returning a valid handle to the
	// uploaded layer. The final size and digest are validated against the
	// contents of the uploaded layer. If the size is negative, only the
	// digest will be checked.
	Finish(size int64, digest digest.Digest) (Layer, error)

	// Cancel the layer upload process.
	Cancel() error
}

var (
	// ErrLayerExists returned when layer already exists
	ErrLayerExists = fmt.Errorf("layer exists")

	// ErrLayerTarSumVersionUnsupported returned when the tarsum version is
	// unsupported.
	ErrLayerTarSumVersionUnsupported = fmt.Errorf("unsupported tarsum version")

	// ErrLayerUploadUnknown returned when upload is not found.
	ErrLayerUploadUnknown = fmt.Errorf("layer upload unknown")

	// ErrLayerClosed returned when an operation is attempted on a closed
	// Layer or LayerUpload.
	ErrLayerClosed = fmt.Errorf("layer closed")
)

// ErrUnknownLayer returned when layer cannot be found.
type ErrUnknownLayer struct {
	FSLayer FSLayer
}

func (err ErrUnknownLayer) Error() string {
	return fmt.Sprintf("unknown layer %v", err.FSLayer.BlobSum)
}

// ErrLayerInvalidDigest returned when tarsum check fails.
type ErrLayerInvalidDigest struct {
	FSLayer FSLayer
}

func (err ErrLayerInvalidDigest) Error() string {
	return fmt.Sprintf("invalid digest for referenced layer: %v", err.FSLayer.BlobSum)
}

// ErrLayerInvalidSize returned when length check fails.
type ErrLayerInvalidSize struct {
	Size int64
}

func (err ErrLayerInvalidSize) Error() string {
	return fmt.Sprintf("invalid layer size: %d", err.Size)
}
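A minimal sketch of the intended call sequence may help; the LayerService named in the comments above is not defined in this file, so the anonymous interface below stands in for it as an assumption, and only LayerUpload methods defined here are used. The tests that follow show a concrete setup.

// Hypothetical in-package sketch, not part of this change: write the layer
// content, then finish with size -1 so only the digest is checked, per the
// Finish contract above.
func uploadLayer(ls interface {
	Upload(name string) (LayerUpload, error)
}, name string, dgst digest.Digest, content io.Reader) (Layer, error) {
	upload, err := ls.Upload(name)
	if err != nil {
		return nil, err
	}

	if _, err := io.Copy(upload, content); err != nil {
		upload.Cancel()
		return nil, err
	}

	// A negative size skips the length check; the digest is still verified.
	return upload.Finish(-1, dgst)
}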
334
storage/layer_test.go
Normal file
@@ -0,0 +1,334 @@
package storage

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"testing"

	"github.com/docker/docker-registry/common/testutil"
	"github.com/docker/docker-registry/digest"
	"github.com/docker/docker-registry/storagedriver"
	"github.com/docker/docker-registry/storagedriver/inmemory"
)

// TestSimpleLayerUpload covers the layer upload process, exercising common
// error paths that might be seen during an upload.
func TestSimpleLayerUpload(t *testing.T) {
	randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile()

	if err != nil {
		t.Fatalf("error creating random reader: %v", err)
	}

	dgst := digest.Digest(tarSumStr)

	uploadStore, err := newTemporaryLocalFSLayerUploadStore()
	if err != nil {
		t.Fatalf("error allocating upload store: %v", err)
	}

	imageName := "foo/bar"
	driver := inmemory.New()

	ls := &layerStore{
		driver: driver,
		pathMapper: &pathMapper{
			root:    "/storage/testing",
			version: storagePathVersion,
		},
		uploadStore: uploadStore,
	}

	h := sha256.New()
	rd := io.TeeReader(randomDataReader, h)

	layerUpload, err := ls.Upload(imageName)

	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Cancel the upload then restart it
	if err := layerUpload.Cancel(); err != nil {
		t.Fatalf("unexpected error during upload cancellation: %v", err)
	}

	// Do a resume, get unknown upload
	layerUpload, err = ls.Resume(layerUpload.UUID())
	if err != ErrLayerUploadUnknown {
		t.Fatalf("unexpected error resuming upload, should be unknown: %v", err)
	}

	// Restart!
	layerUpload, err = ls.Upload(imageName)
	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Get the size of our random tarfile
	randomDataSize, err := seekerSize(randomDataReader)
	if err != nil {
		t.Fatalf("error getting seeker size of random data: %v", err)
	}

	nn, err := io.Copy(layerUpload, rd)
	if err != nil {
		t.Fatalf("unexpected error uploading layer data: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("layer data write incomplete")
	}

	if layerUpload.Offset() != nn {
		t.Fatalf("layerUpload not updated with correct offset: %v != %v", layerUpload.Offset(), nn)
	}
	layerUpload.Close()

	// Do a resume, for good fun
	layerUpload, err = ls.Resume(layerUpload.UUID())
	if err != nil {
		t.Fatalf("unexpected error resuming upload: %v", err)
	}

	sha256Digest := digest.NewDigest("sha256", h)
	layer, err := layerUpload.Finish(randomDataSize, dgst)

	if err != nil {
		t.Fatalf("unexpected error finishing layer upload: %v", err)
	}

	// After finishing an upload, it should no longer exist.
	if _, err := ls.Resume(layerUpload.UUID()); err != ErrLayerUploadUnknown {
		t.Fatalf("expected layer upload to be unknown, got %v", err)
	}

	// Test for existence.
	exists, err := ls.Exists(layer.Name(), layer.Digest())
	if err != nil {
		t.Fatalf("unexpected error checking for existence: %v", err)
	}

	if !exists {
		t.Fatalf("layer should now exist")
	}

	h.Reset()
	nn, err = io.Copy(h, layer)
	if err != nil {
		t.Fatalf("error reading layer: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("incorrect read length")
	}

	if digest.NewDigest("sha256", h) != sha256Digest {
		t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest)
	}
}

// TestSimpleLayerRead just creates a simple layer file and ensures that basic
// open, read, seek, read works. More specific edge cases should be covered in
// other tests.
func TestSimpleLayerRead(t *testing.T) {
	imageName := "foo/bar"
	driver := inmemory.New()
	ls := &layerStore{
		driver: driver,
		pathMapper: &pathMapper{
			root:    "/storage/testing",
			version: storagePathVersion,
		},
	}

	randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random data: %v", err)
	}

	dgst := digest.Digest(tarSumStr)

	// Test for existence.
	exists, err := ls.Exists(imageName, dgst)
	if err != nil {
		t.Fatalf("unexpected error checking for existence: %v", err)
	}

	if exists {
		t.Fatalf("layer should not exist")
	}

	// Try to get the layer and make sure we get a not found error
	layer, err := ls.Fetch(imageName, dgst)
	if err == nil {
		t.Fatalf("error expected fetching unknown layer")
	}

	switch err.(type) {
	case ErrUnknownLayer:
		err = nil
	default:
		t.Fatalf("unexpected error fetching non-existent layer: %v", err)
	}

	randomLayerDigest, err := writeTestLayer(driver, ls.pathMapper, imageName, dgst, randomLayerReader)
	if err != nil {
		t.Fatalf("unexpected error writing test layer: %v", err)
	}

	randomLayerSize, err := seekerSize(randomLayerReader)
	if err != nil {
		t.Fatalf("error getting seeker size for random layer: %v", err)
	}

	layer, err = ls.Fetch(imageName, dgst)
	if err != nil {
		t.Fatal(err)
	}
	defer layer.Close()

	// Now check the sha digest and ensure its the same
	h := sha256.New()
	nn, err := io.Copy(h, layer)
	if err != nil && err != io.EOF {
		t.Fatalf("unexpected error copying to hash: %v", err)
	}

	if nn != randomLayerSize {
		t.Fatalf("stored incorrect number of bytes in layer: %d != %d", nn, randomLayerSize)
	}

	sha256Digest := digest.NewDigest("sha256", h)
	if sha256Digest != randomLayerDigest {
		t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, randomLayerDigest)
	}

	// Now seek back the layer, read the whole thing and check against randomLayerData
	offset, err := layer.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error seeking layer: %v", err)
	}

	if offset != 0 {
		t.Fatalf("seek failed: expected 0 offset, got %d", offset)
	}

	p, err := ioutil.ReadAll(layer)
	if err != nil {
		t.Fatalf("error reading all of layer: %v", err)
	}

	if len(p) != int(randomLayerSize) {
		t.Fatalf("layer data read has different length: %v != %v", len(p), randomLayerSize)
	}

	// Reset the randomLayerReader and read back the buffer
	_, err = randomLayerReader.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error resetting layer reader: %v", err)
	}

	randomLayerData, err := ioutil.ReadAll(randomLayerReader)
	if err != nil {
		t.Fatalf("random layer read failed: %v", err)
	}

	if !bytes.Equal(p, randomLayerData) {
		t.Fatalf("layer data not equal")
	}
}

// writeRandomLayer creates a random layer under name and tarSum using driver
// and pathMapper. An io.ReadSeeker with the data is returned, along with the
// sha256 hex digest.
func writeRandomLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string) (rs io.ReadSeeker, tarSum digest.Digest, sha256digest digest.Digest, err error) {
	reader, tarSumStr, err := testutil.CreateRandomTarFile()
	if err != nil {
		return nil, "", "", err
	}

	tarSum = digest.Digest(tarSumStr)

	// Now, actually create the layer.
	randomLayerDigest, err := writeTestLayer(driver, pathMapper, name, tarSum, ioutil.NopCloser(reader))

	if _, err := reader.Seek(0, os.SEEK_SET); err != nil {
		return nil, "", "", err
	}

	return reader, tarSum, randomLayerDigest, err
}

// seekerSize seeks to the end of seeker, checks the size and returns it to
// the original state, returning the size. The state of the seeker should be
// treated as unknown if an error is returned.
func seekerSize(seeker io.ReadSeeker) (int64, error) {
	current, err := seeker.Seek(0, os.SEEK_CUR)
	if err != nil {
		return 0, err
	}

	end, err := seeker.Seek(0, os.SEEK_END)
	if err != nil {
		return 0, err
	}

	resumed, err := seeker.Seek(current, os.SEEK_SET)
	if err != nil {
		return 0, err
	}

	if resumed != current {
		return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location")
	}

	return end, nil
}

// writeTestLayer creates a simple test layer in the provided driver under
// tarsum dgst, returning the sha256 digest location. This is implemented
// piecemeal and should probably be replaced by the uploader when it's ready.
func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) {
	h := sha256.New()
	rd := io.TeeReader(content, h)

	p, err := ioutil.ReadAll(rd)

	if err != nil {
		return "", err
	}

	blobDigestSHA := digest.NewDigest("sha256", h)

	blobPath, err := pathMapper.path(blobPathSpec{
		digest: dgst,
	})

	if err != nil {
		return "", err
	}

	if err := driver.PutContent(blobPath, p); err != nil {
		return "", err
	}

	layerLinkPath, err := pathMapper.path(layerLinkPathSpec{
		name:   name,
		digest: dgst,
	})

	if err != nil {
		return "", err
	}

	if err := driver.PutContent(layerLinkPath, []byte(dgst)); err != nil {
		return "", err
	}

	return blobDigestSHA, err
}
30
storage/layerreader.go
Normal file
@@ -0,0 +1,30 @@
package storage

import (
	"time"

	"github.com/docker/docker-registry/digest"
)

// layerReader implements Layer and provides facilities for reading and
// seeking.
type layerReader struct {
	fileReader

	name   string // repo name of this layer
	digest digest.Digest
}

var _ Layer = &layerReader{}

func (lrs *layerReader) Name() string {
	return lrs.name
}

func (lrs *layerReader) Digest() digest.Digest {
	return lrs.digest
}

func (lrs *layerReader) CreatedAt() time.Time {
	return lrs.modtime
}
123
storage/layerstore.go
Normal file
@@ -0,0 +1,123 @@
package storage

import (
	"github.com/docker/docker-registry/digest"
	"github.com/docker/docker-registry/storagedriver"
)

type layerStore struct {
	driver      storagedriver.StorageDriver
	pathMapper  *pathMapper
	uploadStore layerUploadStore
}

func (ls *layerStore) Exists(name string, digest digest.Digest) (bool, error) {
	// Because this implementation just follows blob links, an existence check
	// is pretty cheap by starting and closing a fetch.
	_, err := ls.Fetch(name, digest)

	if err != nil {
		switch err.(type) {
		case ErrUnknownLayer:
			return false, nil
		}

		return false, err
	}

	return true, nil
}

func (ls *layerStore) Fetch(name string, digest digest.Digest) (Layer, error) {
	blobPath, err := ls.resolveBlobPath(name, digest)
	if err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError:
			return nil, ErrUnknownLayer{FSLayer{BlobSum: digest}}
		default:
			return nil, err
		}
	}

	fr, err := newFileReader(ls.driver, blobPath)
	if err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError:
			return nil, ErrUnknownLayer{FSLayer{BlobSum: digest}}
		default:
			return nil, err
		}
	}

	return &layerReader{
		fileReader: *fr,
		name:       name,
		digest:     digest,
	}, nil
}

// Upload begins a layer upload, returning a handle. If the layer upload
// is already in progress or the layer has already been uploaded, this
// will return an error.
func (ls *layerStore) Upload(name string) (LayerUpload, error) {

	// NOTE(stevvooe): Consider the issues with allowing concurrent upload of
	// the same two layers. Should it be disallowed? For now, we allow both
	// parties to proceed and the first one uploads the layer.

	lus, err := ls.uploadStore.New(name)
	if err != nil {
		return nil, err
	}

	return ls.newLayerUpload(lus), nil
}

// Resume continues an in progress layer upload, returning the current
// state of the upload.
func (ls *layerStore) Resume(uuid string) (LayerUpload, error) {
	lus, err := ls.uploadStore.GetState(uuid)

	if err != nil {
		return nil, err
	}

	return ls.newLayerUpload(lus), nil
}

// newLayerUpload allocates a new upload controller with the given state.
func (ls *layerStore) newLayerUpload(lus LayerUploadState) LayerUpload {
	return &layerUploadController{
		LayerUploadState: lus,
		layerStore:       ls,
		uploadStore:      ls.uploadStore,
	}
}

// resolveBlobPath looks up the blob location in the repositories from a
// layer/blob link file, returning blob path or an error on failure.
func (ls *layerStore) resolveBlobPath(name string, dgst digest.Digest) (string, error) {
	pathSpec := layerLinkPathSpec{name: name, digest: dgst}
	layerLinkPath, err := ls.pathMapper.path(pathSpec)

	if err != nil {
		return "", err
	}

	layerLinkContent, err := ls.driver.GetContent(layerLinkPath)
	if err != nil {
		return "", err
	}

	// NOTE(stevvooe): The content of the layer link should match the digest.
	// This layer of indirection is for name-based content protection.

	linked, err := digest.ParseDigest(string(layerLinkContent))
	if err != nil {
		return "", err
	}

	bp := blobPathSpec{digest: linked}

	return ls.pathMapper.path(bp)
}
455
storage/layerupload.go
Normal file
@@ -0,0 +1,455 @@
package storage

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"code.google.com/p/go-uuid/uuid"

	"github.com/docker/docker-registry/digest"
	"github.com/docker/docker-registry/storagedriver"
	"github.com/docker/docker/pkg/tarsum"
)

// LayerUploadState captures the serializable state of the layer upload.
type LayerUploadState struct {
	// Name is the primary repository under which the layer will be linked.
	Name string

	// UUID identifies the upload.
	UUID string

	// Offset contains the current progress of the upload.
	Offset int64
}
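
// An example of the serialized form (values hypothetical), assuming Go's
// default JSON field mapping for the exported fields above:
//
// 	{"Name":"foo/bar","UUID":"f3a0-...","Offset":1024}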

// layerUploadController is used to control the various aspects of resumable
// layer upload. It implements the LayerUpload interface.
type layerUploadController struct {
	LayerUploadState

	layerStore  *layerStore
	uploadStore layerUploadStore
	fp          layerFile
	err         error // terminal error, if set, controller is closed
}

// layerFile documents the interface used while writing layer files, similar
// to *os.File. This is separate from layerReader, for now, because we want to
// store uploads on the local file system until we have write-through hashing
// support. They should be combined once this is worked out.
type layerFile interface {
	io.WriteSeeker
	io.Reader
	io.Closer

	// Sync commits the contents of the writer to storage.
	Sync() (err error)
}

// layerUploadStore provides storage for temporary files and upload state of
// layers. This is used by the LayerService to manage the state of ongoing
// uploads. This interface will definitely change and will most likely end up
// being exported to the app layer. Move it to layer.go when it's ready to go.
type layerUploadStore interface {
	New(name string) (LayerUploadState, error)
	Open(uuid string) (layerFile, error)
	GetState(uuid string) (LayerUploadState, error)
	SaveState(lus LayerUploadState) error
	DeleteState(uuid string) error
}

var _ LayerUpload = &layerUploadController{}

// Name returns the name of the repository under which the layer will be
// linked.
func (luc *layerUploadController) Name() string {
	return luc.LayerUploadState.Name
}

// UUID returns the identifier for this upload.
func (luc *layerUploadController) UUID() string {
	return luc.LayerUploadState.UUID
}

// Offset returns the position of the last byte written to this layer.
func (luc *layerUploadController) Offset() int64 {
	return luc.LayerUploadState.Offset
}

// Finish marks the upload as completed, returning a valid handle to the
// uploaded layer. The final size and checksum are validated against the
// contents of the uploaded layer. The checksum should be provided in the
// format <algorithm>:<hex digest>.
func (luc *layerUploadController) Finish(size int64, digest digest.Digest) (Layer, error) {

	// This section is going to be pretty ugly now. We will have to read the
	// file twice. First, to get the tarsum and checksum. When those are
	// available, and validated, we will upload it to the blob store and link
	// it into the repository. In the future, we need to use resumable hash
	// calculations for tarsum and checksum that can be calculated during the
	// upload. This will allow us to cut the data directly into a temporary
	// directory in the storage backend.

	fp, err := luc.file()

	if err != nil {
		// Cleanup?
		return nil, err
	}

	digest, err = luc.validateLayer(fp, size, digest)
	if err != nil {
		return nil, err
	}

	if nn, err := luc.writeLayer(fp, digest); err != nil {
		// Cleanup?
		return nil, err
	} else if size >= 0 && nn != size {
		// TODO(stevvooe): Short write. Will have to delete the location and
		// report an error. This error needs to be reported to the client.
		return nil, fmt.Errorf("short write writing layer")
	}

	// Yes! We have written some layer data. Let's make it visible. Link the
	// layer blob into the repository.
	if err := luc.linkLayer(digest); err != nil {
		return nil, err
	}

	// Ok, the upload has completed. Delete the state.
	if err := luc.uploadStore.DeleteState(luc.UUID()); err != nil {
		// Can we ignore this error?
		return nil, err
	}

	return luc.layerStore.Fetch(luc.Name(), digest)
}

// Cancel cancels the layer upload process, deleting any stored state.
func (luc *layerUploadController) Cancel() error {
	if err := luc.layerStore.uploadStore.DeleteState(luc.UUID()); err != nil {
		return err
	}

	return luc.Close()
}

func (luc *layerUploadController) Write(p []byte) (int, error) {
	wr, err := luc.file()
	if err != nil {
		return 0, err
	}

	n, err := wr.Write(p)

	// Because we expect the reported offset to be consistent with the storage
	// state, unfortunately, we need to Sync on every call to write.
	if err := wr.Sync(); err != nil {
		// Effectively, ignore the write state if the Sync fails. Report that
		// no bytes were written and seek back to the starting offset.
		offset, seekErr := wr.Seek(luc.Offset(), os.SEEK_SET)
		if seekErr != nil {
			// What do we do here? Quite disastrous.
			luc.reset()

			return 0, fmt.Errorf("multiple errors encountered after Sync + Seek: %v then %v", err, seekErr)
		}

		if offset != luc.Offset() {
			return 0, fmt.Errorf("unexpected offset after seek")
		}

		return 0, err
	}

	luc.LayerUploadState.Offset += int64(n)

	if err := luc.uploadStore.SaveState(luc.LayerUploadState); err != nil {
		// TODO(stevvooe): This failure case may require more thought.
		return n, err
	}

	return n, err
}

func (luc *layerUploadController) Close() error {
	if luc.err != nil {
		return luc.err
	}

	if luc.fp != nil {
		luc.err = luc.fp.Close()
	}

	return luc.err
}

func (luc *layerUploadController) file() (layerFile, error) {
	if luc.fp != nil {
		return luc.fp, nil
	}

	fp, err := luc.uploadStore.Open(luc.UUID())

	if err != nil {
		return nil, err
	}

	// TODO(stevvooe): We may need a more aggressive check here to ensure that
	// the file length is equal to the current offset. We may want to sync the
	// offset before returning the layer upload to the client so it can be
	// validated before proceeding with any writes.

	// Seek to the current layer offset for good measure.
	if _, err = fp.Seek(luc.Offset(), os.SEEK_SET); err != nil {
		return nil, err
	}

	luc.fp = fp

	return luc.fp, nil
}

// reset closes and drops the current writer.
func (luc *layerUploadController) reset() {
	if luc.fp != nil {
		luc.fp.Close()
		luc.fp = nil
	}
}

// validateLayer runs several checks on the layer file to ensure its validity.
// This is currently very expensive and relies on fast io and fast seek on the
// local host. If successful, the latest digest is returned, which should be
// used over the passed in value.
func (luc *layerUploadController) validateLayer(fp layerFile, size int64, dgst digest.Digest) (digest.Digest, error) {
	// First, check the incoming tarsum version of the digest.
	version, err := tarsum.GetVersionFromTarsum(dgst.String())
	if err != nil {
		return "", err
	}

	// TODO(stevvooe): Should we push this down into the digest type?
	switch version {
	case tarsum.Version1:
	default:
		// version 0 and dev, for now.
		return "", ErrLayerTarSumVersionUnsupported
	}

	digestVerifier := digest.NewDigestVerifier(dgst)
	lengthVerifier := digest.NewLengthVerifier(size)

	// First, seek to the end of the file, checking the size is as expected.
	end, err := fp.Seek(0, os.SEEK_END)
	if err != nil {
		return "", err
	}

	// Only check the size if a non-negative size argument has been passed.
	if size >= 0 && end != size {
		// Fast path length check.
		return "", ErrLayerInvalidSize{Size: size}
	}

	// Now seek back to start and take care of the digest.
	if _, err := fp.Seek(0, os.SEEK_SET); err != nil {
		return "", err
	}

	tr := io.TeeReader(fp, digestVerifier)

	// Only verify the size if a non-negative size argument has been passed.
	if size >= 0 {
		tr = io.TeeReader(tr, lengthVerifier)
	}

	// TODO(stevvooe): This is one of the places we need a Digester write
	// sink. Instead, it's read driven. This might be okay.

	// Calculate an updated digest with the latest version.
	dgst, err = digest.FromReader(tr)
	if err != nil {
		return "", err
	}

	if size >= 0 && !lengthVerifier.Verified() {
		return "", ErrLayerInvalidSize{Size: size}
	}

	if !digestVerifier.Verified() {
		return "", ErrLayerInvalidDigest{FSLayer{BlobSum: dgst}}
	}

	return dgst, nil
}

// writeLayer actually writes the layer file into its final destination,
// identified by dgst. The layer should be validated before commencing the
// write.
func (luc *layerUploadController) writeLayer(fp layerFile, dgst digest.Digest) (nn int64, err error) {
	blobPath, err := luc.layerStore.pathMapper.path(blobPathSpec{
		digest: dgst,
	})

	if err != nil {
		return 0, err
	}

	// Check for existence
	if _, err := luc.layerStore.driver.Stat(blobPath); err != nil {
		// TODO(stevvooe): This check is kind of problematic and very racy.
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			break // ensure that it doesn't exist.
		default:
			// TODO(stevvooe): This isn't actually an error: the blob store is
			// content addressable and we should just use this to ensure we
			// have it written. Although, we do need to verify that the
			// content that is there is the correct length.
			return 0, err
		}
	}

	// Seek our local layer file back now.
	if _, err := fp.Seek(0, os.SEEK_SET); err != nil {
		// Cleanup?
		return 0, err
	}

	// Okay: we can write the file to the blob store.
	return luc.layerStore.driver.WriteStream(blobPath, 0, fp)
}

// linkLayer links a valid, written layer blob into the registry under the
// named repository for the upload controller.
func (luc *layerUploadController) linkLayer(digest digest.Digest) error {
	layerLinkPath, err := luc.layerStore.pathMapper.path(layerLinkPathSpec{
		name:   luc.Name(),
		digest: digest,
	})

	if err != nil {
		return err
	}

	return luc.layerStore.driver.PutContent(layerLinkPath, []byte(digest))
}

// localFSLayerUploadStore implements a local layerUploadStore. There are some
// complexities around hashsums that make round tripping to the storage
// backend problematic, so we'll store and read locally for now. By GO-beta,
// this should be fully implemented on top of the backend storagedriver.
//
// For now, the directory layout is as follows:
//
// 	/<temp dir>/registry-layer-upload/
// 		<uuid>/
// 			-> state.json
// 			-> data
//
// Each upload, identified by uuid, has its own directory with a state file
// and a data file. The state file has a json representation of the current
// state. The data file is the in-progress upload data.
type localFSLayerUploadStore struct {
	root string
}

func newTemporaryLocalFSLayerUploadStore() (layerUploadStore, error) {
	path, err := ioutil.TempDir("", "registry-layer-upload")

	if err != nil {
		return nil, err
	}

	return &localFSLayerUploadStore{
		root: path,
	}, nil
}

func (llufs *localFSLayerUploadStore) New(name string) (LayerUploadState, error) {
	lus := LayerUploadState{
		Name: name,
		UUID: uuid.New(),
	}

	if err := os.Mkdir(llufs.path(lus.UUID, ""), 0755); err != nil {
		return lus, err
	}

	if err := llufs.SaveState(lus); err != nil {
		return lus, err
	}

	return lus, nil
}

func (llufs *localFSLayerUploadStore) Open(uuid string) (layerFile, error) {
	fp, err := os.OpenFile(llufs.path(uuid, "data"), os.O_CREATE|os.O_APPEND|os.O_RDWR, 0644)

	if err != nil {
		return nil, err
	}

	return fp, nil
}

func (llufs *localFSLayerUploadStore) GetState(uuid string) (LayerUploadState, error) {
	// TODO(stevvooe): Storing this state on the local file system is an
	// intermediate stop gap. This technique is unlikely to handle any kind of
	// concurrency very well.

	var lus LayerUploadState
	fp, err := os.Open(llufs.path(uuid, "state.json"))
	if err != nil {
		if os.IsNotExist(err) {
			return lus, ErrLayerUploadUnknown
		}

		return lus, err
	}
	defer fp.Close()

	dec := json.NewDecoder(fp)
	if err := dec.Decode(&lus); err != nil {
		return lus, err
	}

	return lus, nil
}

func (llufs *localFSLayerUploadStore) SaveState(lus LayerUploadState) error {
	p, err := json.Marshal(lus)
	if err != nil {
		return err
	}

	err = ioutil.WriteFile(llufs.path(lus.UUID, "state.json"), p, 0644)
	if os.IsNotExist(err) {
		return ErrLayerUploadUnknown
	}

	return err
}

func (llufs *localFSLayerUploadStore) DeleteState(uuid string) error {
	if err := os.RemoveAll(llufs.path(uuid, "")); err != nil {
		if os.IsNotExist(err) {
			return ErrLayerUploadUnknown
		}

		return err
	}

	return nil
}

func (llufs *localFSLayerUploadStore) path(uuid, file string) string {
	return filepath.Join(llufs.root, uuid, file)
}
173
storage/manifest.go
Normal file
@@ -0,0 +1,173 @@
package storage

import (
	"crypto/x509"
	"encoding/json"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker-registry/digest"
	"github.com/docker/libtrust"
)

// Versioned provides a struct with just the manifest schemaVersion. Incoming
// content with unknown schema version can be decoded against this struct to
// check the version.
type Versioned struct {
	// SchemaVersion is the image manifest schema that this image follows
	SchemaVersion int `json:"schemaVersion"`
}

// Manifest provides the base accessible fields for working with the V2 image
// format in the registry.
type Manifest struct {
	Versioned

	// Name is the name of the image's repository
	Name string `json:"name"`

	// Tag is the tag of the image specified by this manifest
	Tag string `json:"tag"`

	// Architecture is the host architecture on which this image is intended
	// to run
	Architecture string `json:"architecture"`

	// FSLayers is a list of filesystem layer blobSums contained in this image
	FSLayers []FSLayer `json:"fsLayers"`

	// History is a list of unstructured historical data for v1 compatibility
	History []ManifestHistory `json:"history"`
}
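
// An abbreviated example of the serialized form (values hypothetical; keys
// follow the json tags above):
//
// 	{
// 	  "schemaVersion": 1,
// 	  "name": "foo/bar",
// 	  "tag": "thetag",
// 	  "architecture": "amd64",
// 	  "fsLayers": [{"blobSum": "tarsum.v1+sha256:..."}],
// 	  "history": [{"v1Compatibility": "..."}]
// 	}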

// Sign signs the manifest with the provided private key, returning a
// SignedManifest. This typically won't be used within the registry, except
// for testing.
func (m *Manifest) Sign(pk libtrust.PrivateKey) (*SignedManifest, error) {
	p, err := json.MarshalIndent(m, "", "   ")
	if err != nil {
		return nil, err
	}

	js, err := libtrust.NewJSONSignature(p)
	if err != nil {
		return nil, err
	}

	if err := js.Sign(pk); err != nil {
		return nil, err
	}

	pretty, err := js.PrettySignature("signatures")
	if err != nil {
		return nil, err
	}

	return &SignedManifest{
		Manifest: *m,
		Raw:      pretty,
	}, nil
}
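
// For example, mirroring the pattern in manifest_test.go:
//
// 	pk, err := libtrust.GenerateECP256PrivateKey()
// 	// handle err ...
// 	sm, err := manifest.Sign(pk)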

// SignWithChain signs the manifest with the given private key and x509 chain.
// The public key of the first element in the chain must be the public key
// corresponding with the sign key.
func (m *Manifest) SignWithChain(key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) {
	p, err := json.MarshalIndent(m, "", "   ")
	if err != nil {
		return nil, err
	}

	js, err := libtrust.NewJSONSignature(p)
	if err != nil {
		return nil, err
	}

	if err := js.SignWithChain(key, chain); err != nil {
		return nil, err
	}

	pretty, err := js.PrettySignature("signatures")
	if err != nil {
		return nil, err
	}

	return &SignedManifest{
		Manifest: *m,
		Raw:      pretty,
	}, nil
}

// SignedManifest provides an envelope for a signed image manifest, including
// the format sensitive raw bytes. It contains fields to maintain the raw,
// signed representation alongside the parsed manifest.
type SignedManifest struct {
	Manifest

	// Raw is the byte representation of the SignedManifest, used for
	// signature verification. The value of Raw must be used directly during
	// serialization, or the signature check will fail. The manifest byte
	// representation cannot change or it will have to be re-signed.
	Raw []byte `json:"-"`
}

// Verify verifies the signature of the signed manifest, returning the public
// keys used during signing.
func (sm *SignedManifest) Verify() ([]libtrust.PublicKey, error) {
	js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
	if err != nil {
		logrus.WithField("err", err).Debugf("(*SignedManifest).Verify")
		return nil, err
	}

	return js.Verify()
}

// VerifyChains verifies the signature of the signed manifest against the
// certificate pool, returning the list of verified chains. Signatures without
// an x509 chain are not checked.
func (sm *SignedManifest) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) {
	js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
	if err != nil {
		return nil, err
	}

	return js.VerifyChains(ca)
}

// UnmarshalJSON populates a new SignedManifest struct from JSON data.
func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
	var manifest Manifest
	if err := json.Unmarshal(b, &manifest); err != nil {
		return err
	}

	sm.Manifest = manifest
	sm.Raw = make([]byte, len(b))
	copy(sm.Raw, b)

	return nil
}

// MarshalJSON returns the contents of Raw. If Raw is nil, it marshals the
// inner contents. Applications requiring a marshaled signed manifest should
// simply use Raw directly, since the content produced by json.Marshal will be
// compacted and will fail signature checks.
func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
	if len(sm.Raw) > 0 {
		return sm.Raw, nil
	}

	// If the raw data is not available, just dump the inner content.
	return json.Marshal(&sm.Manifest)
}

// FSLayer is a container struct for BlobSums defined in an image manifest
type FSLayer struct {
	// BlobSum is the tarsum of the referenced filesystem image layer
	BlobSum digest.Digest `json:"blobSum"`
}

// ManifestHistory stores unstructured v1 compatibility information
type ManifestHistory struct {
	// V1Compatibility is the raw v1 compatibility information
	V1Compatibility string `json:"v1Compatibility"`
}
158
storage/manifest_test.go
Normal file
@@ -0,0 +1,158 @@
package storage

import (
	"reflect"
	"testing"

	"github.com/docker/docker-registry/digest"
	"github.com/docker/docker-registry/storagedriver/inmemory"
	"github.com/docker/libtrust"
)

func TestManifestStorage(t *testing.T) {
	driver := inmemory.New()
	ms := &manifestStore{
		driver: driver,
		pathMapper: &pathMapper{
			root:    "/storage/testing",
			version: storagePathVersion,
		},
		layerService: newMockedLayerService(),
	}

	name := "foo/bar"
	tag := "thetag"

	exists, err := ms.Exists(name, tag)
	if err != nil {
		t.Fatalf("unexpected error checking manifest existence: %v", err)
	}

	if exists {
		t.Fatalf("manifest should not exist")
	}

	if _, err := ms.Get(name, tag); true {
		switch err.(type) {
		case ErrUnknownManifest:
			break
		default:
			t.Fatalf("expected manifest unknown error: %#v", err)
		}
	}

	manifest := Manifest{
		Versioned: Versioned{
			SchemaVersion: 1,
		},
		Name: name,
		Tag:  tag,
		FSLayers: []FSLayer{
			{
				BlobSum: "asdf",
			},
			{
				BlobSum: "qwer",
			},
		},
	}

	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		t.Fatalf("unexpected error generating private key: %v", err)
	}

	sm, err := manifest.Sign(pk)
	if err != nil {
		t.Fatalf("error signing manifest: %v", err)
	}

	err = ms.Put(name, tag, sm)
	if err == nil {
		t.Fatalf("expected errors putting manifest")
	}

	// TODO(stevvooe): We expect errors describing all of the missing layers.

	ms.layerService.(*mockedExistenceLayerService).add(name, "asdf")
	ms.layerService.(*mockedExistenceLayerService).add(name, "qwer")

	if err = ms.Put(name, tag, sm); err != nil {
		t.Fatalf("unexpected error putting manifest: %v", err)
	}

	exists, err = ms.Exists(name, tag)
	if err != nil {
		t.Fatalf("unexpected error checking manifest existence: %v", err)
	}

	if !exists {
		t.Fatalf("manifest should exist")
	}

	fetchedManifest, err := ms.Get(name, tag)
	if err != nil {
		t.Fatalf("unexpected error fetching manifest: %v", err)
	}

	if !reflect.DeepEqual(fetchedManifest, sm) {
		t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm)
	}

	// Grab the tags and check that this tagged manifest is present.
	tags, err := ms.Tags(name)
	if err != nil {
		t.Fatalf("unexpected error fetching tags: %v", err)
	}

	if len(tags) != 1 {
		t.Fatalf("unexpected tags returned: %v", tags)
	}

	if tags[0] != tag {
		t.Fatalf("unexpected tag found in tags: %v != %v", tags, []string{tag})
	}
}

type layerKey struct {
	name   string
	digest digest.Digest
}

type mockedExistenceLayerService struct {
	exists map[layerKey]struct{}
}

func newMockedLayerService() *mockedExistenceLayerService {
	return &mockedExistenceLayerService{
		exists: make(map[layerKey]struct{}),
	}
}

var _ LayerService = &mockedExistenceLayerService{}

func (mels *mockedExistenceLayerService) add(name string, digest digest.Digest) {
	mels.exists[layerKey{name: name, digest: digest}] = struct{}{}
}

func (mels *mockedExistenceLayerService) remove(name string, digest digest.Digest) {
	delete(mels.exists, layerKey{name: name, digest: digest})
}

func (mels *mockedExistenceLayerService) Exists(name string, digest digest.Digest) (bool, error) {
	_, ok := mels.exists[layerKey{name: name, digest: digest}]
	return ok, nil
}

func (mockedExistenceLayerService) Fetch(name string, digest digest.Digest) (Layer, error) {
	panic("not implemented")
}

func (mockedExistenceLayerService) Upload(name string) (LayerUpload, error) {
	panic("not implemented")
}

func (mockedExistenceLayerService) Resume(uuid string) (LayerUpload, error) {
	panic("not implemented")
}
239
storage/manifeststore.go
Normal file
@@ -0,0 +1,239 @@
package storage

import (
	"encoding/json"
	"fmt"
	"path"
	"strings"

	"github.com/docker/docker-registry/storagedriver"
	"github.com/docker/libtrust"
)

// ErrUnknownRepository is returned if the named repository is not known by
// the registry.
type ErrUnknownRepository struct {
	Name string
}

func (err ErrUnknownRepository) Error() string {
	return fmt.Sprintf("unknown repository name=%s", err.Name)
}

// ErrUnknownManifest is returned if the manifest is not known by the
// registry.
type ErrUnknownManifest struct {
	Name string
	Tag  string
}

func (err ErrUnknownManifest) Error() string {
	return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
}

// ErrManifestUnverified is returned when the registry is unable to verify
// the manifest.
type ErrManifestUnverified struct{}

func (ErrManifestUnverified) Error() string {
	return "unverified manifest"
}

// ErrManifestVerification provides a type to collect errors encountered
// during manifest verification. Currently, it accepts errors of all types,
// but it may be narrowed to those involving manifest verification.
type ErrManifestVerification []error

func (errs ErrManifestVerification) Error() string {
	var parts []string
	for _, err := range errs {
		parts = append(parts, err.Error())
	}

	return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
}

type manifestStore struct {
	driver       storagedriver.StorageDriver
	pathMapper   *pathMapper
	layerService LayerService
}

var _ ManifestService = &manifestStore{}

func (ms *manifestStore) Tags(name string) ([]string, error) {
	p, err := ms.pathMapper.path(manifestTagsPath{
		name: name,
	})
	if err != nil {
		return nil, err
	}

	var tags []string
	entries, err := ms.driver.List(p)
	if err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			return nil, ErrUnknownRepository{Name: name}
		default:
			return nil, err
		}
	}

	for _, entry := range entries {
		_, filename := path.Split(entry)

		tags = append(tags, filename)
	}

	return tags, nil
}

func (ms *manifestStore) Exists(name, tag string) (bool, error) {
	p, err := ms.path(name, tag)
	if err != nil {
		return false, err
	}

	fi, err := ms.driver.Stat(p)
	if err != nil {
		switch err.(type) {
		case storagedriver.PathNotFoundError:
			return false, nil
		default:
			return false, err
		}
	}

	if fi.IsDir() {
		return false, fmt.Errorf("unexpected directory at path: %v, name=%s tag=%s", p, name, tag)
	}

	if fi.Size() == 0 {
		return false, nil
	}

	return true, nil
}

func (ms *manifestStore) Get(name, tag string) (*SignedManifest, error) {
	p, err := ms.path(name, tag)
	if err != nil {
		return nil, err
	}

	content, err := ms.driver.GetContent(p)
	if err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError:
			return nil, ErrUnknownManifest{Name: name, Tag: tag}
		default:
			return nil, err
		}
	}

	var manifest SignedManifest

	if err := json.Unmarshal(content, &manifest); err != nil {
		// TODO(stevvooe): Corrupted manifest error?
		return nil, err
	}

	// TODO(stevvooe): Verify the manifest here?

	return &manifest, nil
}

func (ms *manifestStore) Put(name, tag string, manifest *SignedManifest) error {
	p, err := ms.path(name, tag)
	if err != nil {
		return err
	}

	if err := ms.verifyManifest(name, tag, manifest); err != nil {
		return err
	}

	// TODO(stevvooe): Should we get old manifest first? Perhaps, write, then
	// move to ensure a valid manifest?

	return ms.driver.PutContent(p, manifest.Raw)
}

func (ms *manifestStore) Delete(name, tag string) error {
	p, err := ms.path(name, tag)
	if err != nil {
		return err
	}

	if err := ms.driver.Delete(p); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError:
			return ErrUnknownManifest{Name: name, Tag: tag}
		default:
			return err
		}
	}

	return nil
}

func (ms *manifestStore) path(name, tag string) (string, error) {
	return ms.pathMapper.path(manifestPathSpec{
		name: name,
		tag:  tag,
	})
}

func (ms *manifestStore) verifyManifest(name, tag string, manifest *SignedManifest) error {
	// TODO(stevvooe): This verification is present here, but this needs to be
	// lifted out of the storage infrastructure and moved into a package
	// oriented towards defining verifiers and reporting them with
	// granularity.

	var errs ErrManifestVerification
	if manifest.Name != name {
		// TODO(stevvooe): This needs to be an exported error
		errs = append(errs, fmt.Errorf("name does not match manifest name"))
	}

	if manifest.Tag != tag {
		// TODO(stevvooe): This needs to be an exported error.
		errs = append(errs, fmt.Errorf("tag does not match manifest tag"))
	}

	// TODO(stevvooe): These pubkeys need to be checked with either Verify or
	// VerifyChains. We need to define the exact source of the CA.
	// Perhaps, it's a configuration value injected into the manifest store.

	if _, err := manifest.Verify(); err != nil {
		switch err {
		case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent:
			errs = append(errs, ErrManifestUnverified{})
		default:
			if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust
				errs = append(errs, ErrManifestUnverified{})
			} else {
				errs = append(errs, err)
			}
		}
	}

	for _, fsLayer := range manifest.FSLayers {
		exists, err := ms.layerService.Exists(name, fsLayer.BlobSum)
		if err != nil {
			errs = append(errs, err)
		}

		if !exists {
			errs = append(errs, ErrUnknownLayer{FSLayer: fsLayer})
		}
	}

	if len(errs) != 0 {
		// TODO(stevvooe): These need to be recoverable by a caller.
		return errs
	}

	return nil
}
207
storage/paths.go
Normal file
@@ -0,0 +1,207 @@
package storage

import (
	"fmt"
	"path"
	"strings"

	"github.com/docker/docker-registry/common"
	"github.com/docker/docker-registry/digest"
)

const storagePathVersion = "v2"

// pathMapper maps paths based on "object names" and their ids. The "object
// names" mapped by pathMapper are internal to the storage system.
//
// The path layout in the storage backend will be roughly as follows:
//
// 	<root>/v2
// 		-> repositories/
// 			-> <name>/
// 				-> manifests/
// 					<manifests by tag name>
// 				-> layers/
// 					<layer links to blob store>
// 		-> blob/<algorithm>
// 			<split directory content addressable storage>
//
// There are a few important components to this path layout. First, we have
// the repository store identified by name. This contains the image manifests
// and a layer store with links to CAS blob ids. Outside of the named repo
// area, we have the blob store. It contains the actual layer data and any
// other data that can be referenced by a CAS id.
//
// We cover the path formats implemented by this path mapper below.
//
// 	manifestTagsPath:  <root>/v2/repositories/<name>/manifests
// 	manifestPathSpec:  <root>/v2/repositories/<name>/manifests/<tag>
// 	layerLinkPathSpec: <root>/v2/repositories/<name>/layers/tarsum/<tarsum version>/<tarsum hash alg>/<first two hex bytes of hash>/<tarsum hash>
// 	blobPathSpec:      <root>/v2/blob/<algorithm>/<first two hex bytes of digest>/<hex digest>
//
// For more information on the semantic meaning of each path and their
// contents, please see the path spec documentation.
type pathMapper struct {
	root    string
	version string // should be a constant?
}
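
// As a concrete illustration (values drawn from the cases in paths_test.go),
// a mapper with root "/pathmapper-test" and an empty version would produce:
//
// 	manifestPathSpec{name: "foo/bar", tag: "thetag"}
// 		-> /pathmapper-test/repositories/foo/bar/manifests/thetag
// 	layerLinkPathSpec{name: "foo/bar", digest: "tarsum.v1+test:abcdef"}
// 		-> /pathmapper-test/repositories/foo/bar/layers/tarsum/v1/test/ab/abcdef
// 	blobPathSpec{digest: "tarsum.v1+sha256:abcdef..."}
// 		-> /pathmapper-test/blob/tarsum/v1/sha256/ab/abcdef...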

// path returns the path identified by spec.
func (pm *pathMapper) path(spec pathSpec) (string, error) {

	// Switch on the path object type and return the appropriate path. At
	// first glance, one may wonder why we don't use an interface to
	// accomplish this. By keeping the formatting separate from the pathSpec,
	// the path generation stays componentized. These specs could be passed to
	// a completely different mapper implementation and generate a different
	// set of paths.
	//
	// For example, imagine migrating from one backend to the other: one could
	// build a filesystem walker that converts a string path in one version,
	// to an intermediate path object, that can be consumed and mapped by the
	// other version.

	rootPrefix := []string{pm.root, pm.version}
	repoPrefix := append(rootPrefix, "repositories")

	switch v := spec.(type) {
	case manifestTagsPath:
		return path.Join(append(repoPrefix, v.name, "manifests")...), nil
	case manifestPathSpec:
		// TODO(sday): May need to store manifest by architecture.
		return path.Join(append(repoPrefix, v.name, "manifests", v.tag)...), nil
	case layerLinkPathSpec:
		components, err := digestPathComponents(v.digest)
		if err != nil {
			return "", err
		}

		// For now, only map tarsum paths.
		if components[0] != "tarsum" {
			// Only tarsum is supported, for now
			return "", fmt.Errorf("unsupported content digest: %v", v.digest)
		}

		layerLinkPathComponents := append(repoPrefix, v.name, "layers")

		return path.Join(append(layerLinkPathComponents, components...)...), nil
	case blobPathSpec:
		components, err := digestPathComponents(v.digest)
		if err != nil {
			return "", err
		}

		// For now, only map tarsum paths.
		if components[0] != "tarsum" {
			// Only tarsum is supported, for now
			return "", fmt.Errorf("unsupported content digest: %v", v.digest)
		}

		blobPathPrefix := append(rootPrefix, "blob")
		return path.Join(append(blobPathPrefix, components...)...), nil
	default:
		// TODO(sday): This is an internal error. Ensure it doesn't escape (panic?).
		return "", fmt.Errorf("unknown path spec: %#v", v)
	}
}

// pathSpec is a type to mark structs as path specs. There is no
// implementation because we'd like to keep the specs and the mappers
// decoupled.
type pathSpec interface {
	pathSpec()
}

// manifestTagsPath describes the path elements required to point to the
// directory with all manifest tags under the repository.
type manifestTagsPath struct {
	name string
}

func (manifestTagsPath) pathSpec() {}

// manifestPathSpec describes the path elements used to build a manifest path.
// The contents should be a signed manifest json file.
type manifestPathSpec struct {
	name string
	tag  string
}

func (manifestPathSpec) pathSpec() {}

// layerLinkPathSpec specifies a path for a layer link, which is a file with a
// blob id. The layer link will contain a content addressable blob id
// reference into the blob store. The format of the contents is as follows:
//
// 	<algorithm>:<hex digest of layer data>
//
// The following example of the file contents is more illustrative:
//
// 	sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
//
// This indicates that there is a blob with the id/digest, calculated via
// sha256, that can be fetched from the blob store.
type layerLinkPathSpec struct {
	name   string
	digest digest.Digest
}

func (layerLinkPathSpec) pathSpec() {}

// blobAlgorithmReplacer does some very simple path sanitization for user
// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths
// should be "safe" before getting this far due to strict digest requirements
// but we can add further path conversion here, if needed.
var blobAlgorithmReplacer = strings.NewReplacer(
	"+", "/",
	".", "/",
	";", "/",
)

// blobPathSpec contains the path for the registry global blob store. For
// now, this contains layer data, exclusively.
type blobPathSpec struct {
	digest digest.Digest
}

func (blobPathSpec) pathSpec() {}

// digestPathComponents provides a consistent path breakdown for a given
// digest. For a generic digest, it will be as follows:
//
// 	<algorithm>/<first two bytes of digest>/<full digest>
//
// Most importantly, for tarsum, the layout looks like this:
//
// 	tarsum/<version>/<digest algorithm>/<first two bytes of digest>/<full digest>
//
// This is slightly specialized to store an extra version path for version 0
// tarsums.
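//
// For example, drawing on the cases in paths_test.go, the digest
// "tarsum.v1+test:abcdef" breaks down into the components:
//
// 	tarsum/v1/test/ab/abcdef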
func digestPathComponents(dgst digest.Digest) ([]string, error) {
	if err := dgst.Validate(); err != nil {
		return nil, err
	}

	algorithm := blobAlgorithmReplacer.Replace(dgst.Algorithm())
	hex := dgst.Hex()
	prefix := []string{algorithm}
	suffix := []string{
		hex[:2], // Breaks the hierarchy up.
		hex,
	}

	if tsi, err := common.ParseTarSum(dgst.String()); err == nil {
		// We have a tarsum!
		version := tsi.Version
		if version == "" {
			version = "v0"
		}

		prefix = []string{
			"tarsum",
			version,
			tsi.Algorithm,
		}
	}

	return append(prefix, suffix...), nil
}
61
storage/paths_test.go
Normal file
@@ -0,0 +1,61 @@
package storage

import (
	"testing"

	"github.com/docker/docker-registry/digest"
)

func TestPathMapper(t *testing.T) {
	pm := &pathMapper{
		root: "/pathmapper-test",
	}

	for _, testcase := range []struct {
		spec     pathSpec
		expected string
		err      error
	}{
		{
			spec: manifestPathSpec{
				name: "foo/bar",
				tag:  "thetag",
			},
			expected: "/pathmapper-test/repositories/foo/bar/manifests/thetag",
		},
		{
			spec: layerLinkPathSpec{
				name:   "foo/bar",
				digest: digest.Digest("tarsum.v1+test:abcdef"),
			},
			expected: "/pathmapper-test/repositories/foo/bar/layers/tarsum/v1/test/ab/abcdef",
		},
		{
			spec: blobPathSpec{
				digest: digest.Digest("tarsum.dev+sha512:abcdefabcdefabcdef908909909"),
			},
			expected: "/pathmapper-test/blob/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909",
		},
		{
			spec: blobPathSpec{
				digest: digest.Digest("tarsum.v1+sha256:abcdefabcdefabcdef908909909"),
			},
			expected: "/pathmapper-test/blob/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909",
		},
		{
			spec: blobPathSpec{
				digest: digest.Digest("tarsum+sha256:abcdefabcdefabcdef908909909"),
			},
			expected: "/pathmapper-test/blob/tarsum/v0/sha256/ab/abcdefabcdefabcdef908909909",
		},
	} {
		p, err := pm.path(testcase.spec)
		if err != nil {
			t.Fatal(err)
		}

		if p != testcase.expected {
			t.Fatalf("unexpected path generated: %q != %q", p, testcase.expected)
		}
	}
}
86
storage/services.go
Normal file
@@ -0,0 +1,86 @@
package storage

import (
	"github.com/docker/docker-registry/digest"
	"github.com/docker/docker-registry/storagedriver"
)

// Services provides various services with application-level operations for
// use across backend storage drivers.
type Services struct {
	driver           storagedriver.StorageDriver
	pathMapper       *pathMapper
	layerUploadStore layerUploadStore
}

// NewServices creates a new Services object to access docker objects stored
// in the underlying driver.
func NewServices(driver storagedriver.StorageDriver) *Services {
	layerUploadStore, err := newTemporaryLocalFSLayerUploadStore()

	if err != nil {
		// TODO(stevvooe): This failure needs to be understood in the context
		// of the lifecycle of the services object, which is uncertain at this
		// point.
		panic("unable to allocate layerUploadStore: " + err.Error())
	}

	return &Services{
		driver: driver,
		pathMapper: &pathMapper{
			// TODO(sday): This should be configurable.
			root:    "/docker/registry/",
			version: storagePathVersion,
		},
		layerUploadStore: layerUploadStore,
	}
}
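
// For illustration, a typical caller in another package (using the bundled
// inmemory driver, as the tests do) would look roughly like:
//
// 	services := storage.NewServices(inmemory.New())
// 	layers := services.Layers()
// 	manifests := services.Manifests()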

// Layers returns an instance of the LayerService. Instantiation is cheap and
// may be context sensitive in the future. The instance should be used
// similarly to a request-local value.
func (ss *Services) Layers() LayerService {
	return &layerStore{driver: ss.driver, pathMapper: ss.pathMapper, uploadStore: ss.layerUploadStore}
}

// Manifests returns an instance of ManifestService. Instantiation is cheap
// and may be context sensitive in the future. The instance should be used
// similarly to a request-local value.
func (ss *Services) Manifests() ManifestService {
	return &manifestStore{driver: ss.driver, pathMapper: ss.pathMapper, layerService: ss.Layers()}
}

// ManifestService provides operations on image manifests.
type ManifestService interface {
	// Tags lists the tags under the named repository.
	Tags(name string) ([]string, error)

	// Exists returns true if the manifest exists.
	Exists(name, tag string) (bool, error)

	// Get retrieves the named manifest, if it exists.
	Get(name, tag string) (*SignedManifest, error)

	// Put creates or updates the named manifest.
	Put(name, tag string, manifest *SignedManifest) error

	// Delete removes the named manifest, if it exists.
	Delete(name, tag string) error
}

// LayerService provides operations on layer files in a backend storage.
type LayerService interface {
	// Exists returns true if the layer exists.
	Exists(name string, digest digest.Digest) (bool, error)

	// Fetch the layer identified by TarSum.
	Fetch(name string, digest digest.Digest) (Layer, error)

	// Upload begins a layer upload to the repository identified by name,
	// returning a handle.
	Upload(name string) (LayerUpload, error)

	// Resume continues an in-progress layer upload, returning the current
	// state of the upload.
	Resume(uuid string) (LayerUpload, error)
}
49
storagedriver/README.md
Normal file
@@ -0,0 +1,49 @@
Docker-Registry Storage Driver
==============================

This document describes the registry storage driver model and implementation, and explains how to contribute new storage drivers.

Provided Drivers
================

This storage driver package comes bundled with three default drivers.

1. filesystem: A local storage driver configured to use a directory tree in the local filesystem.
2. s3: A driver storing objects in an Amazon Simple Storage Service (S3) bucket.
3. inmemory: A temporary storage driver using a local inmemory map. This exists solely for reference and testing.

Storage Driver API
==================

The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems.

Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key.

Storage drivers are intended (but not required) to be written in go, providing compile-time validation of the `storagedriver.StorageDriver` interface, although an IPC driver wrapper means that it is not required for drivers to be included in the compiled registry. The `storagedriver/ipc` package provides a client/server protocol for running storage drivers provided in external executables as a managed child server process.

Driver Selection and Configuration
==================================

The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based on the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package.

Storage driver factories may be registered by name using the `factory.Register` method, and then later invoked by calling `factory.Create` with a driver name and parameters map. If no driver is registered with the given name, this factory will attempt to find an executable storage driver with the executable name "registry-storage-\<driver name\>" and return an IPC storage driver wrapper managing the driver subprocess. If no such storage driver can be found, `factory.Create` will return an `InvalidStorageDriverError`.
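
For example, a caller might construct a driver by name as follows (a minimal sketch, assuming the bundled inmemory driver registers itself at import time; parameter keys, if any, are driver-specific):

```go
package main

import (
	"fmt"

	"github.com/docker/docker-registry/storagedriver/factory"

	// Imported for its init-time factory.Register call (see "In-process
	// drivers" below).
	_ "github.com/docker/docker-registry/storagedriver/inmemory"
)

func main() {
	// Create the driver by name with a parameters map; inmemory needs none.
	driver, err := factory.Create("inmemory", map[string]string{})
	if err != nil {
		panic(err)
	}

	fmt.Printf("driver ready: %T\n", driver)
}
```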

Driver Contribution
===================

## Writing new storage drivers
To create a valid storage driver, one must implement the `storagedriver.StorageDriver` interface and make sure to expose this driver via the factory system and as a distributable IPC server executable.

### In-process drivers
Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.Create` to construct instances of this driver without requiring modification of imports throughout the codebase.
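
For example, a new driver's registration might look like the following sketch (`mydriver` is a hypothetical package name; the factory shape mirrors the bundled azure driver shown later in this changeset):

```go
package mydriver

import (
	"fmt"

	"github.com/docker/docker-registry/storagedriver"
	"github.com/docker/docker-registry/storagedriver/factory"
)

const driverName = "mydriver"

func init() {
	// Register at init time so factory.Create can construct this driver by
	// name without callers importing the package explicitly elsewhere.
	factory.Register(driverName, &myDriverFactory{})
}

type myDriverFactory struct{}

func (f *myDriverFactory) Create(parameters map[string]string) (storagedriver.StorageDriver, error) {
	// Validate parameters and construct the concrete driver here; the real
	// construction is elided in this sketch.
	return nil, fmt.Errorf("%s: driver construction elided in this example", driverName)
}
```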

### Out-of-process drivers
As many users will run the registry as a pre-constructed docker container, storage drivers should also be distributable as IPC server executables. Drivers written in go should model the main method provided in `storagedriver/filesystem/registry-storage-filesystem/filesystem.go`. Parameters to IPC drivers will be provided as a JSON-serialized map in the first argument to the process. These parameters should be validated and then a blocking call to `ipc.StorageDriverServer` should be made with a new storage driver.
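
A rough sketch of such an executable follows; it assumes `ipc.StorageDriverServer` accepts the constructed driver and blocks, per the description above, and `newMyDriver` is a hypothetical constructor:

```go
package main

import (
	"encoding/json"
	"os"

	"github.com/docker/docker-registry/storagedriver/ipc"
)

func main() {
	// Parameters arrive as a JSON-serialized map in the first argument.
	parameters := map[string]string{}
	if len(os.Args) > 1 {
		if err := json.Unmarshal([]byte(os.Args[1]), &parameters); err != nil {
			panic(err)
		}
	}

	// Validate the parameters, construct the driver (hypothetical
	// constructor), then make the blocking server call.
	driver := newMyDriver(parameters)
	if err := ipc.StorageDriverServer(driver); err != nil {
		panic(err)
	}
}
```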
|
||||||
|
|
||||||
|
Out-of-process drivers must also implement the `ipc.IPCStorageDriver` interface, which exposes a `Version` check for the storage driver. This is used to validate storage driver api compatibility at driver load-time.
|
||||||
|
|
||||||
|
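Based on the description above, the entry point of such an executable looks roughly like this sketch; error handling is trimmed, `newDriverFromParameters` is a hypothetical constructor, and the exact `ipc.StorageDriverServer` signature should be taken from `storagedriver/ipc`:

```go
package main

import (
    "encoding/json"
    "os"

    "github.com/docker/docker-registry/storagedriver/ipc"
)

func main() {
    // Parameters arrive as a JSON-serialized map in the first argument.
    parameters := map[string]string{}
    if len(os.Args) > 1 {
        if err := json.Unmarshal([]byte(os.Args[1]), &parameters); err != nil {
            panic(err)
        }
    }

    // Validate parameters and construct the driver.
    driver, err := newDriverFromParameters(parameters) // hypothetical constructor
    if err != nil {
        panic(err)
    }

    // Blocking call; serves storage driver requests over the IPC channel.
    if err := ipc.StorageDriverServer(driver); err != nil {
        panic(err)
    }
}
```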
## Testing

Storage driver test suites are provided in `storagedriver/testsuites/testsuites.go` and may be used for any storage driver written in go. Two methods are provided for registering test suites, `RegisterInProcessSuite` and `RegisterIPCSuite`, which run the same set of tests for the driver imported or managed over IPC respectively.
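For example, the in-memory driver's test file (included later in this commit) registers the in-process suite like so:

```go
func init() {
    inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) {
        return New(), nil
    }
    // NeverSkip means the conformance suite always runs for this driver.
    testsuites.RegisterInProcessSuite(inmemoryDriverConstructor, testsuites.NeverSkip)
}
```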
## Drivers written in other languages

Although storage drivers are strongly recommended to be written in go for consistency, compile-time validation, and support, the IPC framework allows for a level of language-agnosticism. Non-go drivers must implement the storage driver protocol by mimicking StorageDriverServer in `storagedriver/ipc/server.go`. As the IPC framework is a layer on top of [docker/libchan](https://github.com/docker/libchan), this currently limits language support to Java via [ndeloof/jchan](https://github.com/ndeloof/jchan) and JavaScript via [GraftJS/jschan](https://github.com/GraftJS/jschan), although contributions to the libchan project are welcome.
354
storagedriver/azure/azure.go
Normal file
@@ -0,0 +1,354 @@
// +build ignore

// Package azure provides a storagedriver.StorageDriver implementation to
// store blobs in Microsoft Azure Blob Storage Service.
package azure

import (
    "bytes"
    "encoding/base64"
    "fmt"
    "io"
    "io/ioutil"
    "strconv"
    "strings"

    "github.com/docker/docker-registry/storagedriver"
    "github.com/docker/docker-registry/storagedriver/factory"

    azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage"
)

const driverName = "azure"

const (
    paramAccountName = "accountname"
    paramAccountKey  = "accountkey"
    paramContainer   = "container"
)

// Driver is a storagedriver.StorageDriver implementation backed by
// Microsoft Azure Blob Storage Service.
type Driver struct {
    client    *azure.BlobStorageClient
    container string
}

func init() {
    factory.Register(driverName, &azureDriverFactory{})
}

type azureDriverFactory struct{}

func (factory *azureDriverFactory) Create(parameters map[string]string) (storagedriver.StorageDriver, error) {
    return FromParameters(parameters)
}

// FromParameters constructs a new Driver with a given parameters map.
func FromParameters(parameters map[string]string) (*Driver, error) {
    accountName, ok := parameters[paramAccountName]
    if !ok {
        return nil, fmt.Errorf("No %s parameter provided", paramAccountName)
    }

    accountKey, ok := parameters[paramAccountKey]
    if !ok {
        return nil, fmt.Errorf("No %s parameter provided", paramAccountKey)
    }

    container, ok := parameters[paramContainer]
    if !ok {
        return nil, fmt.Errorf("No %s parameter provided", paramContainer)
    }

    return New(accountName, accountKey, container)
}

// New constructs a new Driver with the given Azure Storage Account credentials
func New(accountName, accountKey, container string) (*Driver, error) {
    api, err := azure.NewBasicClient(accountName, accountKey)
    if err != nil {
        return nil, err
    }

    blobClient := api.GetBlobService()

    // Create registry container
    if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil {
        return nil, err
    }

    return &Driver{
        client:    blobClient,
        container: container}, nil
}

// Implement the storagedriver.StorageDriver interface.

// GetContent retrieves the content stored at "path" as a []byte.
func (d *Driver) GetContent(path string) ([]byte, error) {
    blob, err := d.client.GetBlob(d.container, path)
    if err != nil {
        if is404(err) {
            return nil, storagedriver.PathNotFoundError{Path: path}
        }
        return nil, err
    }

    return ioutil.ReadAll(blob)
}

// PutContent stores the []byte content at a location designated by "path".
func (d *Driver) PutContent(path string, contents []byte) error {
    return d.client.PutBlockBlob(d.container, path, ioutil.NopCloser(bytes.NewReader(contents)))
}

// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
    if ok, err := d.client.BlobExists(d.container, path); err != nil {
        return nil, err
    } else if !ok {
        return nil, storagedriver.PathNotFoundError{Path: path}
    }

    size, err := d.CurrentSize(path)
    if err != nil {
        return nil, err
    }

    if offset >= int64(size) {
        return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
    }

    bytesRange := fmt.Sprintf("%v-", offset)
    resp, err := d.client.GetBlobRange(d.container, path, bytesRange)
    if err != nil {
        return nil, err
    }
    return resp, nil
}

// WriteStream stores the contents of the provided io.ReadCloser at a location
// designated by the given path.
func (d *Driver) WriteStream(path string, offset, size int64, reader io.ReadCloser) error {
    var (
        lastBlockNum    int
        resumableOffset int64
        blocks          []azure.Block
    )

    if blobExists, err := d.client.BlobExists(d.container, path); err != nil {
        return err
    } else if !blobExists { // new blob
        lastBlockNum = 0
        resumableOffset = 0
    } else { // append
        if parts, err := d.client.GetBlockList(d.container, path, azure.BlockListTypeCommitted); err != nil {
            return err
        } else if len(parts.CommittedBlocks) == 0 {
            lastBlockNum = 0
            resumableOffset = 0
        } else {
            lastBlock := parts.CommittedBlocks[len(parts.CommittedBlocks)-1]
            if lastBlockNum, err = blockNum(lastBlock.Name); err != nil {
                return fmt.Errorf("Cannot parse block name as number '%s': %s", lastBlock.Name, err.Error())
            }

            var totalSize int64
            for _, v := range parts.CommittedBlocks {
                blocks = append(blocks, azure.Block{
                    Id:     v.Name,
                    Status: azure.BlockStatusCommitted})
                totalSize += int64(v.Size)
            }

            // NOTE: Azure driver currently supports only append mode (resumable
            // index is exactly where the committed blocks of the blob end).
            // In order to support writing to offsets other than last index,
            // adjacent blocks overlapping with the [offset:offset+size] area
            // must be fetched, split and overwritten accordingly.
            // As the current use of this method is append only, that implementation
            // is omitted.
            resumableOffset = totalSize
        }
    }

    if offset != resumableOffset {
        return storagedriver.InvalidOffsetError{Path: path, Offset: offset}
    }

    // Put content
    buf := make([]byte, azure.MaxBlobBlockSize)
    for {
        // Read chunks of exactly size N except the last chunk to
        // maximize block size and minimize block count.
        n, err := io.ReadFull(reader, buf)
        if err == io.EOF {
            break
        }

        data := buf[:n]
        blockID := toBlockID(lastBlockNum + 1)
        if err = d.client.PutBlock(d.container, path, blockID, data); err != nil {
            return err
        }
        blocks = append(blocks, azure.Block{
            Id:     blockID,
            Status: azure.BlockStatusLatest})
        lastBlockNum++
    }

    // Commit block list
    return d.client.PutBlockList(d.container, path, blocks)
}

// CurrentSize retrieves the current size in bytes of the object at the given
// path.
func (d *Driver) CurrentSize(path string) (uint64, error) {
    props, err := d.client.GetBlobProperties(d.container, path)
    if err != nil {
        return 0, err
    }
    return props.ContentLength, nil
}

// List returns a list of the objects that are direct descendants of the given
// path.
func (d *Driver) List(path string) ([]string, error) {
    if path == "/" {
        path = ""
    }

    blobs, err := d.listBlobs(d.container, path)
    if err != nil {
        return blobs, err
    }

    list := directDescendants(blobs, path)
    return list, nil
}

// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *Driver) Move(sourcePath string, destPath string) error {
    sourceBlobURL := d.client.GetBlobUrl(d.container, sourcePath)
    err := d.client.CopyBlob(d.container, destPath, sourceBlobURL)
    if err != nil {
        if is404(err) {
            return storagedriver.PathNotFoundError{Path: sourcePath}
        }
        return err
    }

    return d.client.DeleteBlob(d.container, sourcePath)
}

// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *Driver) Delete(path string) error {
    ok, err := d.client.DeleteBlobIfExists(d.container, path)
    if err != nil {
        return err
    }
    if ok {
        return nil // was a blob and deleted, return
    }

    // Not a blob, see if path is a virtual container with blobs
    blobs, err := d.listBlobs(d.container, path)
    if err != nil {
        return err
    }

    for _, b := range blobs {
        if err = d.client.DeleteBlob(d.container, b); err != nil {
            return err
        }
    }

    if len(blobs) == 0 {
        return storagedriver.PathNotFoundError{Path: path}
    }
    return nil
}

// directDescendants will find direct descendants (blobs or virtual containers)
// of the given prefix from a list of blob paths and will return their full
// paths. Elements in the blobs list must be prefixed with a "/".
//
// Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} is
// {"/foo", "/bar"} and direct descendants of "bar" is {"/bar/1", "/bar/2"}
func directDescendants(blobs []string, prefix string) []string {
    if !strings.HasPrefix(prefix, "/") { // add leading '/'
        prefix = "/" + prefix
    }
    if !strings.HasSuffix(prefix, "/") { // containerify the path
        prefix += "/"
    }

    out := make(map[string]bool)
    for _, b := range blobs {
        if strings.HasPrefix(b, prefix) {
            rel := b[len(prefix):]
            c := strings.Count(rel, "/")
            if c == 0 {
                out[b] = true
            } else {
                out[prefix+rel[:strings.Index(rel, "/")]] = true
            }
        }
    }

    var keys []string
    for k := range out {
        keys = append(keys, k)
    }
    return keys
}

func (d *Driver) listBlobs(container, virtPath string) ([]string, error) {
    if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path
        virtPath += "/"
    }

    out := []string{}
    marker := ""
    for {
        resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{
            Marker: marker,
            Prefix: virtPath,
        })

        if err != nil {
            return out, err
        }

        for _, b := range resp.Blobs {
            out = append(out, b.Name)
        }

        if len(resp.Blobs) == 0 || resp.NextMarker == "" {
            break
        }
        marker = resp.NextMarker
    }
    return out, nil
}

func is404(err error) bool {
    e, ok := err.(azure.StorageServiceError)
    return ok && e.StatusCode == 404
}

func blockNum(b64Name string) (int, error) {
    s, err := base64.StdEncoding.DecodeString(b64Name)
    if err != nil {
        return 0, err
    }

    return strconv.Atoi(string(s))
}

func toBlockID(i int) string {
    return base64.StdEncoding.EncodeToString([]byte(strconv.Itoa(i)))
}

67
storagedriver/azure/azure_test.go
Normal file
@@ -0,0 +1,67 @@
// +build ignore

package azure

import (
    "fmt"
    "os"
    "strings"
    "testing"

    "github.com/docker/docker-registry/storagedriver"
    "github.com/docker/docker-registry/storagedriver/testsuites"
    . "gopkg.in/check.v1"
)

const (
    envAccountName = "AZURE_STORAGE_ACCOUNT_NAME"
    envAccountKey  = "AZURE_STORAGE_ACCOUNT_KEY"
    envContainer   = "AZURE_STORAGE_CONTAINER"
)

// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }

func init() {
    var (
        accountName string
        accountKey  string
        container   string
    )

    config := []struct {
        env   string
        value *string
    }{
        {envAccountName, &accountName},
        {envAccountKey, &accountKey},
        {envContainer, &container},
    }

    missing := []string{}
    for _, v := range config {
        *v.value = os.Getenv(v.env)
        if *v.value == "" {
            missing = append(missing, v.env)
        }
    }

    azureDriverConstructor := func() (storagedriver.StorageDriver, error) {
        return New(accountName, accountKey, container)
    }

    // Skip Azure storage driver tests if environment variable parameters are not provided
    skipCheck := func() string {
        if len(missing) > 0 {
            return fmt.Sprintf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", "))
        }
        return ""
    }

    testsuites.RegisterInProcessSuite(azureDriverConstructor, skipCheck)
    testsuites.RegisterIPCSuite(driverName, map[string]string{
        paramAccountName: accountName,
        paramAccountKey:  accountKey,
        paramContainer:   container,
    }, skipCheck)
}

71
storagedriver/factory/factory.go
Normal file
@@ -0,0 +1,71 @@
package factory

import (
    "fmt"

    "github.com/docker/docker-registry/storagedriver"
)

// driverFactories stores an internal mapping between storage driver names and their respective
// factories
var driverFactories = make(map[string]StorageDriverFactory)

// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces
// Storage drivers should call Register() with a factory to make the driver available by name
type StorageDriverFactory interface {
    // Create returns a new storagedriver.StorageDriver with the given parameters
    // Parameters will vary by driver and may be ignored
    // Each parameter key must only consist of lowercase letters and numbers
    Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error)
}

// Register makes a storage driver available by the provided name.
// If Register is called twice with the same name or if driver factory is nil, it panics.
func Register(name string, factory StorageDriverFactory) {
    if factory == nil {
        panic("Must not provide nil StorageDriverFactory")
    }
    _, registered := driverFactories[name]
    if registered {
        panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name))
    }

    driverFactories[name] = factory
}

// Create a new storagedriver.StorageDriver with the given name and parameters
// To run in-process, the StorageDriverFactory must first be registered with the given name
// If no in-process drivers are found with the given name, this attempts to create an IPC driver
// If no in-process or external drivers are found, an InvalidStorageDriverError is returned
func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
    driverFactory, ok := driverFactories[name]
    if !ok {
        return nil, InvalidStorageDriverError{name}

        // NOTE(stevvooe): We are disabling storagedriver ipc for now, as the
        // server and client need to be updated for the changed API calls and
        // there were some problems with libchan hanging. We'll phase this
        // functionality back in over the next few weeks.

        // No registered StorageDriverFactory found, try ipc
        // driverClient, err := ipc.NewDriverClient(name, parameters)
        // if err != nil {
        //     return nil, InvalidStorageDriverError{name}
        // }
        // err = driverClient.Start()
        // if err != nil {
        //     return nil, err
        // }
        // return driverClient, nil
    }
    return driverFactory.Create(parameters)
}

// InvalidStorageDriverError records an attempt to construct an unregistered storage driver
type InvalidStorageDriverError struct {
    Name string
}

func (err InvalidStorageDriverError) Error() string {
    return fmt.Sprintf("StorageDriver not registered: %s", err.Name)
}

79
storagedriver/fileinfo.go
Normal file
@@ -0,0 +1,79 @@
package storagedriver

import "time"

// FileInfo returns information about a given path. Inspired by os.FileInfo,
// it elides the base name method for a full path instead.
type FileInfo interface {
    // Path provides the full path of the target of this file info.
    Path() string

    // Size returns current length in bytes of the file. The return value can
    // be used to write to the end of the file at path. The value is
    // meaningless if IsDir returns true.
    Size() int64

    // ModTime returns the modification time for the file. For backends that
    // don't have a modification time, the creation time should be returned.
    ModTime() time.Time

    // IsDir returns true if the path is a directory.
    IsDir() bool
}

// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal
// should only be used by storagedriver implementations. They should be moved
// to a "driver" package, similar to database/sql.

// FileInfoFields provides the exported fields for implementing FileInfo
// interface in storagedriver implementations. It should be used with
// InternalFileInfo.
type FileInfoFields struct {
    // Path provides the full path of the target of this file info.
    Path string

    // Size is current length in bytes of the file. The value of this field
    // can be used to write to the end of the file at path. The value is
    // meaningless if IsDir is set to true.
    Size int64

    // ModTime returns the modification time for the file. For backends that
    // don't have a modification time, the creation time should be returned.
    ModTime time.Time

    // IsDir returns true if the path is a directory.
    IsDir bool
}

// FileInfoInternal implements the FileInfo interface. This should only be
// used by storagedriver implementations that don't have a specialized
// FileInfo type.
type FileInfoInternal struct {
    FileInfoFields
}

var _ FileInfo = FileInfoInternal{}
var _ FileInfo = &FileInfoInternal{}

// Path provides the full path of the target of this file info.
func (fi FileInfoInternal) Path() string {
    return fi.FileInfoFields.Path
}

// Size returns current length in bytes of the file. The return value can
// be used to write to the end of the file at path. The value is
// meaningless if IsDir returns true.
func (fi FileInfoInternal) Size() int64 {
    return fi.FileInfoFields.Size
}

// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
func (fi FileInfoInternal) ModTime() time.Time {
    return fi.FileInfoFields.ModTime
}

// IsDir returns true if the path is a directory.
func (fi FileInfoInternal) IsDir() bool {
    return fi.FileInfoFields.IsDir
}

305
storagedriver/filesystem/driver.go
Normal file
@@ -0,0 +1,305 @@
package filesystem

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path"
    "time"

    "github.com/docker/docker-registry/storagedriver"
    "github.com/docker/docker-registry/storagedriver/factory"
)

const driverName = "filesystem"
const defaultRootDirectory = "/tmp/registry/storage"

func init() {
    factory.Register(driverName, &filesystemDriverFactory{})
}

// filesystemDriverFactory implements the factory.StorageDriverFactory interface
type filesystemDriverFactory struct{}

func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
    return FromParameters(parameters), nil
}

// Driver is a storagedriver.StorageDriver implementation backed by a local
// filesystem. All provided paths will be subpaths of the RootDirectory
type Driver struct {
    rootDirectory string
}

// FromParameters constructs a new Driver with a given parameters map
// Optional Parameters:
// - rootdirectory
func FromParameters(parameters map[string]interface{}) *Driver {
    var rootDirectory = defaultRootDirectory
    if parameters != nil {
        rootDir, ok := parameters["rootdirectory"]
        if ok {
            rootDirectory = fmt.Sprint(rootDir)
        }
    }
    return New(rootDirectory)
}

// New constructs a new Driver with a given rootDirectory
func New(rootDirectory string) *Driver {
    return &Driver{rootDirectory}
}

// Implement the storagedriver.StorageDriver interface

// GetContent retrieves the content stored at "path" as a []byte.
func (d *Driver) GetContent(path string) ([]byte, error) {
    if !storagedriver.PathRegexp.MatchString(path) {
        return nil, storagedriver.InvalidPathError{Path: path}
    }

    rc, err := d.ReadStream(path, 0)
    if err != nil {
        return nil, err
    }
    defer rc.Close()

    p, err := ioutil.ReadAll(rc)
    if err != nil {
        return nil, err
    }

    return p, nil
}

// PutContent stores the []byte content at a location designated by "path".
func (d *Driver) PutContent(subPath string, contents []byte) error {
    if !storagedriver.PathRegexp.MatchString(subPath) {
        return storagedriver.InvalidPathError{Path: subPath}
    }

    if _, err := d.WriteStream(subPath, 0, bytes.NewReader(contents)); err != nil {
        return err
    }

    return os.Truncate(d.fullPath(subPath), int64(len(contents)))
}

// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
    if !storagedriver.PathRegexp.MatchString(path) {
        return nil, storagedriver.InvalidPathError{Path: path}
    }

    if offset < 0 {
        return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
    }

    file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644)
    if err != nil {
        if os.IsNotExist(err) {
            return nil, storagedriver.PathNotFoundError{Path: path}
        }

        return nil, err
    }

    seekPos, err := file.Seek(int64(offset), os.SEEK_SET)
    if err != nil {
        file.Close()
        return nil, err
    } else if seekPos < int64(offset) {
        file.Close()
        return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
    }

    return file, nil
}

// WriteStream stores the contents of the provided io.Reader at a location
// designated by the given path.
func (d *Driver) WriteStream(subPath string, offset int64, reader io.Reader) (nn int64, err error) {
    if !storagedriver.PathRegexp.MatchString(subPath) {
        return 0, storagedriver.InvalidPathError{Path: subPath}
    }

    if offset < 0 {
        return 0, storagedriver.InvalidOffsetError{Path: subPath, Offset: offset}
    }

    // TODO(stevvooe): This needs to be a requirement.
    // if !path.IsAbs(subPath) {
    //     return fmt.Errorf("absolute path required: %q", subPath)
    // }

    fullPath := d.fullPath(subPath)
    parentDir := path.Dir(fullPath)
    if err := os.MkdirAll(parentDir, 0755); err != nil {
        return 0, err
    }

    fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0644)
    if err != nil {
        // TODO(stevvooe): A few missing conditions in storage driver:
        //  1. What if the path is already a directory?
        //  2. Should number 1 be exposed explicitly in storagedriver?
        //  3. Can this path not exist, even if we create above?
        return 0, err
    }
    defer fp.Close()

    nn, err = fp.Seek(offset, os.SEEK_SET)
    if err != nil {
        return 0, err
    }

    if nn != offset {
        return 0, fmt.Errorf("bad seek to %v, expected %v in fp=%v", offset, nn, fp)
    }

    return io.Copy(fp, reader)
}

// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the creation time.
func (d *Driver) Stat(subPath string) (storagedriver.FileInfo, error) {
    if !storagedriver.PathRegexp.MatchString(subPath) {
        return nil, storagedriver.InvalidPathError{Path: subPath}
    }

    fullPath := d.fullPath(subPath)

    fi, err := os.Stat(fullPath)
    if err != nil {
        if os.IsNotExist(err) {
            return nil, storagedriver.PathNotFoundError{Path: subPath}
        }

        return nil, err
    }

    return fileInfo{
        path:     subPath,
        FileInfo: fi,
    }, nil
}

// List returns a list of the objects that are direct descendants of the given
// path.
func (d *Driver) List(subPath string) ([]string, error) {
    if !storagedriver.PathRegexp.MatchString(subPath) && subPath != "/" {
        return nil, storagedriver.InvalidPathError{Path: subPath}
    }

    if subPath[len(subPath)-1] != '/' {
        subPath += "/"
    }
    fullPath := d.fullPath(subPath)

    dir, err := os.Open(fullPath)
    if err != nil {
        if os.IsNotExist(err) {
            return nil, storagedriver.PathNotFoundError{Path: subPath}
        }
        return nil, err
    }

    defer dir.Close()

    fileNames, err := dir.Readdirnames(0)
    if err != nil {
        return nil, err
    }

    keys := make([]string, 0, len(fileNames))
    for _, fileName := range fileNames {
        keys = append(keys, path.Join(subPath, fileName))
    }

    return keys, nil
}

// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *Driver) Move(sourcePath string, destPath string) error {
    if !storagedriver.PathRegexp.MatchString(sourcePath) {
        return storagedriver.InvalidPathError{Path: sourcePath}
    } else if !storagedriver.PathRegexp.MatchString(destPath) {
        return storagedriver.InvalidPathError{Path: destPath}
    }

    source := d.fullPath(sourcePath)
    dest := d.fullPath(destPath)

    if _, err := os.Stat(source); os.IsNotExist(err) {
        return storagedriver.PathNotFoundError{Path: sourcePath}
    }

    if err := os.MkdirAll(path.Dir(dest), 0755); err != nil {
        return err
    }

    err := os.Rename(source, dest)
    return err
}

// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *Driver) Delete(subPath string) error {
    if !storagedriver.PathRegexp.MatchString(subPath) {
        return storagedriver.InvalidPathError{Path: subPath}
    }

    fullPath := d.fullPath(subPath)

    _, err := os.Stat(fullPath)
    if err != nil && !os.IsNotExist(err) {
        return err
    } else if err != nil {
        return storagedriver.PathNotFoundError{Path: subPath}
    }

    err = os.RemoveAll(fullPath)
    return err
}

// fullPath returns the absolute path of a key within the Driver's storage.
func (d *Driver) fullPath(subPath string) string {
    return path.Join(d.rootDirectory, subPath)
}

type fileInfo struct {
    os.FileInfo
    path string
}

var _ storagedriver.FileInfo = fileInfo{}

// Path provides the full path of the target of this file info.
func (fi fileInfo) Path() string {
    return fi.path
}

// Size returns current length in bytes of the file. The return value can
// be used to write to the end of the file at path. The value is
// meaningless if IsDir returns true.
func (fi fileInfo) Size() int64 {
    if fi.IsDir() {
        return 0
    }

    return fi.FileInfo.Size()
}

// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
func (fi fileInfo) ModTime() time.Time {
    return fi.FileInfo.ModTime()
}

// IsDir returns true if the path is a directory.
func (fi fileInfo) IsDir() bool {
    return fi.FileInfo.IsDir()
}

29
storagedriver/filesystem/driver_test.go
Normal file
@@ -0,0 +1,29 @@
package filesystem

import (
    "io/ioutil"
    "os"
    "testing"

    "github.com/docker/docker-registry/storagedriver"
    "github.com/docker/docker-registry/storagedriver/testsuites"
    . "gopkg.in/check.v1"
)

// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }

func init() {
    root, err := ioutil.TempDir("", "driver-")
    if err != nil {
        panic(err)
    }
    defer os.Remove(root)

    testsuites.RegisterInProcessSuite(func() (storagedriver.StorageDriver, error) {
        return New(root), nil
    }, testsuites.NeverSkip)

    // BUG(stevvooe): IPC is broken so we're disabling for now. Will revisit later.
    // testsuites.RegisterIPCSuite(driverName, map[string]string{"rootdirectory": root}, testsuites.NeverSkip)
}

253
storagedriver/inmemory/driver.go
Normal file
@@ -0,0 +1,253 @@
package inmemory

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
    "sync"
    "time"

    "github.com/docker/docker-registry/storagedriver"
    "github.com/docker/docker-registry/storagedriver/factory"
)

const driverName = "inmemory"

func init() {
    factory.Register(driverName, &inMemoryDriverFactory{})
}

// inMemoryDriverFactory implements the factory.StorageDriverFactory interface.
type inMemoryDriverFactory struct{}

func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
    return New(), nil
}

// Driver is a storagedriver.StorageDriver implementation backed by a local map.
// Intended solely for example and testing purposes.
type Driver struct {
    root  *dir
    mutex sync.RWMutex
}

// New constructs a new Driver.
func New() *Driver {
    return &Driver{root: &dir{
        common: common{
            p:   "/",
            mod: time.Now(),
        },
    }}
}

// Implement the storagedriver.StorageDriver interface.

// GetContent retrieves the content stored at "path" as a []byte.
func (d *Driver) GetContent(path string) ([]byte, error) {
    if !storagedriver.PathRegexp.MatchString(path) {
        return nil, storagedriver.InvalidPathError{Path: path}
    }

    d.mutex.RLock()
    defer d.mutex.RUnlock()

    rc, err := d.ReadStream(path, 0)
    if err != nil {
        return nil, err
    }
    defer rc.Close()

    return ioutil.ReadAll(rc)
}

// PutContent stores the []byte content at a location designated by "path".
func (d *Driver) PutContent(p string, contents []byte) error {
    if !storagedriver.PathRegexp.MatchString(p) {
        return storagedriver.InvalidPathError{Path: p}
    }

    d.mutex.Lock()
    defer d.mutex.Unlock()

    f, err := d.root.mkfile(p)
    if err != nil {
        // TODO(stevvooe): Again, we need to clarify when this is not a
        // directory in StorageDriver API.
        return fmt.Errorf("not a file")
    }

    f.truncate()
    f.WriteAt(contents, 0)

    return nil
}

// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
    if !storagedriver.PathRegexp.MatchString(path) {
        return nil, storagedriver.InvalidPathError{Path: path}
    }

    d.mutex.RLock()
    defer d.mutex.RUnlock()

    if offset < 0 {
        return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
    }

    path = normalize(path)
    found := d.root.find(path)

    if found.path() != path {
        return nil, storagedriver.PathNotFoundError{Path: path}
    }

    if found.isdir() {
        return nil, fmt.Errorf("%q is a directory", path)
    }

    return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil
}

// WriteStream stores the contents of the provided io.Reader at a location
// designated by the given path.
func (d *Driver) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) {
    if !storagedriver.PathRegexp.MatchString(path) {
        return 0, storagedriver.InvalidPathError{Path: path}
    }

    d.mutex.Lock()
    defer d.mutex.Unlock()

    if offset < 0 {
        return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
    }

    normalized := normalize(path)

    f, err := d.root.mkfile(normalized)
    if err != nil {
        return 0, fmt.Errorf("not a file")
    }

    var buf bytes.Buffer

    nn, err = buf.ReadFrom(reader)
    if err != nil {
        // TODO(stevvooe): This condition is odd and we may need to clarify:
        // we've read nn bytes from reader but have written nothing to the
        // backend. What is the correct return value? Really, the caller needs
        // to know that the reader has been advanced and reattempting the
        // operation is incorrect.
        return nn, err
    }

    f.WriteAt(buf.Bytes(), offset)
    return nn, err
}

// Stat returns info about the provided path.
func (d *Driver) Stat(path string) (storagedriver.FileInfo, error) {
    if !storagedriver.PathRegexp.MatchString(path) {
        return nil, storagedriver.InvalidPathError{Path: path}
    }

    d.mutex.RLock()
    defer d.mutex.RUnlock()

    normalized := normalize(path)
    found := d.root.find(normalized)

    if found.path() != normalized {
        return nil, storagedriver.PathNotFoundError{Path: path}
    }

    fi := storagedriver.FileInfoFields{
        Path:    path,
        IsDir:   found.isdir(),
        ModTime: found.modtime(),
    }

    if !fi.IsDir {
        fi.Size = int64(len(found.(*file).data))
    }

    return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}

// List returns a list of the objects that are direct descendants of the given
// path.
func (d *Driver) List(path string) ([]string, error) {
    if !storagedriver.PathRegexp.MatchString(path) && path != "/" {
        return nil, storagedriver.InvalidPathError{Path: path}
    }

    // Guard concurrent access to the tree while listing.
    d.mutex.RLock()
    defer d.mutex.RUnlock()

    normalized := normalize(path)

    found := d.root.find(normalized)

    if !found.isdir() {
        return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this...
    }

    entries, err := found.(*dir).list(normalized)

    if err != nil {
        switch err {
        case errNotExists:
            return nil, storagedriver.PathNotFoundError{Path: path}
        case errIsNotDir:
            return nil, fmt.Errorf("not a directory")
        default:
            return nil, err
        }
    }

    return entries, nil
}

// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *Driver) Move(sourcePath string, destPath string) error {
    if !storagedriver.PathRegexp.MatchString(sourcePath) {
        return storagedriver.InvalidPathError{Path: sourcePath}
    } else if !storagedriver.PathRegexp.MatchString(destPath) {
        return storagedriver.InvalidPathError{Path: destPath}
    }

    d.mutex.Lock()
    defer d.mutex.Unlock()

    normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath)

    err := d.root.move(normalizedSrc, normalizedDst)
    switch err {
    case errNotExists:
        // move fails with errNotExists when the source is missing.
        return storagedriver.PathNotFoundError{Path: sourcePath}
    default:
        return err
    }
}

// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *Driver) Delete(path string) error {
    if !storagedriver.PathRegexp.MatchString(path) {
        return storagedriver.InvalidPathError{Path: path}
    }

    d.mutex.Lock()
    defer d.mutex.Unlock()

    normalized := normalize(path)

    err := d.root.delete(normalized)
    switch err {
    case errNotExists:
        return storagedriver.PathNotFoundError{Path: path}
    default:
        return err
    }
}

24
storagedriver/inmemory/driver_test.go
Normal file
@@ -0,0 +1,24 @@
package inmemory

import (
    "testing"

    "github.com/docker/docker-registry/storagedriver"
    "github.com/docker/docker-registry/storagedriver/testsuites"

    "gopkg.in/check.v1"
)

// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { check.TestingT(t) }

func init() {
    inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) {
        return New(), nil
    }
    testsuites.RegisterInProcessSuite(inmemoryDriverConstructor, testsuites.NeverSkip)

    // BUG(stevvooe): Disable flaky IPC tests for now until we can troubleshoot
    // the problems with libchan.
    // testsuites.RegisterIPCSuite(driverName, nil, testsuites.NeverSkip)
}

332
storagedriver/inmemory/mfs.go
Normal file
@@ -0,0 +1,332 @@
package inmemory

import (
    "fmt"
    "io"
    "path"
    "sort"
    "strings"
    "time"
)

var (
    errExists    = fmt.Errorf("exists")
    errNotExists = fmt.Errorf("notexists")
    errIsNotDir  = fmt.Errorf("notdir")
    errIsDir     = fmt.Errorf("isdir")
)

type node interface {
    name() string
    path() string
    isdir() bool
    modtime() time.Time
}

// dir is the central type for the memory-based storagedriver. All operations
// are dispatched from a root dir.
type dir struct {
    common

    // TODO(stevvooe): Use sorted slice + search.
    children map[string]node
}

var _ node = &dir{}

func (d *dir) isdir() bool {
    return true
}

// add places the node n into dir d.
func (d *dir) add(n node) {
    if d.children == nil {
        d.children = make(map[string]node)
    }

    d.children[n.name()] = n
    d.mod = time.Now()
}

// find searches for the node, given path q in dir. If the node is found, it
// will be returned and the returned (node).path() will match q. If the node
// is not found, the closest existing parent is returned instead.
func (d *dir) find(q string) node {
    q = strings.Trim(q, "/")
    i := strings.Index(q, "/")

    if q == "" {
        return d
    }

    if i == 0 {
        panic("shouldn't happen, no root paths")
    }

    var component string
    if i < 0 {
        // No more path components
        component = q
    } else {
        component = q[:i]
    }

    child, ok := d.children[component]
    if !ok {
        // Node was not found. Return the closest existing parent.
        return d
    }

    if child.isdir() {
        // traverse down!
        q = q[i+1:]
        return child.(*dir).find(q)
    }

    return child
}

func (d *dir) list(p string) ([]string, error) {
    n := d.find(p)

    if n.path() != p {
        return nil, errNotExists
    }

    if !n.isdir() {
        return nil, errIsNotDir
    }

    var children []string
    for _, child := range n.(*dir).children {
        children = append(children, child.path())
    }

    sort.Strings(children)
    return children, nil
}

// mkfile creates the file at p or returns the existing one. It returns an
// error if p exists and is a directory. Essentially, this is open or create.
func (d *dir) mkfile(p string) (*file, error) {
    n := d.find(p)
    if n.path() == p {
        if n.isdir() {
            return nil, errIsDir
        }

        return n.(*file), nil
    }

    dirpath, filename := path.Split(p)
    // Make any non-existent directories
    n, err := d.mkdirs(dirpath)
    if err != nil {
        return nil, err
    }

    dd := n.(*dir)
    n = &file{
        common: common{
            p:   path.Join(dd.path(), filename),
            mod: time.Now(),
        },
    }

    dd.add(n)
    return n.(*file), nil
}

// mkdirs creates any missing directory entries in p and returns the result.
func (d *dir) mkdirs(p string) (*dir, error) {
    p = normalize(p)

    n := d.find(p)

    if !n.isdir() {
        // Found something there
        return nil, errIsNotDir
    }

    if n.path() == p {
        return n.(*dir), nil
    }

    dd := n.(*dir)

    relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/")

    if relative == "" {
        return dd, nil
    }

    components := strings.Split(relative, "/")
    for _, component := range components {
        d, err := dd.mkdir(component)

        if err != nil {
            // This should actually never happen, since there are no children.
            return nil, err
        }
        dd = d
    }

    return dd, nil
}

// mkdir creates a child directory under d with the given name.
func (d *dir) mkdir(name string) (*dir, error) {
    if name == "" {
        return nil, fmt.Errorf("invalid dirname")
    }

    _, ok := d.children[name]
    if ok {
        return nil, errExists
    }

    child := &dir{
        common: common{
            p:   path.Join(d.path(), name),
            mod: time.Now(),
        },
    }
    d.add(child)
    d.mod = time.Now()

    return child, nil
}

func (d *dir) move(src, dst string) error {
    dstDirname, _ := path.Split(dst)

    dp, err := d.mkdirs(dstDirname)
    if err != nil {
        return err
    }

    srcDirname, srcFilename := path.Split(src)
    sp := d.find(srcDirname)

    if normalize(srcDirname) != normalize(sp.path()) {
        return errNotExists
    }

    s, ok := sp.(*dir).children[srcFilename]
    if !ok {
        return errNotExists
    }

    delete(sp.(*dir).children, srcFilename)

    switch n := s.(type) {
    case *dir:
        n.p = dst
    case *file:
        n.p = dst
    }

    dp.add(s)

    return nil
}

func (d *dir) delete(p string) error {
    dirname, filename := path.Split(p)
    parent := d.find(dirname)

    if normalize(dirname) != normalize(parent.path()) {
        return errNotExists
    }

    if _, ok := parent.(*dir).children[filename]; !ok {
        return errNotExists
    }

    delete(parent.(*dir).children, filename)
    return nil
}

// dump outputs a primitive directory structure to stdout.
func (d *dir) dump(indent string) {
    fmt.Println(indent, d.name()+"/")

    for _, child := range d.children {
        if child.isdir() {
            child.(*dir).dump(indent + "\t")
        } else {
            fmt.Println(indent, child.name())
        }
    }
}

func (d *dir) String() string {
    return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children)
}

// file stores actual data in the fs tree. It acts like an open, seekable file
// where operations are conducted through ReadAt and WriteAt. Use it with
// SectionReader for the best effect.
type file struct {
    common
    data []byte
}

var _ node = &file{}

func (f *file) isdir() bool {
    return false
}

func (f *file) truncate() {
    f.data = f.data[:0]
}

func (f *file) sectionReader(offset int64) io.Reader {
    return io.NewSectionReader(f, offset, int64(len(f.data))-offset)
}

func (f *file) ReadAt(p []byte, offset int64) (n int, err error) {
    return copy(p, f.data[offset:]), nil
}

func (f *file) WriteAt(p []byte, offset int64) (n int, err error) {
    off := int(offset)
    if cap(f.data) < off+len(p) {
        data := make([]byte, len(f.data), off+len(p))
        copy(data, f.data)
        f.data = data
    }

    f.data = f.data[:off+len(p)]

    return copy(f.data[off:off+len(p)], p), nil
}

func (f *file) String() string {
    return fmt.Sprintf("&file{path: %q}", f.p)
}

// common provides shared fields and methods for node implementations.
type common struct {
    p   string
    mod time.Time
}

func (c *common) name() string {
    _, name := path.Split(c.p)
    return name
}

func (c *common) path() string {
    return c.p
}

func (c *common) modtime() time.Time {
    return c.mod
}

func normalize(p string) string {
    return "/" + strings.Trim(p, "/")
}

455
storagedriver/ipc/client.go
Normal file
@@ -0,0 +1,455 @@
// +build ignore

package ipc

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "net"
    "os"
    "os/exec"
    "syscall"

    "github.com/docker/docker-registry/storagedriver"
    "github.com/docker/libchan"
    "github.com/docker/libchan/spdy"
)

// StorageDriverExecutablePrefix is the prefix which the IPC storage driver
// loader expects driver executables to begin with. For example, the s3 driver
// should be named "registry-storagedriver-s3".
const StorageDriverExecutablePrefix = "registry-storagedriver-"

// StorageDriverClient is a storagedriver.StorageDriver implementation using a
// managed child process communicating over IPC using libchan with a unix domain
// socket
type StorageDriverClient struct {
    subprocess *exec.Cmd
    exitChan   chan error
    exitErr    error
    stopChan   chan struct{}
    socket     *os.File
    transport  *spdy.Transport
    sender     libchan.Sender
    version    storagedriver.Version
}

// NewDriverClient constructs a new out-of-process storage driver using the
// driver name and configuration parameters
// A user must call Start on this driver client before remote method calls can
// be made
//
// Looks for drivers in the following locations in order:
// - Storage drivers directory (to be determined, yet not implemented)
// - $GOPATH/bin
// - $PATH
func NewDriverClient(name string, parameters map[string]string) (*StorageDriverClient, error) {
    paramsBytes, err := json.Marshal(parameters)
    if err != nil {
        return nil, err
    }

    driverExecName := StorageDriverExecutablePrefix + name
    driverPath, err := exec.LookPath(driverExecName)
    if err != nil {
        return nil, err
    }

    command := exec.Command(driverPath, string(paramsBytes))

    return &StorageDriverClient{
        subprocess: command,
    }, nil
}

// Start starts the designated child process storage driver and binds a socket
// to this process for IPC method calls
func (driver *StorageDriverClient) Start() error {
    driver.exitErr = nil
    driver.exitChan = make(chan error)
    driver.stopChan = make(chan struct{})

    fileDescriptors, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0)
    if err != nil {
        return err
    }

    childSocket := os.NewFile(uintptr(fileDescriptors[0]), "childSocket")
    driver.socket = os.NewFile(uintptr(fileDescriptors[1]), "parentSocket")

    driver.subprocess.Stdout = os.Stdout
    driver.subprocess.Stderr = os.Stderr
    driver.subprocess.ExtraFiles = []*os.File{childSocket}

    if err = driver.subprocess.Start(); err != nil {
        driver.Stop()
        return err
    }

    go driver.handleSubprocessExit()

    if err = childSocket.Close(); err != nil {
        driver.Stop()
        return err
    }

    connection, err := net.FileConn(driver.socket)
    if err != nil {
        driver.Stop()
        return err
    }
    driver.transport, err = spdy.NewClientTransport(connection)
    if err != nil {
        driver.Stop()
        return err
    }
    driver.sender, err = driver.transport.NewSendChannel()
    if err != nil {
        driver.Stop()
        return err
    }

    // Check the driver's version to determine compatibility
    receiver, remoteSender := libchan.Pipe()
    err = driver.sender.Send(&Request{Type: "Version", ResponseChannel: remoteSender})
    if err != nil {
        driver.Stop()
        return err
    }

    var response VersionResponse
    err = receiver.Receive(&response)
    if err != nil {
        driver.Stop()
        return err
    }

    if response.Error != nil {
        return response.Error.Unwrap()
    }

    driver.version = response.Version

    if driver.version.Major() != storagedriver.CurrentVersion.Major() || driver.version.Minor() > storagedriver.CurrentVersion.Minor() {
        return IncompatibleVersionError{driver.version}
    }

    return nil
}

// Stop stops the child process storage driver
// storagedriver.StorageDriver methods called after Stop will fail
func (driver *StorageDriverClient) Stop() error {
    var closeSenderErr, closeTransportErr, closeSocketErr, killErr error

    if driver.sender != nil {
|
closeSenderErr = driver.sender.Close()
|
||||||
|
}
|
||||||
|
if driver.transport != nil {
|
||||||
|
closeTransportErr = driver.transport.Close()
|
||||||
|
}
|
||||||
|
if driver.socket != nil {
|
||||||
|
closeSocketErr = driver.socket.Close()
|
||||||
|
}
|
||||||
|
if driver.subprocess != nil {
|
||||||
|
killErr = driver.subprocess.Process.Kill()
|
||||||
|
}
|
||||||
|
if driver.stopChan != nil {
|
||||||
|
driver.stopChan <- struct{}{}
|
||||||
|
close(driver.stopChan)
|
||||||
|
}
|
||||||
|
|
||||||
|
if closeSenderErr != nil {
|
||||||
|
return closeSenderErr
|
||||||
|
} else if closeTransportErr != nil {
|
||||||
|
return closeTransportErr
|
||||||
|
} else if closeSocketErr != nil {
|
||||||
|
return closeSocketErr
|
||||||
|
}
|
||||||
|
|
||||||
|
return killErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implement the storagedriver.StorageDriver interface over IPC
|
||||||
|
|
||||||
|
// GetContent retrieves the content stored at "path" as a []byte.
|
||||||
|
func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) {
|
||||||
|
if err := driver.exited(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
receiver, remoteSender := libchan.Pipe()
|
||||||
|
|
||||||
|
params := map[string]interface{}{"Path": path}
|
||||||
|
err := driver.sender.Send(&Request{Type: "GetContent", Parameters: params, ResponseChannel: remoteSender})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
response := new(ReadStreamResponse)
|
||||||
|
err = driver.receiveResponse(receiver, response)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.Error != nil {
|
||||||
|
return nil, response.Error.Unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
defer response.Reader.Close()
|
||||||
|
contents, err := ioutil.ReadAll(response.Reader)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return contents, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutContent stores the []byte content at a location designated by "path".
|
||||||
|
func (driver *StorageDriverClient) PutContent(path string, contents []byte) error {
|
||||||
|
if err := driver.exited(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
receiver, remoteSender := libchan.Pipe()
|
||||||
|
|
||||||
|
params := map[string]interface{}{"Path": path, "Reader": ioutil.NopCloser(bytes.NewReader(contents))}
|
||||||
|
err := driver.sender.Send(&Request{Type: "PutContent", Parameters: params, ResponseChannel: remoteSender})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
response := new(WriteStreamResponse)
|
||||||
|
err = driver.receiveResponse(receiver, response)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.Error != nil {
|
||||||
|
return response.Error.Unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
|
||||||
|
// given byte offset.
|
||||||
|
func (driver *StorageDriverClient) ReadStream(path string, offset int64) (io.ReadCloser, error) {
|
||||||
|
if err := driver.exited(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
receiver, remoteSender := libchan.Pipe()
|
||||||
|
params := map[string]interface{}{"Path": path, "Offset": offset}
|
||||||
|
err := driver.sender.Send(&Request{Type: "ReadStream", Parameters: params, ResponseChannel: remoteSender})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
response := new(ReadStreamResponse)
|
||||||
|
err = driver.receiveResponse(receiver, response)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.Error != nil {
|
||||||
|
return nil, response.Error.Unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
return response.Reader, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteStream stores the contents of the provided io.ReadCloser at a location
|
||||||
|
// designated by the given path.
|
||||||
|
func (driver *StorageDriverClient) WriteStream(path string, offset, size int64, reader io.ReadCloser) error {
|
||||||
|
if err := driver.exited(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
receiver, remoteSender := libchan.Pipe()
|
||||||
|
params := map[string]interface{}{"Path": path, "Offset": offset, "Size": size, "Reader": reader}
|
||||||
|
err := driver.sender.Send(&Request{Type: "WriteStream", Parameters: params, ResponseChannel: remoteSender})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
response := new(WriteStreamResponse)
|
||||||
|
err = driver.receiveResponse(receiver, response)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.Error != nil {
|
||||||
|
return response.Error.Unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurrentSize retrieves the curernt size in bytes of the object at the given
|
||||||
|
// path.
|
||||||
|
func (driver *StorageDriverClient) CurrentSize(path string) (uint64, error) {
|
||||||
|
if err := driver.exited(); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
receiver, remoteSender := libchan.Pipe()
|
||||||
|
params := map[string]interface{}{"Path": path}
|
||||||
|
err := driver.sender.Send(&Request{Type: "CurrentSize", Parameters: params, ResponseChannel: remoteSender})
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
response := new(CurrentSizeResponse)
|
||||||
|
err = driver.receiveResponse(receiver, response)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.Error != nil {
|
||||||
|
return 0, response.Error.Unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
return response.Position, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List returns a list of the objects that are direct descendants of the given
|
||||||
|
// path.
|
||||||
|
func (driver *StorageDriverClient) List(path string) ([]string, error) {
|
||||||
|
if err := driver.exited(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
receiver, remoteSender := libchan.Pipe()
|
||||||
|
params := map[string]interface{}{"Path": path}
|
||||||
|
err := driver.sender.Send(&Request{Type: "List", Parameters: params, ResponseChannel: remoteSender})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
response := new(ListResponse)
|
||||||
|
err = driver.receiveResponse(receiver, response)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.Error != nil {
|
||||||
|
return nil, response.Error.Unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
return response.Keys, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move moves an object stored at sourcePath to destPath, removing the original
|
||||||
|
// object.
|
||||||
|
func (driver *StorageDriverClient) Move(sourcePath string, destPath string) error {
|
||||||
|
if err := driver.exited(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
receiver, remoteSender := libchan.Pipe()
|
||||||
|
params := map[string]interface{}{"SourcePath": sourcePath, "DestPath": destPath}
|
||||||
|
err := driver.sender.Send(&Request{Type: "Move", Parameters: params, ResponseChannel: remoteSender})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
response := new(MoveResponse)
|
||||||
|
err = driver.receiveResponse(receiver, response)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.Error != nil {
|
||||||
|
return response.Error.Unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete recursively deletes all objects stored at "path" and its subpaths.
|
||||||
|
func (driver *StorageDriverClient) Delete(path string) error {
|
||||||
|
if err := driver.exited(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
receiver, remoteSender := libchan.Pipe()
|
||||||
|
params := map[string]interface{}{"Path": path}
|
||||||
|
err := driver.sender.Send(&Request{Type: "Delete", Parameters: params, ResponseChannel: remoteSender})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
response := new(DeleteResponse)
|
||||||
|
err = driver.receiveResponse(receiver, response)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.Error != nil {
|
||||||
|
return response.Error.Unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleSubprocessExit populates the exit channel until we have explicitly
|
||||||
|
// stopped the storage driver subprocess
|
||||||
|
// Requests can select on driver.exitChan and response receiving and not hang if
|
||||||
|
// the process exits
|
||||||
|
func (driver *StorageDriverClient) handleSubprocessExit() {
|
||||||
|
exitErr := driver.subprocess.Wait()
|
||||||
|
if exitErr == nil {
|
||||||
|
exitErr = fmt.Errorf("Storage driver subprocess already exited cleanly")
|
||||||
|
} else {
|
||||||
|
exitErr = fmt.Errorf("Storage driver subprocess exited with error: %s", exitErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
driver.exitErr = exitErr
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case driver.exitChan <- exitErr:
|
||||||
|
case <-driver.stopChan:
|
||||||
|
close(driver.exitChan)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// receiveResponse populates the response value with the next result from the
|
||||||
|
// given receiver, or returns an error if receiving failed or the driver has
|
||||||
|
// stopped
|
||||||
|
func (driver *StorageDriverClient) receiveResponse(receiver libchan.Receiver, response interface{}) error {
|
||||||
|
receiveChan := make(chan error, 1)
|
||||||
|
go func(receiver libchan.Receiver, receiveChan chan<- error) {
|
||||||
|
receiveChan <- receiver.Receive(response)
|
||||||
|
}(receiver, receiveChan)
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var ok bool
|
||||||
|
select {
|
||||||
|
case err = <-receiveChan:
|
||||||
|
case err, ok = <-driver.exitChan:
|
||||||
|
if !ok {
|
||||||
|
err = driver.exitErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// exited returns an exit error if the driver has exited or nil otherwise
|
||||||
|
func (driver *StorageDriverClient) exited() error {
|
||||||
|
select {
|
||||||
|
case err, ok := <-driver.exitChan:
|
||||||
|
if !ok {
|
||||||
|
return driver.exitErr
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
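A minimal client-side usage sketch (hypothetical parameter values; assumes a registry-storagedriver-s3 executable is on $PATH, per the StorageDriverExecutablePrefix convention above):

client, err := ipc.NewDriverClient("s3", map[string]string{
    "accesskey": "AKIA...", // hypothetical credentials
    "secretkey": "...",
    "region":    "us-east-1",
    "bucket":    "my-bucket",
    "encrypt":   "false",
})
if err != nil {
    panic(err)
}
if err := client.Start(); err != nil { // spawns the subprocess and verifies versions
    panic(err)
}
defer client.Stop()

content, err := client.GetContent("/demo/key") // remote call over libchan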
148
storagedriver/ipc/ipc.go
Normal file
@@ -0,0 +1,148 @@
// +build ignore

package ipc

import (
    "fmt"
    "io"
    "reflect"

    "github.com/docker/docker-registry/storagedriver"
    "github.com/docker/libchan"
)

// StorageDriver is the interface which IPC storage drivers must implement. As external storage
// drivers may be defined to use a different version of the storagedriver.StorageDriver interface,
// we use an additional version check to determine compatibility.
type StorageDriver interface {
    // Version returns the storagedriver.StorageDriver interface version which this storage driver
    // implements, which is used to determine driver compatibility
    Version() (storagedriver.Version, error)
}

// IncompatibleVersionError is returned when a storage driver is using an incompatible version of
// the storagedriver.StorageDriver api
type IncompatibleVersionError struct {
    version storagedriver.Version
}

func (e IncompatibleVersionError) Error() string {
    return fmt.Sprintf("Incompatible storage driver version: %s", e.version)
}

// Request defines a remote method call request
// A return value struct is to be sent over the ResponseChannel
type Request struct {
    Type            string                 `codec:",omitempty"`
    Parameters      map[string]interface{} `codec:",omitempty"`
    ResponseChannel libchan.Sender         `codec:",omitempty"`
}

// ResponseError is a serializable error type.
// The Type and Parameters may be used to reconstruct the same error on the
// client side, falling back to using the Type and Message if this cannot be
// done.
type ResponseError struct {
    Type       string                 `codec:",omitempty"`
    Message    string                 `codec:",omitempty"`
    Parameters map[string]interface{} `codec:",omitempty"`
}

// WrapError wraps an error in a serializable struct containing the error's type
// and message.
func WrapError(err error) *ResponseError {
    if err == nil {
        return nil
    }
    v := reflect.ValueOf(err)
    re := ResponseError{
        Type:    v.Type().String(),
        Message: err.Error(),
    }

    if v.Kind() == reflect.Struct {
        re.Parameters = make(map[string]interface{})
        for i := 0; i < v.NumField(); i++ {
            field := v.Type().Field(i)
            re.Parameters[field.Name] = v.Field(i).Interface()
        }
    }
    return &re
}

// Unwrap returns the underlying error if it can be reconstructed, or the
// original ResponseError otherwise.
func (err *ResponseError) Unwrap() error {
    var errVal reflect.Value
    var zeroVal reflect.Value

    switch err.Type {
    case "storagedriver.PathNotFoundError":
        errVal = reflect.ValueOf(&storagedriver.PathNotFoundError{})
    case "storagedriver.InvalidOffsetError":
        errVal = reflect.ValueOf(&storagedriver.InvalidOffsetError{})
    }
    if errVal == zeroVal {
        return err
    }

    for k, v := range err.Parameters {
        fieldVal := errVal.Elem().FieldByName(k)
        if fieldVal == zeroVal {
            return err
        }
        fieldVal.Set(reflect.ValueOf(v))
    }

    if unwrapped, ok := errVal.Elem().Interface().(error); ok {
        return unwrapped
    }

    return err
}

func (err *ResponseError) Error() string {
    return fmt.Sprintf("%s: %s", err.Type, err.Message)
}

// IPC method call response object definitions

// VersionResponse is a response for a Version request
type VersionResponse struct {
    Version storagedriver.Version `codec:",omitempty"`
    Error   *ResponseError        `codec:",omitempty"`
}

// ReadStreamResponse is a response for a ReadStream request
type ReadStreamResponse struct {
    Reader io.ReadCloser  `codec:",omitempty"`
    Error  *ResponseError `codec:",omitempty"`
}

// WriteStreamResponse is a response for a WriteStream request
type WriteStreamResponse struct {
    Error *ResponseError `codec:",omitempty"`
}

// CurrentSizeResponse is a response for a CurrentSize request
type CurrentSizeResponse struct {
    Position uint64         `codec:",omitempty"`
    Error    *ResponseError `codec:",omitempty"`
}

// ListResponse is a response for a List request
type ListResponse struct {
    Keys  []string       `codec:",omitempty"`
    Error *ResponseError `codec:",omitempty"`
}

// MoveResponse is a response for a Move request
type MoveResponse struct {
    Error *ResponseError `codec:",omitempty"`
}

// DeleteResponse is a response for a Delete request
type DeleteResponse struct {
    Error *ResponseError `codec:",omitempty"`
}
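WrapError and Unwrap round-trip the two known storagedriver error types by copying exported struct fields through the Parameters map. A sketch of the intended behavior:

orig := storagedriver.PathNotFoundError{Path: "/missing"}
re := WrapError(orig)
// re.Type == "storagedriver.PathNotFoundError"
// re.Parameters["Path"] == "/missing"

if pnf, ok := re.Unwrap().(storagedriver.PathNotFoundError); ok {
    fmt.Println(pnf.Path) // "/missing", reconstructed on the receiving side
}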
178
storagedriver/ipc/server.go
Normal file
@@ -0,0 +1,178 @@
// +build ignore

package ipc

import (
    "bytes"
    "io"
    "io/ioutil"
    "net"
    "os"
    "reflect"

    "github.com/docker/docker-registry/storagedriver"
    "github.com/docker/libchan"
    "github.com/docker/libchan/spdy"
)

// StorageDriverServer runs a new IPC server handling requests for the given
// storagedriver.StorageDriver
// This explicitly uses file descriptor 3 for IPC communication, as storage drivers are spawned in
// client.go
//
// To create a new out-of-process driver, create a main package which calls StorageDriverServer with
// a storagedriver.StorageDriver
func StorageDriverServer(driver storagedriver.StorageDriver) error {
    childSocket := os.NewFile(3, "childSocket")
    defer childSocket.Close()
    conn, err := net.FileConn(childSocket)
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    if transport, err := spdy.NewServerTransport(conn); err != nil {
        panic(err)
    } else {
        for {
            receiver, err := transport.WaitReceiveChannel()
            if err == io.EOF {
                return nil
            } else if err != nil {
                panic(err)
            }
            go receive(driver, receiver)
        }
    }
}

// receive receives new storagedriver.StorageDriver method requests and creates a new goroutine to
// handle each request
// Requests are expected to be of type ipc.Request as the parameters are unknown until the request
// type is deserialized
func receive(driver storagedriver.StorageDriver, receiver libchan.Receiver) {
    for {
        var request Request
        err := receiver.Receive(&request)
        if err == io.EOF {
            return
        } else if err != nil {
            panic(err)
        }
        go handleRequest(driver, request)
    }
}

// handleRequest handles storagedriver.StorageDriver method requests as defined in client.go
// Responds to requests using the Request.ResponseChannel
func handleRequest(driver storagedriver.StorageDriver, request Request) {
    switch request.Type {
    case "Version":
        err := request.ResponseChannel.Send(&VersionResponse{Version: storagedriver.CurrentVersion})
        if err != nil {
            panic(err)
        }
    case "GetContent":
        path, _ := request.Parameters["Path"].(string)
        content, err := driver.GetContent(path)
        var response ReadStreamResponse
        if err != nil {
            response = ReadStreamResponse{Error: WrapError(err)}
        } else {
            response = ReadStreamResponse{Reader: ioutil.NopCloser(bytes.NewReader(content))}
        }
        err = request.ResponseChannel.Send(&response)
        if err != nil {
            panic(err)
        }
    case "PutContent":
        path, _ := request.Parameters["Path"].(string)
        reader, _ := request.Parameters["Reader"].(io.ReadCloser)
        contents, err := ioutil.ReadAll(reader)
        defer reader.Close()
        if err == nil {
            err = driver.PutContent(path, contents)
        }
        response := WriteStreamResponse{
            Error: WrapError(err),
        }
        err = request.ResponseChannel.Send(&response)
        if err != nil {
            panic(err)
        }
    case "ReadStream":
        path, _ := request.Parameters["Path"].(string)
        // Depending on serialization method, Offset may be converted to any int/uint type
        offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int()
        reader, err := driver.ReadStream(path, offset)
        var response ReadStreamResponse
        if err != nil {
            response = ReadStreamResponse{Error: WrapError(err)}
        } else {
            response = ReadStreamResponse{Reader: reader}
        }
        err = request.ResponseChannel.Send(&response)
        if err != nil {
            panic(err)
        }
    case "WriteStream":
        path, _ := request.Parameters["Path"].(string)
        // Depending on serialization method, Offset may be converted to any int/uint type
        offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int()
        // Depending on serialization method, Size may be converted to any int/uint type
        size := reflect.ValueOf(request.Parameters["Size"]).Convert(reflect.TypeOf(int64(0))).Int()
        reader, _ := request.Parameters["Reader"].(io.ReadCloser)
        err := driver.WriteStream(path, offset, size, reader)
        response := WriteStreamResponse{
            Error: WrapError(err),
        }
        err = request.ResponseChannel.Send(&response)
        if err != nil {
            panic(err)
        }
    case "CurrentSize":
        path, _ := request.Parameters["Path"].(string)
        position, err := driver.CurrentSize(path)
        response := CurrentSizeResponse{
            Position: position,
            Error:    WrapError(err),
        }
        err = request.ResponseChannel.Send(&response)
        if err != nil {
            panic(err)
        }
    case "List":
        path, _ := request.Parameters["Path"].(string)
        keys, err := driver.List(path)
        response := ListResponse{
            Keys:  keys,
            Error: WrapError(err),
        }
        err = request.ResponseChannel.Send(&response)
        if err != nil {
            panic(err)
        }
    case "Move":
        sourcePath, _ := request.Parameters["SourcePath"].(string)
        destPath, _ := request.Parameters["DestPath"].(string)
        err := driver.Move(sourcePath, destPath)
        response := MoveResponse{
            Error: WrapError(err),
        }
        err = request.ResponseChannel.Send(&response)
        if err != nil {
            panic(err)
        }
    case "Delete":
        path, _ := request.Parameters["Path"].(string)
        err := driver.Delete(path)
        response := DeleteResponse{
            Error: WrapError(err),
        }
        err = request.ResponseChannel.Send(&response)
        if err != nil {
            panic(err)
        }
    default:
        panic(request)
    }
}
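Following the doc comment on StorageDriverServer, a minimal sketch of an out-of-process driver executable (a hypothetical main package; assumes an inmemory driver constructor named New, with the binary installed as registry-storagedriver-inmemory so the client's LookPath can find it):

package main

import (
    "github.com/docker/docker-registry/storagedriver/inmemory"
    "github.com/docker/docker-registry/storagedriver/ipc"
)

func main() {
    // Configuration parameters arrive as a JSON blob in os.Args[1];
    // the in-memory driver takes none, so they are ignored here.
    if err := ipc.StorageDriverServer(inmemory.New()); err != nil {
        panic(err)
    }
}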
330
storagedriver/s3/s3.go
Normal file
@@ -0,0 +1,330 @@
// +build ignore

package s3

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
    "strconv"

    "github.com/crowdmob/goamz/aws"
    "github.com/crowdmob/goamz/s3"
    "github.com/docker/docker-registry/storagedriver"
    "github.com/docker/docker-registry/storagedriver/factory"
)

const driverName = "s3"

// minChunkSize defines the minimum multipart upload chunk size
// S3 API requires multipart upload chunks to be at least 5MB
const minChunkSize = 5 * 1024 * 1024

// listPartsMax is the largest number of parts you can request from S3
const listPartsMax = 1000

func init() {
    factory.Register(driverName, &s3DriverFactory{})
}

// s3DriverFactory implements the factory.StorageDriverFactory interface
type s3DriverFactory struct{}

func (factory *s3DriverFactory) Create(parameters map[string]string) (storagedriver.StorageDriver, error) {
    return FromParameters(parameters)
}

// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3
// Objects are stored at absolute keys in the provided bucket
type Driver struct {
    S3      *s3.S3
    Bucket  *s3.Bucket
    Encrypt bool
}

// FromParameters constructs a new Driver with a given parameters map
// Required parameters:
// - accesskey
// - secretkey
// - region
// - bucket
// - encrypt
func FromParameters(parameters map[string]string) (*Driver, error) {
    accessKey, ok := parameters["accesskey"]
    if !ok || accessKey == "" {
        return nil, fmt.Errorf("No accesskey parameter provided")
    }

    secretKey, ok := parameters["secretkey"]
    if !ok || secretKey == "" {
        return nil, fmt.Errorf("No secretkey parameter provided")
    }

    regionName, ok := parameters["region"]
    if !ok || regionName == "" {
        return nil, fmt.Errorf("No region parameter provided")
    }
    region := aws.GetRegion(regionName)
    if region.Name == "" {
        return nil, fmt.Errorf("Invalid region provided: %v", region)
    }

    bucket, ok := parameters["bucket"]
    if !ok || bucket == "" {
        return nil, fmt.Errorf("No bucket parameter provided")
    }

    encrypt, ok := parameters["encrypt"]
    if !ok {
        return nil, fmt.Errorf("No encrypt parameter provided")
    }

    encryptBool, err := strconv.ParseBool(encrypt)
    if err != nil {
        return nil, fmt.Errorf("Unable to parse the encrypt parameter: %v", err)
    }
    return New(accessKey, secretKey, region, encryptBool, bucket)
}

// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
// bucketName
func New(accessKey string, secretKey string, region aws.Region, encrypt bool, bucketName string) (*Driver, error) {
    auth := aws.Auth{AccessKey: accessKey, SecretKey: secretKey}
    s3obj := s3.New(auth, region)
    bucket := s3obj.Bucket(bucketName)

    if err := bucket.PutBucket(getPermissions()); err != nil {
        s3Err, ok := err.(*s3.Error)
        if !(ok && s3Err.Code == "BucketAlreadyOwnedByYou") {
            return nil, err
        }
    }

    return &Driver{s3obj, bucket, encrypt}, nil
}

// Implement the storagedriver.StorageDriver interface

// GetContent retrieves the content stored at "path" as a []byte.
func (d *Driver) GetContent(path string) ([]byte, error) {
    content, err := d.Bucket.Get(path)
    if err != nil {
        return nil, storagedriver.PathNotFoundError{Path: path}
    }
    return content, nil
}

// PutContent stores the []byte content at a location designated by "path".
func (d *Driver) PutContent(path string, contents []byte) error {
    return d.Bucket.Put(path, contents, d.getContentType(), getPermissions(), d.getOptions())
}

// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
    headers := make(http.Header)
    headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-")

    resp, err := d.Bucket.GetResponseWithHeaders(path, headers)
    if err != nil {
        return nil, storagedriver.PathNotFoundError{Path: path}
    }
    return resp.Body, nil
}

// WriteStream stores the contents of the provided io.ReadCloser at a location
// designated by the given path.
func (d *Driver) WriteStream(path string, offset, size int64, reader io.ReadCloser) error {
    defer reader.Close()

    chunkSize := int64(minChunkSize)
    for size/chunkSize >= listPartsMax {
        chunkSize *= 2
    }

    partNumber := 1
    var totalRead int64
    multi, parts, err := d.getAllParts(path)
    if err != nil {
        return err
    }

    if (offset) > int64(len(parts))*chunkSize || (offset < size && offset%chunkSize != 0) {
        return storagedriver.InvalidOffsetError{Path: path, Offset: offset}
    }

    if len(parts) > 0 {
        partNumber = int(offset/chunkSize) + 1
        totalRead = offset
        parts = parts[0 : partNumber-1]
    }

    buf := make([]byte, chunkSize)
    for {
        bytesRead, err := io.ReadFull(reader, buf)
        totalRead += int64(bytesRead)

        if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
            return err
        } else if (int64(bytesRead) < chunkSize) && totalRead != size {
            break
        } else {
            part, err := multi.PutPart(int(partNumber), bytes.NewReader(buf[0:bytesRead]))
            if err != nil {
                return err
            }

            parts = append(parts, part)
            if totalRead == size {
                multi.Complete(parts)
                break
            }

            partNumber++
        }
    }

    return nil
}

// CurrentSize retrieves the current size in bytes of the object at the given
// path.
func (d *Driver) CurrentSize(path string) (uint64, error) {
    _, parts, err := d.getAllParts(path)
    if err != nil {
        return 0, err
    }

    if len(parts) == 0 {
        return 0, nil
    }

    return (((uint64(len(parts)) - 1) * uint64(parts[0].Size)) + uint64(parts[len(parts)-1].Size)), nil
}

// List returns a list of the objects that are direct descendants of the given
// path.
func (d *Driver) List(path string) ([]string, error) {
    if path[len(path)-1] != '/' {
        path = path + "/"
    }
    listResponse, err := d.Bucket.List(path, "/", "", listPartsMax)
    if err != nil {
        return nil, err
    }

    files := []string{}
    directories := []string{}

    for {
        for _, key := range listResponse.Contents {
            files = append(files, key.Key)
        }

        for _, commonPrefix := range listResponse.CommonPrefixes {
            directories = append(directories, commonPrefix[0:len(commonPrefix)-1])
        }

        if listResponse.IsTruncated {
            listResponse, err = d.Bucket.List(path, "/", listResponse.NextMarker, listPartsMax)
            if err != nil {
                return nil, err
            }
        } else {
            break
        }
    }

    return append(files, directories...), nil
}

// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *Driver) Move(sourcePath string, destPath string) error {
    /* This is terrible, but aws doesn't have an actual move. */
    _, err := d.Bucket.PutCopy(destPath, getPermissions(),
        s3.CopyOptions{Options: d.getOptions(), MetadataDirective: "", ContentType: d.getContentType()},
        d.Bucket.Name+"/"+sourcePath)
    if err != nil {
        return storagedriver.PathNotFoundError{Path: sourcePath}
    }

    return d.Delete(sourcePath)
}

// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *Driver) Delete(path string) error {
    listResponse, err := d.Bucket.List(path, "", "", listPartsMax)
    if err != nil || len(listResponse.Contents) == 0 {
        return storagedriver.PathNotFoundError{Path: path}
    }

    s3Objects := make([]s3.Object, listPartsMax)

    for len(listResponse.Contents) > 0 {
        for index, key := range listResponse.Contents {
            s3Objects[index].Key = key.Key
        }

        err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]})
        if err != nil {
            // Propagate the delete error rather than swallowing it.
            return err
        }

        listResponse, err = d.Bucket.List(path, "", "", listPartsMax)
        if err != nil {
            return err
        }
    }

    return nil
}

func (d *Driver) getHighestIDMulti(path string) (multi *s3.Multi, err error) {
    multis, _, err := d.Bucket.ListMulti(path, "")
    if err != nil && !hasCode(err, "NoSuchUpload") {
        return nil, err
    }

    uploadID := ""

    if len(multis) > 0 {
        for _, m := range multis {
            if m.Key == path && m.UploadId >= uploadID {
                uploadID = m.UploadId
                multi = m
            }
        }
        return multi, nil
    }
    multi, err = d.Bucket.InitMulti(path, d.getContentType(), getPermissions(), d.getOptions())
    return multi, err
}

func (d *Driver) getAllParts(path string) (*s3.Multi, []s3.Part, error) {
    multi, err := d.getHighestIDMulti(path)
    if err != nil {
        return nil, nil, err
    }

    parts, err := multi.ListParts()
    return multi, parts, err
}

func hasCode(err error, code string) bool {
    s3err, ok := err.(*aws.Error)
    return ok && s3err.Code == code
}

func (d *Driver) getOptions() s3.Options {
    return s3.Options{SSE: d.Encrypt}
}

func getPermissions() s3.ACL {
    return s3.Private
}

func (d *Driver) getContentType() string {
    return "application/octet-stream"
}
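The chunk-size loop in WriteStream doubles the 5MB minimum until the whole stream fits within S3's 1000-part limit. A worked example for a hypothetical 6GB upload:

size := int64(6) * 1024 * 1024 * 1024 // 6GB
chunkSize := int64(minChunkSize)      // 5MB: 6GB/5MB = 1228 parts, too many
for size/chunkSize >= listPartsMax {
    chunkSize *= 2 // one doubling to 10MB: 6GB/10MB = 614 parts, fits
}
// chunkSize is now 10 * 1024 * 1024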
56
storagedriver/s3/s3_test.go
Normal file
@@ -0,0 +1,56 @@
// +build ignore

package s3

import (
    "os"
    "strconv"
    "testing"

    "github.com/crowdmob/goamz/aws"
    "github.com/docker/docker-registry/storagedriver"
    "github.com/docker/docker-registry/storagedriver/testsuites"

    "gopkg.in/check.v1"
)

// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { check.TestingT(t) }

func init() {
    accessKey := os.Getenv("AWS_ACCESS_KEY")
    secretKey := os.Getenv("AWS_SECRET_KEY")
    bucket := os.Getenv("S3_BUCKET")
    encrypt := os.Getenv("S3_ENCRYPT")

    s3DriverConstructor := func(region aws.Region) (storagedriver.StorageDriver, error) {
        shouldEncrypt, err := strconv.ParseBool(encrypt)
        if err != nil {
            return nil, err
        }
        return New(accessKey, secretKey, region, shouldEncrypt, bucket)
    }

    // Skip S3 storage driver tests if environment variable parameters are not provided
    skipCheck := func() string {
        if accessKey == "" || secretKey == "" || bucket == "" || encrypt == "" {
            return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, S3_BUCKET, and S3_ENCRYPT to run S3 tests"
        }
        return ""
    }

    for _, region := range aws.Regions {
        if region == aws.USGovWest {
            continue
        }

        testsuites.RegisterInProcessSuite(s3DriverConstructor(region), skipCheck)
        testsuites.RegisterIPCSuite(driverName, map[string]string{
            "accesskey": accessKey,
            "secretkey": secretKey,
            "region":    region.Name,
            "bucket":    bucket,
            "encrypt":   encrypt,
        }, skipCheck)
    }
}
114
storagedriver/storagedriver.go
Normal file
@@ -0,0 +1,114 @@
package storagedriver

import (
    "fmt"
    "io"
    "regexp"
    "strconv"
    "strings"
)

// Version is a string representing the storage driver version, of the form
// Major.Minor.
// The registry must accept storage drivers with equal major version and greater
// minor version, but may not be compatible with older storage driver versions.
type Version string

// Major returns the major (primary) component of a version.
func (version Version) Major() uint {
    majorPart := strings.Split(string(version), ".")[0]
    major, _ := strconv.ParseUint(majorPart, 10, 0)
    return uint(major)
}

// Minor returns the minor (secondary) component of a version.
func (version Version) Minor() uint {
    minorPart := strings.Split(string(version), ".")[1]
    minor, _ := strconv.ParseUint(minorPart, 10, 0)
    return uint(minor)
}

// CurrentVersion is the current storage driver Version.
const CurrentVersion Version = "0.1"

// StorageDriver defines methods that a Storage Driver must implement for a
// filesystem-like key/value object storage.
type StorageDriver interface {
    // GetContent retrieves the content stored at "path" as a []byte.
    // This should primarily be used for small objects.
    GetContent(path string) ([]byte, error)

    // PutContent stores the []byte content at a location designated by "path".
    // This should primarily be used for small objects.
    PutContent(path string, content []byte) error

    // ReadStream retrieves an io.ReadCloser for the content stored at "path"
    // with a given byte offset.
    // May be used to resume reading a stream by providing a nonzero offset.
    ReadStream(path string, offset int64) (io.ReadCloser, error)

    // WriteStream stores the contents of the provided io.ReadCloser at a
    // location designated by the given path.
    // The driver will know it has received the full contents when it has read
    // "size" bytes.
    // May be used to resume writing a stream by providing a nonzero offset.
    // The offset must be no larger than the CurrentSize for this path.
    WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error)

    // Stat retrieves the FileInfo for the given path, including the current
    // size in bytes and the creation time.
    Stat(path string) (FileInfo, error)

    // List returns a list of the objects that are direct descendants of the
    // given path.
    List(path string) ([]string, error)

    // Move moves an object stored at sourcePath to destPath, removing the
    // original object.
    // Note: This may be no more efficient than a copy followed by a delete for
    // many implementations.
    Move(sourcePath string, destPath string) error

    // Delete recursively deletes all objects stored at "path" and its subpaths.
    Delete(path string) error
}

// PathComponentRegexp is the regular expression which each repository path
// component must match.
// A component of a repository path must be at least two characters, optionally
// separated by periods, dashes or underscores.
var PathComponentRegexp = regexp.MustCompile(`[a-z0-9]+([._-]?[a-z0-9])+`)

// PathRegexp is the regular expression which each repository path must match.
// A repository path is absolute, beginning with a slash and containing a
// positive number of path components separated by slashes.
var PathRegexp = regexp.MustCompile(`^(/[a-z0-9]+([._-]?[a-z0-9])+)+$`)

// PathNotFoundError is returned when operating on a nonexistent path.
type PathNotFoundError struct {
    Path string
}

func (err PathNotFoundError) Error() string {
    return fmt.Sprintf("Path not found: %s", err.Path)
}

// InvalidPathError is returned when the provided path is malformed.
type InvalidPathError struct {
    Path string
}

func (err InvalidPathError) Error() string {
    return fmt.Sprintf("Invalid path: %s", err.Path)
}

// InvalidOffsetError is returned when attempting to read or write from an
// invalid offset.
type InvalidOffsetError struct {
    Path   string
    Offset int64
}

func (err InvalidOffsetError) Error() string {
    return fmt.Sprintf("Invalid offset: %d for path: %s", err.Offset, err.Path)
}
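A short sketch of the Version helpers and path regular expressions in use:

v := storagedriver.Version("0.1")
_ = v.Major() // 0
_ = v.Minor() // 1

// The compatibility rule applied by the IPC client: equal major version,
// and the driver's minor version no greater than CurrentVersion's.

storagedriver.PathRegexp.MatchString("/docker/registry") // true
storagedriver.PathRegexp.MatchString("docker/registry")  // false: not absolute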
1026
storagedriver/testsuites/testsuites.go
Normal file
File diff suppressed because it is too large
58
tags.go
Normal file
@@ -0,0 +1,58 @@
package registry

import (
    "encoding/json"
    "net/http"

    "github.com/docker/docker-registry/api/v2"
    "github.com/docker/docker-registry/storage"
    "github.com/gorilla/handlers"
)

// tagsDispatcher constructs the tags handler api endpoint.
func tagsDispatcher(ctx *Context, r *http.Request) http.Handler {
    tagsHandler := &tagsHandler{
        Context: ctx,
    }

    return handlers.MethodHandler{
        "GET": http.HandlerFunc(tagsHandler.GetTags),
    }
}

// tagsHandler handles requests for lists of tags under a repository name.
type tagsHandler struct {
    *Context
}

type tagsAPIResponse struct {
    Name string   `json:"name"`
    Tags []string `json:"tags"`
}

// GetTags returns a json list of tags for a specific image name.
func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
    defer r.Body.Close()
    manifests := th.services.Manifests()

    tags, err := manifests.Tags(th.Name)
    if err != nil {
        switch err := err.(type) {
        case storage.ErrUnknownRepository:
            w.WriteHeader(404)
            th.Errors.Push(v2.ErrorCodeNameUnknown, map[string]string{"name": th.Name})
        default:
            th.Errors.PushErr(err)
        }
        return
    }

    enc := json.NewEncoder(w)
    if err := enc.Encode(tagsAPIResponse{
        Name: th.Name,
        Tags: tags,
    }); err != nil {
        th.Errors.PushErr(err)
        return
    }
}
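For illustration, the success path of GetTags encodes a body of this shape (hypothetical repository name and tags):

// The handler streams the equivalent of:
//   {"name":"library/ubuntu","tags":["14.04","latest"]}
resp := tagsAPIResponse{Name: "library/ubuntu", Tags: []string{"14.04", "latest"}}
body, _ := json.Marshal(resp) // same encoding the handler produces with json.NewEncoder
_ = body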
27
util.go
Normal file
@@ -0,0 +1,27 @@
package registry

import (
    "net/http"
    "reflect"
    "runtime"

    "github.com/gorilla/handlers"
)

// functionName returns the name of the function fn.
func functionName(fn interface{}) string {
    return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
}

// resolveHandlerName attempts to resolve a nice, pretty name for the passed
// in handler.
func resolveHandlerName(method string, handler http.Handler) string {
    switch v := handler.(type) {
    case handlers.MethodHandler:
        return functionName(v[method])
    case http.HandlerFunc:
        return functionName(v)
    default:
        return functionName(handler.ServeHTTP)
    }
}
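A short sketch of how resolveHandlerName behaves for the handler kinds it distinguishes (myGet is a hypothetical handler function):

var myGet = func(w http.ResponseWriter, r *http.Request) {}

mh := handlers.MethodHandler{"GET": http.HandlerFunc(myGet)}
name := resolveHandlerName("GET", mh) // resolves the underlying function's runtime name
_ = name

// Any other http.Handler falls through to the default case and resolves to
// the name of its ServeHTTP method.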