From dd41410647fe60a4f14e35bdc0c13fe501744398 Mon Sep 17 00:00:00 2001
From: Misty Stanley-Jones
Date: Wed, 28 Sep 2016 14:46:28 -0700
Subject: [PATCH] Moved registry docs to registry subdirectory

---
 docs/Dockerfile | 9 +
 docs/Makefile | 38 +
 docs/api/errcode/errors.go | 267 -
 docs/api/errcode/errors_test.go | 185 -
 docs/api/errcode/handler.go | 44 -
 docs/api/errcode/register.go | 138 -
 docs/api/v2/descriptors.go | 1596 -----
 docs/api/v2/doc.go | 9 -
 docs/api/v2/errors.go | 136 -
 docs/api/v2/routes.go | 49 -
 docs/api/v2/routes_test.go | 355 --
 docs/api/v2/urls.go | 251 -
 docs/api/v2/urls_test.go | 334 -
 docs/architecture.md | 54 +
 docs/auth/auth.go | 168 -
 docs/auth/htpasswd/access.go | 97 -
 docs/auth/htpasswd/access_test.go | 122 -
 docs/auth/htpasswd/htpasswd.go | 82 -
 docs/auth/htpasswd/htpasswd_test.go | 85 -
 docs/auth/silly/access.go | 97 -
 docs/auth/silly/access_test.go | 71 -
 docs/auth/token/accesscontroller.go | 268 -
 docs/auth/token/stringset.go | 35 -
 docs/auth/token/token.go | 343 -
 docs/auth/token/token_test.go | 387 --
 docs/auth/token/util.go | 58 -
 docs/client/auth/api_version.go | 58 -
 docs/client/auth/authchallenge.go | 220 -
 docs/client/auth/authchallenge_test.go | 81 -
 docs/client/auth/session.go | 480 --
 docs/client/auth/session_test.go | 787 ---
 docs/client/blob_writer.go | 162 -
 docs/client/blob_writer_test.go | 211 -
 docs/client/errors.go | 107 -
 docs/client/errors_test.go | 104 -
 docs/client/repository.go | 863 ---
 docs/client/repository_test.go | 1182 ----
 docs/client/transport/http_reader.go | 250 -
 docs/client/transport/transport.go | 147 -
 docs/compatibility.md | 84 +
 docs/configuration.md | 1877 ++++
 docs/deploying.md | 237 +
 docs/deprecated.md | 27 +
 docs/doc.go | 2 -
 docs/garbage-collection.md | 137 +
 docs/glossary.md | 70 +
 docs/handlers/api_test.go | 2474 --------
 docs/handlers/app.go | 996 ---
 docs/handlers/app_test.go | 274 -
 docs/handlers/basicauth.go | 11 -
 docs/handlers/basicauth_prego14.go | 41 -
 docs/handlers/blob.go | 99 -
 docs/handlers/blobupload.go | 368 --
 docs/handlers/catalog.go | 95 -
 docs/handlers/context.go | 152 -
 docs/handlers/health_test.go | 201 -
 docs/handlers/helpers.go | 66 -
 docs/handlers/hmac.go | 72 -
 docs/handlers/hmac_test.go | 117 -
 docs/handlers/hooks.go | 53 -
 docs/handlers/images.go | 386 --
 docs/handlers/mail.go | 45 -
 docs/handlers/tags.go | 62 -
 docs/help.md | 24 +
 docs/images/notifications.gliffy | 1 +
 docs/images/notifications.png | Bin 0 -> 37836 bytes
 docs/images/notifications.svg | 1 +
 docs/images/v2-registry-auth.png | Bin 0 -> 12590 bytes
 docs/index.md | 67 +
 docs/insecure.md | 114 +
 docs/introduction.md | 55 +
 docs/listener/listener.go | 74 -
 docs/menu.md | 23 +
 docs/middleware/registry/middleware.go | 54 -
 docs/middleware/repository/middleware.go | 40 -
 docs/migration.md | 30 +
 docs/notifications.md | 350 ++
 docs/proxy/proxyauth.go | 58 -
 docs/proxy/proxyblobstore.go | 222 -
 docs/proxy/proxyblobstore_test.go | 409 --
 docs/proxy/proxymanifeststore.go | 95 -
 docs/proxy/proxymanifeststore_test.go | 274 -
 docs/proxy/proxymetrics.go | 74 -
 docs/proxy/proxyregistry.go | 248 -
 docs/proxy/proxytagservice.go | 65 -
 docs/proxy/proxytagservice_test.go | 182 -
 docs/proxy/scheduler/scheduler.go | 258 -
 docs/proxy/scheduler/scheduler_test.go | 188 -
 docs/recipes/apache.md | 215 +
 docs/recipes/index.md | 37 +
 docs/recipes/menu.md | 21 +
 docs/recipes/mirror.md | 74 +
 docs/recipes/nginx.md | 190 +
 docs/recipes/osx-setup-guide.md | 81 +
 docs/recipes/osx/com.docker.registry.plist | 42 +
 docs/recipes/osx/config.yml | 16 +
 docs/registry.go | 345 --
 docs/root.go | 84 -
 docs/spec/api.md | 5489 +++++++++++++++++
 docs/spec/api.md.tmpl | 1219 ++++
 docs/spec/auth/index.md | 17 +
 docs/spec/auth/jwt.md | 334 +
 docs/spec/auth/oauth.md | 191 +
 docs/spec/auth/scope.md | 143 +
 docs/spec/auth/token.md | 255 +
 docs/spec/implementations.md | 32 +
 docs/spec/index.md | 17 +
 docs/spec/json.md | 94 +
 docs/spec/manifest-v2-1.md | 167 +
 docs/spec/manifest-v2-2.md | 296 +
 docs/spec/menu.md | 13 +
 docs/storage-drivers/azure.md | 78 +
 docs/storage-drivers/filesystem.md | 24 +
 docs/storage-drivers/gcs.md | 78 +
 docs/storage-drivers/index.md | 66 +
 docs/storage-drivers/inmemory.md | 23 +
 docs/storage-drivers/menu.md | 13 +
 docs/storage-drivers/oss.md | 126 +
 docs/storage-drivers/s3.md | 268 +
 docs/storage-drivers/swift.md | 246 +
 docs/storage/blob_test.go | 614 --
 docs/storage/blobcachemetrics.go | 60 -
 docs/storage/blobserver.go | 78 -
 docs/storage/blobstore.go | 223 -
 docs/storage/blobwriter.go | 399 --
 docs/storage/blobwriter_nonresumable.go | 17 -
 docs/storage/blobwriter_resumable.go | 145 -
 docs/storage/cache/cache.go | 35 -
 docs/storage/cache/cachecheck/suite.go | 180 -
 .../cache/cachedblobdescriptorstore.go | 101 -
 docs/storage/cache/memory/memory.go | 170 -
 docs/storage/cache/memory/memory_test.go | 13 -
 docs/storage/cache/redis/redis.go | 268 -
 docs/storage/cache/redis/redis_test.go | 51 -
 docs/storage/catalog.go | 97 -
 docs/storage/catalog_test.go | 125 -
 docs/storage/doc.go | 3 -
 docs/storage/driver/azure/azure.go | 482 --
 docs/storage/driver/azure/azure_test.go | 63 -
 docs/storage/driver/base/base.go | 198 -
 docs/storage/driver/base/regulator.go | 145 -
 docs/storage/driver/factory/factory.go | 64 -
 docs/storage/driver/fileinfo.go | 79 -
 docs/storage/driver/filesystem/driver.go | 440 --
 docs/storage/driver/filesystem/driver_test.go | 113 -
 docs/storage/driver/gcs/doc.go | 3 -
 docs/storage/driver/gcs/gcs.go | 873 ---
 docs/storage/driver/gcs/gcs_test.go | 311 -
 docs/storage/driver/inmemory/driver.go | 312 -
 docs/storage/driver/inmemory/driver_test.go | 19 -
 docs/storage/driver/inmemory/mfs.go | 338 -
 .../middleware/cloudfront/middleware.go | 136 -
 .../driver/middleware/redirect/middleware.go | 50 -
 .../middleware/redirect/middleware_test.go | 58 -
 .../driver/middleware/storagemiddleware.go | 39 -
 docs/storage/driver/oss/doc.go | 3 -
 docs/storage/driver/oss/oss.go | 670 --
 docs/storage/driver/oss/oss_test.go | 144 -
 docs/storage/driver/s3-aws/s3.go | 977 ---
 docs/storage/driver/s3-aws/s3_test.go | 238 -
 docs/storage/driver/s3-goamz/s3.go | 746 ---
 docs/storage/driver/s3-goamz/s3_test.go | 201 -
 docs/storage/driver/storagedriver.go | 165 -
 docs/storage/driver/swift/swift.go | 837 ---
 docs/storage/driver/swift/swift_test.go | 177 -
 docs/storage/driver/testdriver/testdriver.go | 71 -
 docs/storage/driver/testsuites/testsuites.go | 1229 ----
 docs/storage/filereader.go | 177 -
 docs/storage/filereader_test.go | 199 -
 docs/storage/garbagecollect.go | 133 -
 docs/storage/garbagecollect_test.go | 376 --
 docs/storage/linkedblobstore.go | 472 --
 docs/storage/manifestlisthandler.go | 96 -
 docs/storage/manifeststore.go | 141 -
 docs/storage/manifeststore_test.go | 391 --
 docs/storage/paths.go | 490 --
 docs/storage/paths_test.go | 135 -
 docs/storage/purgeuploads.go | 139 -
 docs/storage/purgeuploads_test.go | 166 -
 docs/storage/registry.go | 279 -
 docs/storage/schema2manifesthandler.go | 128 -
 docs/storage/schema2manifesthandler_test.go | 117 -
 docs/storage/signedmanifesthandler.go | 145 -
 docs/storage/tagstore.go | 191 -
 docs/storage/tagstore_test.go | 209 -
 docs/storage/util.go | 21 -
 docs/storage/vacuum.go | 67 -
 docs/storage/walk.go | 59 -
 docs/storage/walk_test.go | 152 -
 189 files changed, 13063 insertions(+), 34056 deletions(-)
 create mode 100644 docs/Dockerfile
 create mode 100644 docs/Makefile
 delete mode 100644 docs/api/errcode/errors.go
 delete mode 100644 docs/api/errcode/errors_test.go
 delete mode 100644 docs/api/errcode/handler.go
 delete mode 100644 docs/api/errcode/register.go
 delete mode 100644 docs/api/v2/descriptors.go
 delete mode 100644 docs/api/v2/doc.go
 delete mode 100644 docs/api/v2/errors.go
 delete mode 100644 docs/api/v2/routes.go
 delete mode 100644 docs/api/v2/routes_test.go
 delete mode 100644 docs/api/v2/urls.go
 delete mode 100644 docs/api/v2/urls_test.go
 create mode 100644 docs/architecture.md
 delete mode 100644 docs/auth/auth.go
 delete mode 100644 docs/auth/htpasswd/access.go
 delete mode 100644 docs/auth/htpasswd/access_test.go
 delete mode 100644 docs/auth/htpasswd/htpasswd.go
 delete mode 100644 docs/auth/htpasswd/htpasswd_test.go
 delete mode 100644 docs/auth/silly/access.go
 delete mode 100644 docs/auth/silly/access_test.go
 delete mode 100644 docs/auth/token/accesscontroller.go
 delete mode 100644 docs/auth/token/stringset.go
 delete mode 100644 docs/auth/token/token.go
 delete mode 100644 docs/auth/token/token_test.go
 delete mode 100644 docs/auth/token/util.go
 delete mode 100644 docs/client/auth/api_version.go
 delete mode 100644 docs/client/auth/authchallenge.go
 delete mode 100644 docs/client/auth/authchallenge_test.go
 delete mode 100644 docs/client/auth/session.go
 delete mode 100644 docs/client/auth/session_test.go
 delete mode 100644 docs/client/blob_writer.go
 delete mode 100644 docs/client/blob_writer_test.go
 delete mode 100644 docs/client/errors.go
 delete mode 100644 docs/client/errors_test.go
 delete mode 100644 docs/client/repository.go
 delete mode 100644 docs/client/repository_test.go
 delete mode 100644 docs/client/transport/http_reader.go
 delete mode 100644 docs/client/transport/transport.go
 create mode 100644 docs/compatibility.md
 create mode 100644 docs/configuration.md
 create mode 100644 docs/deploying.md
 create mode 100644 docs/deprecated.md
 delete mode 100644 docs/doc.go
 create mode 100644 docs/garbage-collection.md
 create mode 100644 docs/glossary.md
 delete mode 100644 docs/handlers/api_test.go
 delete mode 100644 docs/handlers/app.go
 delete mode 100644 docs/handlers/app_test.go
 delete mode 100644 docs/handlers/basicauth.go
 delete mode 100644 docs/handlers/basicauth_prego14.go
 delete mode 100644 docs/handlers/blob.go
 delete mode 100644 docs/handlers/blobupload.go
 delete mode 100644 docs/handlers/catalog.go
 delete mode 100644 docs/handlers/context.go
 delete mode 100644 docs/handlers/health_test.go
 delete mode 100644 docs/handlers/helpers.go
 delete mode 100644 docs/handlers/hmac.go
 delete mode 100644 docs/handlers/hmac_test.go
 delete mode 100644 docs/handlers/hooks.go
 delete mode 100644 docs/handlers/images.go
 delete mode 100644 docs/handlers/mail.go
 delete mode 100644 docs/handlers/tags.go
 create mode 100644 docs/help.md
 create mode 100644 docs/images/notifications.gliffy
 create mode 100644 docs/images/notifications.png
 create mode 100644 docs/images/notifications.svg
 create mode 100644 docs/images/v2-registry-auth.png
 create mode 100644 docs/index.md
 create mode 100644 docs/insecure.md
 create mode 100644 docs/introduction.md
 delete mode 100644 docs/listener/listener.go
 create mode 100644 docs/menu.md
 delete mode 100644 docs/middleware/registry/middleware.go
 delete mode 100644 docs/middleware/repository/middleware.go
 create mode 100644 docs/migration.md
 create mode 100644 docs/notifications.md
 delete mode 100644 docs/proxy/proxyauth.go
 delete mode 100644 docs/proxy/proxyblobstore.go
 delete mode 100644 docs/proxy/proxyblobstore_test.go
 delete mode 100644 docs/proxy/proxymanifeststore.go
 delete mode 100644 docs/proxy/proxymanifeststore_test.go
 delete mode 100644 docs/proxy/proxymetrics.go
 delete mode 100644 docs/proxy/proxyregistry.go
 delete mode 100644 docs/proxy/proxytagservice.go
 delete mode 100644 docs/proxy/proxytagservice_test.go
 delete mode 100644 docs/proxy/scheduler/scheduler.go
 delete mode 100644 docs/proxy/scheduler/scheduler_test.go
 create mode 100644 docs/recipes/apache.md
 create mode 100644 docs/recipes/index.md
 create mode 100644 docs/recipes/menu.md
 create mode 100644 docs/recipes/mirror.md
 create mode 100644 docs/recipes/nginx.md
 create mode 100644 docs/recipes/osx-setup-guide.md
 create mode 100644 docs/recipes/osx/com.docker.registry.plist
 create mode 100644 docs/recipes/osx/config.yml
 delete mode 100644 docs/registry.go
 delete mode 100644 docs/root.go
 create mode 100644 docs/spec/api.md
 create mode 100644 docs/spec/api.md.tmpl
 create mode 100644 docs/spec/auth/index.md
 create mode 100644 docs/spec/auth/jwt.md
 create mode 100644 docs/spec/auth/oauth.md
 create mode 100644 docs/spec/auth/scope.md
 create mode 100644 docs/spec/auth/token.md
 create mode 100644 docs/spec/implementations.md
 create mode 100644 docs/spec/index.md
 create mode 100644 docs/spec/json.md
 create mode 100644 docs/spec/manifest-v2-1.md
 create mode 100644 docs/spec/manifest-v2-2.md
 create mode 100644 docs/spec/menu.md
 create mode 100644 docs/storage-drivers/azure.md
 create mode 100644 docs/storage-drivers/filesystem.md
 create mode 100644 docs/storage-drivers/gcs.md
 create mode 100644 docs/storage-drivers/index.md
 create mode 100644 docs/storage-drivers/inmemory.md
 create mode 100644 docs/storage-drivers/menu.md
 create mode 100644 docs/storage-drivers/oss.md
 create mode 100644 docs/storage-drivers/s3.md
 create mode 100644 docs/storage-drivers/swift.md
 delete mode 100644 docs/storage/blob_test.go
 delete mode 100644 docs/storage/blobcachemetrics.go
 delete mode 100644 docs/storage/blobserver.go
 delete mode 100644 docs/storage/blobstore.go
 delete mode 100644 docs/storage/blobwriter.go
 delete mode 100644 docs/storage/blobwriter_nonresumable.go
 delete mode 100644 docs/storage/blobwriter_resumable.go
 delete mode 100644 docs/storage/cache/cache.go
 delete mode 100644 docs/storage/cache/cachecheck/suite.go
 delete mode 100644 docs/storage/cache/cachedblobdescriptorstore.go
 delete mode 100644 docs/storage/cache/memory/memory.go
 delete mode 100644 docs/storage/cache/memory/memory_test.go
 delete mode 100644 docs/storage/cache/redis/redis.go
 delete mode 100644 docs/storage/cache/redis/redis_test.go
 delete mode 100644 docs/storage/catalog.go
 delete mode 100644 docs/storage/catalog_test.go
 delete mode 100644 docs/storage/doc.go
 delete mode 100644 docs/storage/driver/azure/azure.go
 delete mode 100644 docs/storage/driver/azure/azure_test.go
 delete mode 100644 docs/storage/driver/base/base.go
 delete mode 100644 docs/storage/driver/base/regulator.go
 delete mode 100644 docs/storage/driver/factory/factory.go
 delete mode 100644 docs/storage/driver/fileinfo.go
 delete mode 100644 docs/storage/driver/filesystem/driver.go
 delete mode 100644 docs/storage/driver/filesystem/driver_test.go
 delete mode 100644 docs/storage/driver/gcs/doc.go
 delete mode 100644 docs/storage/driver/gcs/gcs.go
 delete mode 100644 docs/storage/driver/gcs/gcs_test.go
 delete mode 100644 docs/storage/driver/inmemory/driver.go
 delete mode 100644 docs/storage/driver/inmemory/driver_test.go
 delete mode 100644 docs/storage/driver/inmemory/mfs.go
 delete mode 100644 docs/storage/driver/middleware/cloudfront/middleware.go
 delete mode 100644 docs/storage/driver/middleware/redirect/middleware.go
 delete mode 100644 docs/storage/driver/middleware/redirect/middleware_test.go
 delete mode 100644 docs/storage/driver/middleware/storagemiddleware.go
 delete mode 100644 docs/storage/driver/oss/doc.go
 delete mode 100644 docs/storage/driver/oss/oss.go
 delete mode 100644 docs/storage/driver/oss/oss_test.go
 delete mode 100644 docs/storage/driver/s3-aws/s3.go
 delete mode 100644 docs/storage/driver/s3-aws/s3_test.go
 delete mode 100644 docs/storage/driver/s3-goamz/s3.go
 delete mode 100644 docs/storage/driver/s3-goamz/s3_test.go
 delete mode 100644 docs/storage/driver/storagedriver.go
 delete mode 100644 docs/storage/driver/swift/swift.go
 delete mode 100644 docs/storage/driver/swift/swift_test.go
 delete mode 100644 docs/storage/driver/testdriver/testdriver.go
 delete mode 100644 docs/storage/driver/testsuites/testsuites.go
 delete mode 100644 docs/storage/filereader.go
 delete mode 100644 docs/storage/filereader_test.go
 delete mode 100644 docs/storage/garbagecollect.go
 delete mode 100644 docs/storage/garbagecollect_test.go
 delete mode 100644 docs/storage/linkedblobstore.go
 delete mode 100644 docs/storage/manifestlisthandler.go
 delete mode 100644 docs/storage/manifeststore.go
 delete mode 100644 docs/storage/manifeststore_test.go
 delete mode 100644 docs/storage/paths.go
 delete mode 100644 docs/storage/paths_test.go
 delete mode 100644 docs/storage/purgeuploads.go
 delete mode 100644 docs/storage/purgeuploads_test.go
 delete mode 100644 docs/storage/registry.go
 delete mode 100644 docs/storage/schema2manifesthandler.go
 delete mode 100644 docs/storage/schema2manifesthandler_test.go
 delete mode 100644 docs/storage/signedmanifesthandler.go
 delete mode 100644 docs/storage/tagstore.go
 delete mode 100644 docs/storage/tagstore_test.go
 delete mode 100644 docs/storage/util.go
 delete mode 100644 docs/storage/vacuum.go
 delete mode 100644 docs/storage/walk.go
 delete mode 100644 docs/storage/walk_test.go

diff --git a/docs/Dockerfile b/docs/Dockerfile
new file mode 100644
index 00000000..fcc63422
--- /dev/null
+++ b/docs/Dockerfile
@@ -0,0 +1,9 @@
+FROM docs/base:oss
+MAINTAINER Docker Docs
+
+ENV PROJECT=registry
+
+# To get the git info for this repo
+COPY . /src
+RUN rm -rf /docs/content/$PROJECT/
+COPY . /docs/content/$PROJECT/
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 00000000..585bc871
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,38 @@
+.PHONY: all default docs docs-build docs-shell shell test
+
+# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs)
+DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
+
+# to allow `make DOCSPORT=9000 docs`
+DOCSPORT := 8000
+
+# Get the IP ADDRESS
+DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''")
+HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)")
+HUGO_BIND_IP=0.0.0.0
+
+GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
+GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
+DOCKER_DOCS_IMAGE := registry-docs$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
+
+DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE
+
+# for some docs workarounds (see below in "docs-build" target)
+GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
+
+default: docs
+
+docs: docs-build
+	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)
+
+docs-draft: docs-build
+	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)
+
+docs-shell: docs-build
+	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
+
+docs-build:
+	docker build -t "$(DOCKER_DOCS_IMAGE)" .
+
+test: docs-build
+	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)"
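
The Makefile above drives the Hugo docs toolchain entirely through Docker: docs-build produces the image and the serving targets run Hugo inside it. Going by the targets and the Makefile's own comments, the expected invocations look like this (local Docker and GNU make assumed):

    make docs                # build the image and serve the docs on port 8000
    make DOCSPORT=9000 docs  # serve on an alternate port
    make docs-draft          # also render draft pages
    make docs-shell          # open a shell inside the docs container
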
diff --git a/docs/api/errcode/errors.go b/docs/api/errcode/errors.go
deleted file mode 100644
index 6d9bb4b6..00000000
--- a/docs/api/errcode/errors.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package errcode
-
-import (
-	"encoding/json"
-	"fmt"
-	"strings"
-)
-
-// ErrorCoder is the base interface for ErrorCode and Error allowing
-// users of each to just call ErrorCode to get the real ID of each
-type ErrorCoder interface {
-	ErrorCode() ErrorCode
-}
-
-// ErrorCode represents the error type. The errors are serialized via strings
-// and the integer format may change and should *never* be exported.
-type ErrorCode int
-
-var _ error = ErrorCode(0)
-
-// ErrorCode just returns itself
-func (ec ErrorCode) ErrorCode() ErrorCode {
-	return ec
-}
-
-// Error returns the ID/Value
-func (ec ErrorCode) Error() string {
-	// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
-	return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
-}
-
-// Descriptor returns the descriptor for the error code.
-func (ec ErrorCode) Descriptor() ErrorDescriptor {
-	d, ok := errorCodeToDescriptors[ec]
-
-	if !ok {
-		return ErrorCodeUnknown.Descriptor()
-	}
-
-	return d
-}
-
-// String returns the canonical identifier for this error code.
-func (ec ErrorCode) String() string {
-	return ec.Descriptor().Value
-}
-
-// Message returned the human-readable error message for this error code.
-func (ec ErrorCode) Message() string {
-	return ec.Descriptor().Message
-}
-
-// MarshalText encodes the receiver into UTF-8-encoded text and returns the
-// result.
-func (ec ErrorCode) MarshalText() (text []byte, err error) {
-	return []byte(ec.String()), nil
-}
-
-// UnmarshalText decodes the form generated by MarshalText.
-func (ec *ErrorCode) UnmarshalText(text []byte) error {
-	desc, ok := idToDescriptors[string(text)]
-
-	if !ok {
-		desc = ErrorCodeUnknown.Descriptor()
-	}
-
-	*ec = desc.Code
-
-	return nil
-}
-
-// WithMessage creates a new Error struct based on the passed-in info and
-// overrides the Message property.
-func (ec ErrorCode) WithMessage(message string) Error {
-	return Error{
-		Code:    ec,
-		Message: message,
-	}
-}
-
-// WithDetail creates a new Error struct based on the passed-in info and
-// set the Detail property appropriately
-func (ec ErrorCode) WithDetail(detail interface{}) Error {
-	return Error{
-		Code:    ec,
-		Message: ec.Message(),
-	}.WithDetail(detail)
-}
-
-// WithArgs creates a new Error struct and sets the Args slice
-func (ec ErrorCode) WithArgs(args ...interface{}) Error {
-	return Error{
-		Code:    ec,
-		Message: ec.Message(),
-	}.WithArgs(args...)
-}
-
-// Error provides a wrapper around ErrorCode with extra Details provided.
-type Error struct {
-	Code    ErrorCode   `json:"code"`
-	Message string      `json:"message"`
-	Detail  interface{} `json:"detail,omitempty"`
-
-	// TODO(duglin): See if we need an "args" property so we can do the
-	// variable substitution right before showing the message to the user
-}
-
-var _ error = Error{}
-
-// ErrorCode returns the ID/Value of this Error
-func (e Error) ErrorCode() ErrorCode {
-	return e.Code
-}
-
-// Error returns a human readable representation of the error.
-func (e Error) Error() string {
-	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
-}
-
-// WithDetail will return a new Error, based on the current one, but with
-// some Detail info added
-func (e Error) WithDetail(detail interface{}) Error {
-	return Error{
-		Code:    e.Code,
-		Message: e.Message,
-		Detail:  detail,
-	}
-}
-
-// WithArgs uses the passed-in list of interface{} as the substitution
-// variables in the Error's Message string, but returns a new Error
-func (e Error) WithArgs(args ...interface{}) Error {
-	return Error{
-		Code:    e.Code,
-		Message: fmt.Sprintf(e.Code.Message(), args...),
-		Detail:  e.Detail,
-	}
-}
-
-// ErrorDescriptor provides relevant information about a given error code.
-type ErrorDescriptor struct {
-	// Code is the error code that this descriptor describes.
-	Code ErrorCode
-
-	// Value provides a unique, string key, often captilized with
-	// underscores, to identify the error code. This value is used as the
-	// keyed value when serializing api errors.
-	Value string
-
-	// Message is a short, human readable decription of the error condition
-	// included in API responses.
-	Message string
-
-	// Description provides a complete account of the errors purpose, suitable
-	// for use in documentation.
-	Description string
-
-	// HTTPStatusCode provides the http status code that is associated with
-	// this error condition.
-	HTTPStatusCode int
-}
-
-// ParseErrorCode returns the value by the string error code.
-// `ErrorCodeUnknown` will be returned if the error is not known.
-func ParseErrorCode(value string) ErrorCode {
-	ed, ok := idToDescriptors[value]
-	if ok {
-		return ed.Code
-	}
-
-	return ErrorCodeUnknown
-}
-
-// Errors provides the envelope for multiple errors and a few sugar methods
-// for use within the application.
-type Errors []error
-
-var _ error = Errors{}
-
-func (errs Errors) Error() string {
-	switch len(errs) {
-	case 0:
-		return ""
-	case 1:
-		return errs[0].Error()
-	default:
-		msg := "errors:\n"
-		for _, err := range errs {
-			msg += err.Error() + "\n"
-		}
-		return msg
-	}
-}
-
-// Len returns the current number of errors.
-func (errs Errors) Len() int {
-	return len(errs)
-}
-
-// MarshalJSON converts slice of error, ErrorCode or Error into a
-// slice of Error - then serializes
-func (errs Errors) MarshalJSON() ([]byte, error) {
-	var tmpErrs struct {
-		Errors []Error `json:"errors,omitempty"`
-	}
-
-	for _, daErr := range errs {
-		var err Error
-
-		switch daErr.(type) {
-		case ErrorCode:
-			err = daErr.(ErrorCode).WithDetail(nil)
-		case Error:
-			err = daErr.(Error)
-		default:
-			err = ErrorCodeUnknown.WithDetail(daErr)
-
-		}
-
-		// If the Error struct was setup and they forgot to set the
-		// Message field (meaning its "") then grab it from the ErrCode
-		msg := err.Message
-		if msg == "" {
-			msg = err.Code.Message()
-		}
-
-		tmpErrs.Errors = append(tmpErrs.Errors, Error{
-			Code:    err.Code,
-			Message: msg,
-			Detail:  err.Detail,
-		})
-	}
-
-	return json.Marshal(tmpErrs)
-}
-
-// UnmarshalJSON deserializes []Error and then converts it into slice of
-// Error or ErrorCode
-func (errs *Errors) UnmarshalJSON(data []byte) error {
-	var tmpErrs struct {
-		Errors []Error
-	}
-
-	if err := json.Unmarshal(data, &tmpErrs); err != nil {
-		return err
-	}
-
-	var newErrs Errors
-	for _, daErr := range tmpErrs.Errors {
-		// If Message is empty or exactly matches the Code's message string
-		// then just use the Code, no need for a full Error struct
-		if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
-			// Error's w/o details get converted to ErrorCode
-			newErrs = append(newErrs, daErr.Code)
-		} else {
-			// Error's w/ details are untouched
-			newErrs = append(newErrs, Error{
-				Code:    daErr.Code,
-				Message: daErr.Message,
-				Detail:  daErr.Detail,
-			})
-		}
-	}
-
-	*errs = newErrs
-	return nil
-}
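
The errors.go file deleted above defines the whole error-envelope API shared by the registry and its clients. A minimal sketch of how a consumer drives it, assuming the canonical import path used elsewhere in this patch; the "EXAMPLE_NAME_INVALID" descriptor is hypothetical, registered the same way register.go (below) registers the common codes:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"net/http"

    	"github.com/docker/distribution/registry/api/errcode"
    )

    // errInvalidName is an illustrative error code, not one that ships
    // with the package.
    var errInvalidName = errcode.Register("example", errcode.ErrorDescriptor{
    	Value:          "EXAMPLE_NAME_INVALID",
    	Message:        "invalid repository name %q",
    	Description:    "Returned when a repository name fails validation.",
    	HTTPStatusCode: http.StatusBadRequest,
    })

    func main() {
    	// WithArgs substitutes into the descriptor's Message format string;
    	// WithDetail attaches free-form context that survives serialization.
    	errs := errcode.Errors{
    		errInvalidName.WithArgs("UPPER/case"),
    		errcode.ErrorCodeUnknown.WithDetail("backend hiccup"),
    	}

    	// Errors marshals into the {"errors":[...]} envelope that the
    	// tests below assert on.
    	p, _ := json.Marshal(errs)
    	fmt.Println(string(p))
    }
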
diff --git a/docs/api/errcode/errors_test.go b/docs/api/errcode/errors_test.go
deleted file mode 100644
index 54e7a736..00000000
--- a/docs/api/errcode/errors_test.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package errcode
-
-import (
-	"encoding/json"
-	"net/http"
-	"reflect"
-	"strings"
-	"testing"
-)
-
-// TestErrorsManagement does a quick check of the Errors type to ensure that
-// members are properly pushed and marshaled.
-var ErrorCodeTest1 = Register("test.errors", ErrorDescriptor{
-	Value:          "TEST1",
-	Message:        "test error 1",
-	Description:    `Just a test message #1.`,
-	HTTPStatusCode: http.StatusInternalServerError,
-})
-
-var ErrorCodeTest2 = Register("test.errors", ErrorDescriptor{
-	Value:          "TEST2",
-	Message:        "test error 2",
-	Description:    `Just a test message #2.`,
-	HTTPStatusCode: http.StatusNotFound,
-})
-
-var ErrorCodeTest3 = Register("test.errors", ErrorDescriptor{
-	Value:          "TEST3",
-	Message:        "Sorry %q isn't valid",
-	Description:    `Just a test message #3.`,
-	HTTPStatusCode: http.StatusNotFound,
-})
-
-// TestErrorCodes ensures that error code format, mappings and
-// marshaling/unmarshaling. round trips are stable.
-func TestErrorCodes(t *testing.T) {
-	if len(errorCodeToDescriptors) == 0 {
-		t.Fatal("errors aren't loaded!")
-	}
-
-	for ec, desc := range errorCodeToDescriptors {
-		if ec != desc.Code {
-			t.Fatalf("error code in descriptor isn't correct, %q != %q", ec, desc.Code)
-		}
-
-		if idToDescriptors[desc.Value].Code != ec {
-			t.Fatalf("error code in idToDesc isn't correct, %q != %q", idToDescriptors[desc.Value].Code, ec)
-		}
-
-		if ec.Message() != desc.Message {
-			t.Fatalf("ec.Message doesn't mtach desc.Message: %q != %q", ec.Message(), desc.Message)
-		}
-
-		// Test (de)serializing the ErrorCode
-		p, err := json.Marshal(ec)
-		if err != nil {
-			t.Fatalf("couldn't marshal ec %v: %v", ec, err)
-		}
-
-		if len(p) <= 0 {
-			t.Fatalf("expected content in marshaled before for error code %v", ec)
-		}
-
-		// First, unmarshal to interface and ensure we have a string.
-		var ecUnspecified interface{}
-		if err := json.Unmarshal(p, &ecUnspecified); err != nil {
-			t.Fatalf("error unmarshaling error code %v: %v", ec, err)
-		}
-
-		if _, ok := ecUnspecified.(string); !ok {
-			t.Fatalf("expected a string for error code %v on unmarshal got a %T", ec, ecUnspecified)
-		}
-
-		// Now, unmarshal with the error code type and ensure they are equal
-		var ecUnmarshaled ErrorCode
-		if err := json.Unmarshal(p, &ecUnmarshaled); err != nil {
-			t.Fatalf("error unmarshaling error code %v: %v", ec, err)
-		}
-
-		if ecUnmarshaled != ec {
-			t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec)
-		}
-
-		expectedErrorString := strings.ToLower(strings.Replace(ec.Descriptor().Value, "_", " ", -1))
-		if ec.Error() != expectedErrorString {
-			t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString)
-		}
-	}
-
-}
-
-func TestErrorsManagement(t *testing.T) {
-	var errs Errors
-
-	errs = append(errs, ErrorCodeTest1)
-	errs = append(errs, ErrorCodeTest2.WithDetail(
-		map[string]interface{}{"digest": "sometestblobsumdoesntmatter"}))
-	errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE"))
-	errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data"))
-
-	p, err := json.Marshal(errs)
-
-	if err != nil {
-		t.Fatalf("error marashaling errors: %v", err)
-	}
-
-	expectedJSON := `{"errors":[` +
-		`{"code":"TEST1","message":"test error 1"},` +
-		`{"code":"TEST2","message":"test error 2","detail":{"digest":"sometestblobsumdoesntmatter"}},` +
-		`{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid"},` +
-		`{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid","detail":"data"}` +
-		`]}`
-
-	if string(p) != expectedJSON {
-		t.Fatalf("unexpected json:\ngot:\n%q\n\nexpected:\n%q", string(p), expectedJSON)
-	}
-
-	// Now test the reverse
-	var unmarshaled Errors
-	if err := json.Unmarshal(p, &unmarshaled); err != nil {
-		t.Fatalf("unexpected error unmarshaling error envelope: %v", err)
-	}
-
-	if !reflect.DeepEqual(unmarshaled, errs) {
-		t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs)
-	}
-
-	// Test the arg substitution stuff
-	e1 := unmarshaled[3].(Error)
-	exp1 := `Sorry "BOOGIE" isn't valid`
-	if e1.Message != exp1 {
-		t.Fatalf("Wrong msg, got:\n%q\n\nexpected:\n%q", e1.Message, exp1)
-	}
-
-	exp1 = "test3: " + exp1
-	if e1.Error() != exp1 {
-		t.Fatalf("Error() didn't return the right string, got:%s\nexpected:%s", e1.Error(), exp1)
-	}
-
-	// Test again with a single value this time
-	errs = Errors{ErrorCodeUnknown}
-	expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}"
-	p, err = json.Marshal(errs)
-
-	if err != nil {
-		t.Fatalf("error marashaling errors: %v", err)
-	}
-
-	if string(p) != expectedJSON {
-		t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
-	}
-
-	// Now test the reverse
-	unmarshaled = nil
-	if err := json.Unmarshal(p, &unmarshaled); err != nil {
-		t.Fatalf("unexpected error unmarshaling error envelope: %v", err)
-	}
-
-	if !reflect.DeepEqual(unmarshaled, errs) {
-		t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs)
-	}
-
-	// Verify that calling WithArgs() more than once does the right thing.
-	// Meaning creates a new Error and uses the ErrorCode Message
-	e1 = ErrorCodeTest3.WithArgs("test1")
-	e2 := e1.WithArgs("test2")
-	if &e1 == &e2 {
-		t.Fatalf("args: e2 and e1 should not be the same, but they are")
-	}
-	if e2.Message != `Sorry "test2" isn't valid` {
-		t.Fatalf("e2 had wrong message: %q", e2.Message)
-	}
-
-	// Verify that calling WithDetail() more than once does the right thing.
-	// Meaning creates a new Error and overwrites the old detail field
-	e1 = ErrorCodeTest3.WithDetail("stuff1")
-	e2 = e1.WithDetail("stuff2")
-	if &e1 == &e2 {
-		t.Fatalf("detail: e2 and e1 should not be the same, but they are")
-	}
-	if e2.Detail != `stuff2` {
-		t.Fatalf("e2 had wrong detail: %q", e2.Detail)
-	}
-
-}
diff --git a/docs/api/errcode/handler.go b/docs/api/errcode/handler.go
deleted file mode 100644
index 49a64a86..00000000
--- a/docs/api/errcode/handler.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package errcode
-
-import (
-	"encoding/json"
-	"net/http"
-)
-
-// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
-// and sets the content-type header to 'application/json'. It will handle
-// ErrorCoder and Errors, and if necessary will create an envelope.
-func ServeJSON(w http.ResponseWriter, err error) error {
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-	var sc int
-
-	switch errs := err.(type) {
-	case Errors:
-		if len(errs) < 1 {
-			break
-		}
-
-		if err, ok := errs[0].(ErrorCoder); ok {
-			sc = err.ErrorCode().Descriptor().HTTPStatusCode
-		}
-	case ErrorCoder:
-		sc = errs.ErrorCode().Descriptor().HTTPStatusCode
-		err = Errors{err} // create an envelope.
-	default:
-		// We just have an unhandled error type, so just place in an envelope
-		// and move along.
-		err = Errors{err}
-	}
-
-	if sc == 0 {
-		sc = http.StatusInternalServerError
-	}
-
-	w.WriteHeader(sc)
-
-	if err := json.NewEncoder(w).Encode(err); err != nil {
-		return err
-	}
-
-	return nil
-}
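
Handlers elsewhere in the registry hand their error values to ServeJSON, which picks the HTTP status from the error's descriptor and writes the JSON envelope. A minimal sketch; the handler and route here are made up for illustration, only ServeJSON and the error codes come from the package above:

    package main

    import (
    	"net/http"

    	"github.com/docker/distribution/registry/api/errcode"
    )

    // tagsHandler is a hypothetical endpoint that rejects unauthenticated
    // requests and lets ServeJSON derive the status code.
    func tagsHandler(w http.ResponseWriter, r *http.Request) {
    	if r.Header.Get("Authorization") == "" {
    		// ErrorCodeUnauthorized carries HTTPStatusCode 401, so ServeJSON
    		// writes that status plus the {"errors":[...]} body.
    		errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(r.URL.Path))
    		return
    	}
    	w.WriteHeader(http.StatusOK)
    }

    func main() {
    	http.HandleFunc("/v2/example/tags/list", tagsHandler)
    	http.ListenAndServe(":5000", nil)
    }
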
diff --git a/docs/api/errcode/register.go b/docs/api/errcode/register.go
deleted file mode 100644
index d1e8826c..00000000
--- a/docs/api/errcode/register.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package errcode
-
-import (
-	"fmt"
-	"net/http"
-	"sort"
-	"sync"
-)
-
-var (
-	errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
-	idToDescriptors        = map[string]ErrorDescriptor{}
-	groupToDescriptors     = map[string][]ErrorDescriptor{}
-)
-
-var (
-	// ErrorCodeUnknown is a generic error that can be used as a last
-	// resort if there is no situation-specific error message that can be used
-	ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
-		Value:   "UNKNOWN",
-		Message: "unknown error",
-		Description: `Generic error returned when the error does not have an
-			API classification.`,
-		HTTPStatusCode: http.StatusInternalServerError,
-	})
-
-	// ErrorCodeUnsupported is returned when an operation is not supported.
-	ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
-		Value:   "UNSUPPORTED",
-		Message: "The operation is unsupported.",
-		Description: `The operation was unsupported due to a missing
-			implementation or invalid set of parameters.`,
-		HTTPStatusCode: http.StatusMethodNotAllowed,
-	})
-
-	// ErrorCodeUnauthorized is returned if a request requires
-	// authentication.
-	ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
-		Value:   "UNAUTHORIZED",
-		Message: "authentication required",
-		Description: `The access controller was unable to authenticate
-			the client. Often this will be accompanied by a
-			Www-Authenticate HTTP response header indicating how to
-			authenticate.`,
-		HTTPStatusCode: http.StatusUnauthorized,
-	})
-
-	// ErrorCodeDenied is returned if a client does not have sufficient
-	// permission to perform an action.
-	ErrorCodeDenied = Register("errcode", ErrorDescriptor{
-		Value:   "DENIED",
-		Message: "requested access to the resource is denied",
-		Description: `The access controller denied access for the
-			operation on a resource.`,
-		HTTPStatusCode: http.StatusForbidden,
-	})
-
-	// ErrorCodeUnavailable provides a common error to report unavailability
-	// of a service or endpoint.
-	ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
-		Value:          "UNAVAILABLE",
-		Message:        "service unavailable",
-		Description:    "Returned when a service is not available",
-		HTTPStatusCode: http.StatusServiceUnavailable,
-	})
-
-	// ErrorCodeTooManyRequests is returned if a client attempts too many
-	// times to contact a service endpoint.
-	ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{
-		Value:   "TOOMANYREQUESTS",
-		Message: "too many requests",
-		Description: `Returned when a client attempts to contact a
-			service too many times`,
-		HTTPStatusCode: http.StatusTooManyRequests,
-	})
-)
-
-var nextCode = 1000
-var registerLock sync.Mutex
-
-// Register will make the passed-in error known to the environment and
-// return a new ErrorCode
-func Register(group string, descriptor ErrorDescriptor) ErrorCode {
-	registerLock.Lock()
-	defer registerLock.Unlock()
-
-	descriptor.Code = ErrorCode(nextCode)
-
-	if _, ok := idToDescriptors[descriptor.Value]; ok {
-		panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
-	}
-	if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
-		panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
-	}
-
-	groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
-	errorCodeToDescriptors[descriptor.Code] = descriptor
-	idToDescriptors[descriptor.Value] = descriptor
-
-	nextCode++
-	return descriptor.Code
-}
-
-type byValue []ErrorDescriptor
-
-func (a byValue) Len() int           { return len(a) }
-func (a byValue) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
-
-// GetGroupNames returns the list of Error group names that are registered
-func GetGroupNames() []string {
-	keys := []string{}
-
-	for k := range groupToDescriptors {
-		keys = append(keys, k)
-	}
-	sort.Strings(keys)
-	return keys
-}
-
-// GetErrorCodeGroup returns the named group of error descriptors
-func GetErrorCodeGroup(name string) []ErrorDescriptor {
-	desc := groupToDescriptors[name]
-	sort.Sort(byValue(desc))
-	return desc
-}
-
-// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
-// registered, irrespective of what group they're in
-func GetErrorAllDescriptors() []ErrorDescriptor {
-	result := []ErrorDescriptor{}
-
-	for _, group := range GetGroupNames() {
-		result = append(result, GetErrorCodeGroup(group)...)
-	}
-	sort.Sort(byValue(result))
-	return result
-}
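
Register also feeds the lookup tables consumed by GetGroupNames and GetErrorCodeGroup, which is how descriptors like those in descriptors.go (next diff) get enumerated for documentation. A small sketch of walking the registry; the output format is illustrative:

    package main

    import (
    	"fmt"

    	"github.com/docker/distribution/registry/api/errcode"
    )

    func main() {
    	// Walk every registered group and dump its descriptors. Importing
    	// the package alone registers the common "errcode" group above.
    	for _, group := range errcode.GetGroupNames() {
    		fmt.Println("group:", group)
    		for _, desc := range errcode.GetErrorCodeGroup(group) {
    			fmt.Printf("  %s (%d): %s\n", desc.Value, desc.HTTPStatusCode, desc.Message)
    		}
    	}
    }
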
group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go deleted file mode 100644 index 9979abae..00000000 --- a/docs/api/v2/descriptors.go +++ /dev/null @@ -1,1596 +0,0 @@ -package v2 - -import ( - "net/http" - "regexp" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" -) - -var ( - nameParameterDescriptor = ParameterDescriptor{ - Name: "name", - Type: "string", - Format: reference.NameRegexp.String(), - Required: true, - Description: `Name of the target repository.`, - } - - referenceParameterDescriptor = ParameterDescriptor{ - Name: "reference", - Type: "string", - Format: reference.TagRegexp.String(), - Required: true, - Description: `Tag or digest of the target manifest.`, - } - - uuidParameterDescriptor = ParameterDescriptor{ - Name: "uuid", - Type: "opaque", - Required: true, - Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", - } - - digestPathParameter = ParameterDescriptor{ - Name: "digest", - Type: "path", - Required: true, - Format: digest.DigestRegexp.String(), - Description: `Digest of desired blob.`, - } - - hostHeader = ParameterDescriptor{ - Name: "Host", - Type: "string", - Description: "Standard HTTP Host Header. Should be set to the registry host.", - Format: "", - Examples: []string{"registry-1.docker.io"}, - } - - authHeader = ParameterDescriptor{ - Name: "Authorization", - Type: "string", - Description: "An RFC7235 compliant authorization header.", - Format: " ", - Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, - } - - authChallengeHeader = ParameterDescriptor{ - Name: "WWW-Authenticate", - Type: "string", - Description: "An RFC7235 compliant authentication challenge header.", - Format: ` realm="", ..."`, - Examples: []string{ - `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, - }, - } - - contentLengthZeroHeader = ParameterDescriptor{ - Name: "Content-Length", - Description: "The `Content-Length` header must be zero and the body must be empty.", - Type: "integer", - Format: "0", - } - - dockerUploadUUIDHeader = ParameterDescriptor{ - Name: "Docker-Upload-UUID", - Description: "Identifies the docker upload uuid for the current request.", - Type: "uuid", - Format: "", - } - - digestHeader = ParameterDescriptor{ - Name: "Docker-Content-Digest", - Description: "Digest of the targeted content for the request.", - Type: "digest", - Format: "", - } - - linkHeader = ParameterDescriptor{ - Name: "Link", - Type: "link", - Description: "RFC5988 compliant rel='next' with URL to next result set, if available", - Format: `<?n=&last=>; rel="next"`, - } - - paginationParameters = []ParameterDescriptor{ - { - Name: "n", - Type: "integer", - Description: "Limit the number of entries in each response. 
It not present, all entries will be returned.", - Format: "", - Required: false, - }, - { - Name: "last", - Type: "string", - Description: "Result set will include values lexically after last.", - Format: "", - Required: false, - }, - } - - unauthorizedResponseDescriptor = ResponseDescriptor{ - Name: "Authentication Required", - StatusCode: http.StatusUnauthorized, - Description: "The client is not authenticated.", - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - } - - repositoryNotFoundResponseDescriptor = ResponseDescriptor{ - Name: "No Such Repository Error", - StatusCode: http.StatusNotFound, - Description: "The repository is not known to the registry.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - }, - } - - deniedResponseDescriptor = ResponseDescriptor{ - Name: "Access Denied", - StatusCode: http.StatusForbidden, - Description: "The client does not have required access to the repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeDenied, - }, - } - - tooManyRequestsDescriptor = ResponseDescriptor{ - Name: "Too Many Requests", - StatusCode: http.StatusTooManyRequests, - Description: "The client made too many requests within a time interval.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeTooManyRequests, - }, - } -) - -const ( - manifestBody = `{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": "" - }, - ... - ] - ], - "history": , - "signature": -}` - - errorsBody = `{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -}` -) - -// APIDescriptor exports descriptions of the layout of the v2 registry API. -var APIDescriptor = struct { - // RouteDescriptors provides a list of the routes available in the API. - RouteDescriptors []RouteDescriptor -}{ - RouteDescriptors: routeDescriptors, -} - -// RouteDescriptor describes a route specified by name. -type RouteDescriptor struct { - // Name is the name of the route, as specified in RouteNameXXX exports. - // These names a should be considered a unique reference for a route. If - // the route is registered with gorilla, this is the name that will be - // used. - Name string - - // Path is a gorilla/mux-compatible regexp that can be used to match the - // route. For any incoming method and path, only one route descriptor - // should match. - Path string - - // Entity should be a short, human-readalbe description of the object - // targeted by the endpoint. 
- Entity string - - // Description should provide an accurate overview of the functionality - // provided by the route. - Description string - - // Methods should describe the various HTTP methods that may be used on - // this route, including request and response formats. - Methods []MethodDescriptor -} - -// MethodDescriptor provides a description of the requests that may be -// conducted with the target method. -type MethodDescriptor struct { - - // Method is an HTTP method, such as GET, PUT or POST. - Method string - - // Description should provide an overview of the functionality provided by - // the covered method, suitable for use in documentation. Use of markdown - // here is encouraged. - Description string - - // Requests is a slice of request descriptors enumerating how this - // endpoint may be used. - Requests []RequestDescriptor -} - -// RequestDescriptor covers a particular set of headers and parameters that -// can be carried out with the parent method. Its most helpful to have one -// RequestDescriptor per API use case. -type RequestDescriptor struct { - // Name provides a short identifier for the request, usable as a title or - // to provide quick context for the particular request. - Name string - - // Description should cover the requests purpose, covering any details for - // this particular use case. - Description string - - // Headers describes headers that must be used with the HTTP request. - Headers []ParameterDescriptor - - // PathParameters enumerate the parameterized path components for the - // given request, as defined in the route's regular expression. - PathParameters []ParameterDescriptor - - // QueryParameters provides a list of query parameters for the given - // request. - QueryParameters []ParameterDescriptor - - // Body describes the format of the request body. - Body BodyDescriptor - - // Successes enumerates the possible responses that are considered to be - // the result of a successful request. - Successes []ResponseDescriptor - - // Failures covers the possible failures from this particular request. - Failures []ResponseDescriptor -} - -// ResponseDescriptor describes the components of an API response. -type ResponseDescriptor struct { - // Name provides a short identifier for the response, usable as a title or - // to provide quick context for the particular response. - Name string - - // Description should provide a brief overview of the role of the - // response. - Description string - - // StatusCode specifies the status received by this particular response. - StatusCode int - - // Headers covers any headers that may be returned from the response. - Headers []ParameterDescriptor - - // Fields describes any fields that may be present in the response. - Fields []ParameterDescriptor - - // ErrorCodes enumerates the error codes that may be returned along with - // the response. - ErrorCodes []errcode.ErrorCode - - // Body describes the body of the response, if any. - Body BodyDescriptor -} - -// BodyDescriptor describes a request body and its expected content type. For -// the most part, it should be example json or some placeholder for body -// data in documentation. -type BodyDescriptor struct { - ContentType string - Format string -} - -// ParameterDescriptor describes the format of a request parameter, which may -// be a header, path parameter or query parameter. -type ParameterDescriptor struct { - // Name is the name of the parameter, either of the path component or - // query parameter. 
- Name string - - // Type specifies the type of the parameter, such as string, integer, etc. - Type string - - // Description provides a human-readable description of the parameter. - Description string - - // Required means the field is required when set. - Required bool - - // Format is a specifying the string format accepted by this parameter. - Format string - - // Regexp is a compiled regular expression that can be used to validate - // the contents of the parameter. - Regexp *regexp.Regexp - - // Examples provides multiple examples for the values that might be valid - // for this parameter. - Examples []string -} - -var routeDescriptors = []RouteDescriptor{ - { - Name: RouteNameBase, - Path: "/v2/", - Entity: "Base", - Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Check that the endpoint implements Docker Registry API V2.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Successes: []ResponseDescriptor{ - { - Description: "The API implements V2 protocol and is accessible.", - StatusCode: http.StatusOK, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The registry does not implement the V2 API.", - StatusCode: http.StatusNotFound, - }, - unauthorizedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameTags, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", - Entity: "Tags", - Description: "Retrieve information about tags.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the tags under the repository identified by `name`.", - Requests: []RequestDescriptor{ - { - Name: "Tags", - Description: "Return all tags for the repository", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... - ] -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Tags Paginated", - Description: "Return a portion of the tags for the specified repository.", - PathParameters: []ParameterDescriptor{nameParameterDescriptor}, - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... 
- ], -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameManifest, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", - Entity: "Manifest", - Description: "Create, update, delete and retrieve manifests.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The name or reference was invalid.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The canonical location url of the uploaded manifest.", - Format: "", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Manifest", - Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", - StatusCode: http.StatusBadRequest, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - ErrorCodeManifestInvalid, - ErrorCodeManifestUnverified, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Missing Layer(s)", - Description: "One or more layers may be missing during a manifest upload. 
If so, the missing layers will be enumerated in the error response.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": "" - } - }, - ... - ] -}`, - }, - }, - { - Name: "Not allowed", - Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Reference", - Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Unknown Manifest", - Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Name: "Not allowed", - Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlob, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", - Entity: "Blob", - Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Name: "Fetch Blob", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the response.",
- StatusCode: http.StatusOK,
- Headers: []ParameterDescriptor{
- {
- Name: "Content-Length",
- Type: "integer",
- Description: "The length of the requested blob content.",
- Format: "",
- },
- digestHeader,
- },
- Body: BodyDescriptor{
- ContentType: "application/octet-stream",
- Format: "",
- },
- },
- {
- Description: "The blob identified by `digest` is available at the provided location.",
- StatusCode: http.StatusTemporaryRedirect,
- Headers: []ParameterDescriptor{
- {
- Name: "Location",
- Type: "url",
- Description: "The location where the layer should be accessible.",
- Format: "",
- },
- digestHeader,
- },
- },
- },
- Failures: []ResponseDescriptor{
- {
- Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
- StatusCode: http.StatusBadRequest,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeNameInvalid,
- ErrorCodeDigestInvalid,
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- },
- {
- Description: "The blob, identified by `name` and `digest`, is unknown to the registry.",
- StatusCode: http.StatusNotFound,
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeNameUnknown,
- ErrorCodeBlobUnknown,
- },
- },
- unauthorizedResponseDescriptor,
- repositoryNotFoundResponseDescriptor,
- deniedResponseDescriptor,
- tooManyRequestsDescriptor,
- },
- },
- {
- Name: "Fetch Blob Part",
- Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Ranges: bytes` is returned, range requests can be used to fetch partial content.",
- Headers: []ParameterDescriptor{
- hostHeader,
- authHeader,
- {
- Name: "Range",
- Type: "string",
- Description: "HTTP Range header specifying blob chunk.",
- Format: "bytes=-",
- },
- },
- PathParameters: []ParameterDescriptor{
- nameParameterDescriptor,
- digestPathParameter,
- },
- Successes: []ResponseDescriptor{
- {
- Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the response.",
- StatusCode: http.StatusPartialContent,
- Headers: []ParameterDescriptor{
- {
- Name: "Content-Length",
- Type: "integer",
- Description: "The length of the requested blob chunk.",
- Format: "",
- },
- {
- Name: "Content-Range",
- Type: "byte range",
- Description: "Content range of blob chunk.",
- Format: "bytes -/",
- },
- },
- Body: BodyDescriptor{
- ContentType: "application/octet-stream",
- Format: "",
- },
- },
- },
- Failures: []ResponseDescriptor{
- {
- Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
- StatusCode: http.StatusBadRequest,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeNameInvalid,
- ErrorCodeDigestInvalid,
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- },
- {
- StatusCode: http.StatusNotFound,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeNameUnknown,
- ErrorCodeBlobUnknown,
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- },
- {
- Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.",
- StatusCode: http.StatusRequestedRangeNotSatisfiable,
- },
- unauthorizedResponseDescriptor,
- repositoryNotFoundResponseDescriptor,
- deniedResponseDescriptor,
- tooManyRequestsDescriptor,
- },
- },
- },
- },
- {
- Method: "DELETE",
- Description: "Delete the blob identified by `name` and `digest`.",
- Requests: []RequestDescriptor{
- {
- Headers: []ParameterDescriptor{
- hostHeader,
- authHeader,
- },
- PathParameters: []ParameterDescriptor{
- nameParameterDescriptor,
- digestPathParameter,
- },
- Successes: []ResponseDescriptor{
- {
- StatusCode: http.StatusAccepted,
- Headers: []ParameterDescriptor{
- {
- Name: "Content-Length",
- Type: "integer",
- Description: "0",
- Format: "0",
- },
- digestHeader,
- },
- },
- },
- Failures: []ResponseDescriptor{
- {
- Name: "Invalid Name or Digest",
- StatusCode: http.StatusBadRequest,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeDigestInvalid,
- ErrorCodeNameInvalid,
- },
- },
- {
- Description: "The blob, identified by `name` and `digest`, is unknown to the registry.",
- StatusCode: http.StatusNotFound,
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeNameUnknown,
- ErrorCodeBlobUnknown,
- },
- },
- {
- Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.",
- StatusCode: http.StatusMethodNotAllowed,
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- ErrorCodes: []errcode.ErrorCode{
- errcode.ErrorCodeUnsupported,
- },
- },
- unauthorizedResponseDescriptor,
- repositoryNotFoundResponseDescriptor,
- deniedResponseDescriptor,
- tooManyRequestsDescriptor,
- },
- },
- },
- },
-
- // TODO(stevvooe): We may want to add a PUT request here to
- // kickoff an upload of a blob, integrated with the blob upload
- // API.
- },
- },
-
- {
- Name: RouteNameBlobUpload,
- Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/",
- Entity: "Initiate Blob Upload",
- Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.",
- Methods: []MethodDescriptor{
- {
- Method: "POST",
- Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.",
- Requests: []RequestDescriptor{
- {
- Name: "Initiate Monolithic Blob Upload",
- Description: "Upload a blob identified by the `digest` parameter in a single request. This upload will not be resumable unless a recoverable error is returned.",
- Headers: []ParameterDescriptor{
- hostHeader,
- authHeader,
- {
- Name: "Content-Length",
- Type: "integer",
- Format: "",
- },
- },
- PathParameters: []ParameterDescriptor{
- nameParameterDescriptor,
- },
- QueryParameters: []ParameterDescriptor{
- {
- Name: "digest",
- Type: "query",
- Format: "",
- Regexp: digest.DigestRegexp,
- Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with the contents of the request body as the resulting blob.`,
- },
- },
- Body: BodyDescriptor{
- ContentType: "application/octet-stream",
- Format: "",
- },
- Successes: []ResponseDescriptor{
- {
- Description: "The blob has been created in the registry and is available at the provided location.",
- StatusCode: http.StatusCreated,
- Headers: []ParameterDescriptor{
- {
- Name: "Location",
- Type: "url",
- Format: "",
- },
- contentLengthZeroHeader,
- dockerUploadUUIDHeader,
- },
- },
- },
- Failures: []ResponseDescriptor{
- {
- Name: "Invalid Name or Digest",
- StatusCode: http.StatusBadRequest,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeDigestInvalid,
- ErrorCodeNameInvalid,
- },
- },
- {
- Name: "Not allowed",
- Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason.",
- StatusCode: http.StatusMethodNotAllowed,
- ErrorCodes: []errcode.ErrorCode{
- errcode.ErrorCodeUnsupported,
- },
- },
- unauthorizedResponseDescriptor,
- repositoryNotFoundResponseDescriptor,
- deniedResponseDescriptor,
- tooManyRequestsDescriptor,
- },
- },
- {
- Name: "Initiate Resumable Blob Upload",
- Description: "Initiate a resumable blob upload with an empty request body.",
- Headers: []ParameterDescriptor{
- hostHeader,
- authHeader,
- contentLengthZeroHeader,
- },
- PathParameters: []ParameterDescriptor{
- nameParameterDescriptor,
- },
- Successes: []ResponseDescriptor{
- {
- Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.",
- StatusCode: http.StatusAccepted,
- Headers: []ParameterDescriptor{
- contentLengthZeroHeader,
- {
- Name: "Location",
- Type: "url",
- Format: "/v2//blobs/uploads/",
- Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
- },
- {
- Name: "Range",
- Format: "0-0",
- Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", - }, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Mount Blob", - Description: "Mount a blob identified by the `mount` parameter from another repository.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "mount", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of blob to mount from the source repository.`, - }, - { - Name: "from", - Type: "query", - Format: "", - Regexp: reference.NameRegexp, - Description: `Name of the source repository.`, - }, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been mounted in the repository and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", - Entity: "Blob Upload", - Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", - Requests: []RequestDescriptor{ - { - Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Progress", - Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.",
- StatusCode: http.StatusNoContent,
- Headers: []ParameterDescriptor{
- {
- Name: "Range",
- Type: "header",
- Format: "0-",
- Description: "Range indicating the current progress of the upload.",
- },
- contentLengthZeroHeader,
- dockerUploadUUIDHeader,
- },
- },
- },
- Failures: []ResponseDescriptor{
- {
- Description: "There was an error processing the upload and it must be restarted.",
- StatusCode: http.StatusBadRequest,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeDigestInvalid,
- ErrorCodeNameInvalid,
- ErrorCodeBlobUploadInvalid,
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- },
- {
- Description: "The upload is unknown to the registry. The upload must be restarted.",
- StatusCode: http.StatusNotFound,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeBlobUploadUnknown,
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- },
- unauthorizedResponseDescriptor,
- repositoryNotFoundResponseDescriptor,
- deniedResponseDescriptor,
- tooManyRequestsDescriptor,
- },
- },
- },
- },
- {
- Method: "PATCH",
- Description: "Upload a chunk of data for the specified upload.",
- Requests: []RequestDescriptor{
- {
- Name: "Stream upload",
- Description: "Upload a stream of data without completing the upload.",
- PathParameters: []ParameterDescriptor{
- nameParameterDescriptor,
- uuidParameterDescriptor,
- },
- Headers: []ParameterDescriptor{
- hostHeader,
- authHeader,
- },
- Body: BodyDescriptor{
- ContentType: "application/octet-stream",
- Format: "",
- },
- Successes: []ResponseDescriptor{
- {
- Name: "Data Accepted",
- Description: "The stream of data has been accepted and the current progress is available in the `Range` header. The updated upload location is available in the `Location` header.",
- StatusCode: http.StatusNoContent,
- Headers: []ParameterDescriptor{
- {
- Name: "Location",
- Type: "url",
- Format: "/v2//blobs/uploads/",
- Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
- },
- {
- Name: "Range",
- Type: "header",
- Format: "0-",
- Description: "Range indicating the current progress of the upload.",
- },
- contentLengthZeroHeader,
- dockerUploadUUIDHeader,
- },
- },
- },
- Failures: []ResponseDescriptor{
- {
- Description: "There was an error processing the upload and it must be restarted.",
- StatusCode: http.StatusBadRequest,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeDigestInvalid,
- ErrorCodeNameInvalid,
- ErrorCodeBlobUploadInvalid,
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- },
- {
- Description: "The upload is unknown to the registry. The upload must be restarted.",
- StatusCode: http.StatusNotFound,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeBlobUploadUnknown,
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- },
- unauthorizedResponseDescriptor,
- repositoryNotFoundResponseDescriptor,
- deniedResponseDescriptor,
- tooManyRequestsDescriptor,
- },
- },
- {
- Name: "Chunked upload",
- Description: "Upload a chunk of data to the specified upload without completing the upload. 
The data will be uploaded to the specified `Content-Range`.",
- PathParameters: []ParameterDescriptor{
- nameParameterDescriptor,
- uuidParameterDescriptor,
- },
- Headers: []ParameterDescriptor{
- hostHeader,
- authHeader,
- {
- Name: "Content-Range",
- Type: "header",
- Format: "-",
- Required: true,
- Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via status check, plus one. Note that this is a non-standard use of the `Content-Range` header.",
- },
- {
- Name: "Content-Length",
- Type: "integer",
- Format: "",
- Description: "Length of the chunk being uploaded, corresponding to the length of the request body.",
- },
- },
- Body: BodyDescriptor{
- ContentType: "application/octet-stream",
- Format: "",
- },
- Successes: []ResponseDescriptor{
- {
- Name: "Chunk Accepted",
- Description: "The chunk of data has been accepted and the current progress is available in the `Range` header. The updated upload location is available in the `Location` header.",
- StatusCode: http.StatusNoContent,
- Headers: []ParameterDescriptor{
- {
- Name: "Location",
- Type: "url",
- Format: "/v2//blobs/uploads/",
- Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
- },
- {
- Name: "Range",
- Type: "header",
- Format: "0-",
- Description: "Range indicating the current progress of the upload.",
- },
- contentLengthZeroHeader,
- dockerUploadUUIDHeader,
- },
- },
- },
- Failures: []ResponseDescriptor{
- {
- Description: "There was an error processing the upload and it must be restarted.",
- StatusCode: http.StatusBadRequest,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeDigestInvalid,
- ErrorCodeNameInvalid,
- ErrorCodeBlobUploadInvalid,
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- },
- {
- Description: "The upload is unknown to the registry. The upload must be restarted.",
- StatusCode: http.StatusNotFound,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeBlobUploadUnknown,
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- },
- {
- Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.",
- StatusCode: http.StatusRequestedRangeNotSatisfiable,
- },
- unauthorizedResponseDescriptor,
- repositoryNotFoundResponseDescriptor,
- deniedResponseDescriptor,
- tooManyRequestsDescriptor,
- },
- },
- },
- },
- {
- Method: "PUT",
- Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.",
- Requests: []RequestDescriptor{
- {
- Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.",
- Headers: []ParameterDescriptor{
- hostHeader,
- authHeader,
- {
- Name: "Content-Length",
- Type: "integer",
- Format: "",
- Description: "Length of the data being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.",
- },
- },
- PathParameters: []ParameterDescriptor{
- nameParameterDescriptor,
- uuidParameterDescriptor,
- },
- QueryParameters: []ParameterDescriptor{
- {
- Name: "digest",
- Type: "string",
- Format: "",
- Regexp: digest.DigestRegexp,
- Required: true,
- Description: `Digest of uploaded blob.`,
- },
- },
- Body: BodyDescriptor{
- ContentType: "application/octet-stream",
- Format: "",
- },
- Successes: []ResponseDescriptor{
- {
- Name: "Upload Complete",
- Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.",
- StatusCode: http.StatusNoContent,
- Headers: []ParameterDescriptor{
- {
- Name: "Location",
- Type: "url",
- Format: "",
- Description: "The canonical location of the blob for retrieval.",
- },
- {
- Name: "Content-Range",
- Type: "header",
- Format: "-",
- Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.",
- },
- contentLengthZeroHeader,
- digestHeader,
- },
- },
- },
- Failures: []ResponseDescriptor{
- {
- Description: "There was an error processing the upload and it must be restarted.",
- StatusCode: http.StatusBadRequest,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeDigestInvalid,
- ErrorCodeNameInvalid,
- ErrorCodeBlobUploadInvalid,
- errcode.ErrorCodeUnsupported,
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- },
- {
- Description: "The upload is unknown to the registry. The upload must be restarted.",
- StatusCode: http.StatusNotFound,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeBlobUploadUnknown,
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- },
- unauthorizedResponseDescriptor,
- repositoryNotFoundResponseDescriptor,
- deniedResponseDescriptor,
- tooManyRequestsDescriptor,
- },
- },
- },
- },
- {
- Method: "DELETE",
- Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually time out.",
- Requests: []RequestDescriptor{
- {
- Description: "Cancel the upload specified by `uuid`.",
- PathParameters: []ParameterDescriptor{
- nameParameterDescriptor,
- uuidParameterDescriptor,
- },
- Headers: []ParameterDescriptor{
- hostHeader,
- authHeader,
- contentLengthZeroHeader,
- },
- Successes: []ResponseDescriptor{
- {
- Name: "Upload Deleted",
- Description: "The upload has been successfully deleted.",
- StatusCode: http.StatusNoContent,
- Headers: []ParameterDescriptor{
- contentLengthZeroHeader,
- },
- },
- },
- Failures: []ResponseDescriptor{
- {
- Description: "An error was encountered processing the delete. The client may ignore this error.",
- StatusCode: http.StatusBadRequest,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeNameInvalid,
- ErrorCodeBlobUploadInvalid,
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- },
- {
- Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.",
- StatusCode: http.StatusNotFound,
- ErrorCodes: []errcode.ErrorCode{
- ErrorCodeBlobUploadUnknown,
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: errorsBody,
- },
- },
- unauthorizedResponseDescriptor,
- repositoryNotFoundResponseDescriptor,
- deniedResponseDescriptor,
- tooManyRequestsDescriptor,
- },
- },
- },
- },
- },
- },
- {
- Name: RouteNameCatalog,
- Path: "/v2/_catalog",
- Entity: "Catalog",
- Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can determine that a listed repository is available, but the absence of a repository from the catalog does not imply that it is unavailable.",
- Methods: []MethodDescriptor{
- {
- Method: "GET",
- Description: "Retrieve a sorted, JSON list of repositories available in the registry.",
- Requests: []RequestDescriptor{
- {
- Name: "Catalog Fetch",
- Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.",
- Successes: []ResponseDescriptor{
- {
- Description: "Returns the unabridged list of repositories as a json response.",
- StatusCode: http.StatusOK,
- Headers: []ParameterDescriptor{
- {
- Name: "Content-Length",
- Type: "integer",
- Description: "Length of the JSON response body.",
- Format: "",
- },
- },
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: `{
- "repositories": [
- ,
- ...
- ]
-}`,
- },
- },
- },
- },
- {
- Name: "Catalog Fetch Paginated",
- Description: "Return the specified portion of repositories.",
- QueryParameters: paginationParameters,
- Successes: []ResponseDescriptor{
- {
- StatusCode: http.StatusOK,
- Body: BodyDescriptor{
- ContentType: "application/json; charset=utf-8",
- Format: `{
- "repositories": [
- ,
- ...
- ],
- "next": "?last=&n="
-}`,
- },
- Headers: []ParameterDescriptor{
- {
- Name: "Content-Length",
- Type: "integer",
- Description: "Length of the JSON response body.",
- Format: "",
- },
- linkHeader,
- },
- },
- },
- },
- },
- },
- },
- },
-}
-
-var routeDescriptorsMap map[string]RouteDescriptor
-
-func init() {
- routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors))
-
- for _, descriptor := range routeDescriptors {
- routeDescriptorsMap[descriptor.Name] = descriptor
- }
-} diff --git a/docs/api/v2/doc.go b/docs/api/v2/doc.go deleted file mode 100644 index cde01195..00000000 --- a/docs/api/v2/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package v2 describes routes, urls and the error codes used in the Docker
-// Registry JSON HTTP API V2. In addition to declarations, descriptors are
-// provided for routes and error codes that can be used for implementation and
-// automatically generating documentation.
-//
-// Definitions here are considered to be locked down for the V2 registry api.
-// Any changes must be considered carefully and should not proceed without a
-// change proposal in docker core.
-package v2 diff --git a/docs/api/v2/errors.go b/docs/api/v2/errors.go deleted file mode 100644 index 97d6923a..00000000 --- a/docs/api/v2/errors.go +++ /dev/null @@ -1,136 +0,0 @@ -package v2
-
-import (
- "net/http"
-
- "github.com/docker/distribution/registry/api/errcode"
-)
-
-const errGroup = "registry.api.v2"
-
-var (
- // ErrorCodeDigestInvalid is returned when uploading a blob if the
- // provided digest does not match the blob contents. 
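- //
- // For illustration only, a response carrying this code looks roughly like
- // the following (the digest value is hypothetical):
- //
- //	{"errors": [{"code": "DIGEST_INVALID",
- //	             "message": "provided digest did not match uploaded content",
- //	             "detail": {"digest": "sha256:abc..."}}]}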
- ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
- Value: "DIGEST_INVALID",
- Message: "provided digest did not match uploaded content",
- Description: `When a blob is uploaded, the registry will check that
- the content matches the digest provided by the client. The error may
- include a detail structure with the key "digest", including the
- invalid digest string. This error may also be returned when a manifest
- includes an invalid layer digest.`,
- HTTPStatusCode: http.StatusBadRequest,
- })
-
- // ErrorCodeSizeInvalid is returned when uploading a blob if the provided
- // size does not match the content length.
- ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
- Value: "SIZE_INVALID",
- Message: "provided length did not match content length",
- Description: `When a layer is uploaded, the provided size will be
- checked against the uploaded content. If they do not match, this error
- will be returned.`,
- HTTPStatusCode: http.StatusBadRequest,
- })
-
- // ErrorCodeNameInvalid is returned when the name in the manifest does not
- // match the provided name.
- ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
- Value: "NAME_INVALID",
- Message: "invalid repository name",
- Description: `Invalid repository name encountered either during
- manifest validation or any API operation.`,
- HTTPStatusCode: http.StatusBadRequest,
- })
-
- // ErrorCodeTagInvalid is returned when the tag in the manifest does not
- // match the provided tag.
- ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
- Value: "TAG_INVALID",
- Message: "manifest tag did not match URI",
- Description: `During a manifest upload, if the tag in the manifest
- does not match the uri tag, this error will be returned.`,
- HTTPStatusCode: http.StatusBadRequest,
- })
-
- // ErrorCodeNameUnknown is returned when the repository name is not known.
- ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
- Value: "NAME_UNKNOWN",
- Message: "repository name not known to registry",
- Description: `This is returned if the name used during an operation is
- unknown to the registry.`,
- HTTPStatusCode: http.StatusNotFound,
- })
-
- // ErrorCodeManifestUnknown is returned when the image manifest is unknown.
- ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
- Value: "MANIFEST_UNKNOWN",
- Message: "manifest unknown",
- Description: `This error is returned when the manifest, identified by
- name and tag, is unknown to the repository.`,
- HTTPStatusCode: http.StatusNotFound,
- })
-
- // ErrorCodeManifestInvalid is returned when an image manifest is invalid,
- // typically during a PUT operation. This error encompasses all errors
- // encountered during manifest validation that aren't signature errors.
- ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
- Value: "MANIFEST_INVALID",
- Message: "manifest invalid",
- Description: `During upload, manifests undergo several checks ensuring
- validity. If those checks fail, this error may be returned, unless a
- more specific error is included. The detail will contain information about
- the failed validation.`,
- HTTPStatusCode: http.StatusBadRequest,
- })
-
- // ErrorCodeManifestUnverified is returned when the manifest fails
- // signature verification. 
- ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNVERIFIED", - Message: "manifest failed signature verification", - Description: `During manifest upload, if the manifest fails signature - verification, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestBlobUnknown is returned when a manifest blob is - // unknown to the registry. - ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a manifest blob is - unknown to the registry.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeBlobUnknown is returned when a blob is unknown to the - // registry. This can happen when the manifest references a nonexistent - // layer or the result is not found by a blob fetch. - ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a blob is unknown to the - registry in a specified repository. This can be returned with a - standard get or if a manifest references an unknown layer during - upload.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. - ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_UNKNOWN", - Message: "blob upload unknown to registry", - Description: `If a blob upload has been cancelled or was never - started, this error code may be returned.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. - ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_INVALID", - Message: "blob upload invalid", - Description: `The blob upload encountered an error and can no - longer proceed.`, - HTTPStatusCode: http.StatusNotFound, - }) -) diff --git a/docs/api/v2/routes.go b/docs/api/v2/routes.go deleted file mode 100644 index 5b80d5be..00000000 --- a/docs/api/v2/routes.go +++ /dev/null @@ -1,49 +0,0 @@ -package v2 - -import "github.com/gorilla/mux" - -// The following are definitions of the name under which all V2 routes are -// registered. These symbols can be used to look up a route based on the name. -const ( - RouteNameBase = "base" - RouteNameManifest = "manifest" - RouteNameTags = "tags" - RouteNameBlob = "blob" - RouteNameBlobUpload = "blob-upload" - RouteNameBlobUploadChunk = "blob-upload-chunk" - RouteNameCatalog = "catalog" -) - -var allEndpoints = []string{ - RouteNameManifest, - RouteNameCatalog, - RouteNameTags, - RouteNameBlob, - RouteNameBlobUpload, - RouteNameBlobUploadChunk, -} - -// Router builds a gorilla router with named routes for the various API -// methods. This can be used directly by both server implementations and -// clients. -func Router() *mux.Router { - return RouterWithPrefix("") -} - -// RouterWithPrefix builds a gorilla router with a configured prefix -// on all routes. 
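-//
-// For example, with a hypothetical prefix of "/registry/", the named routes
-// resolve under "/registry/v2/..." (the same behavior the prefix cases in
-// routes_test.go exercise):
-//
-//	router := RouterWithPrefix("/registry/")
-//	manifests := router.GetRoute(RouteNameManifest)
-//	// manifests now serves /registry/v2/{name}/manifests/{reference}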
-func RouterWithPrefix(prefix string) *mux.Router {
- rootRouter := mux.NewRouter()
- router := rootRouter
- if prefix != "" {
- router = router.PathPrefix(prefix).Subrouter()
- }
-
- router.StrictSlash(true)
-
- for _, descriptor := range routeDescriptors {
- router.Path(descriptor.Path).Name(descriptor.Name)
- }
-
- return rootRouter
-} diff --git a/docs/api/v2/routes_test.go b/docs/api/v2/routes_test.go deleted file mode 100644 index f632d981..00000000 --- a/docs/api/v2/routes_test.go +++ /dev/null @@ -1,355 +0,0 @@ -package v2
-
-import (
- "encoding/json"
- "fmt"
- "math/rand"
- "net/http"
- "net/http/httptest"
- "reflect"
- "strings"
- "testing"
- "time"
-
- "github.com/gorilla/mux"
-)
-
-type routeTestCase struct {
- RequestURI string
- ExpectedURI string
- Vars map[string]string
- RouteName string
- StatusCode int
-}
-
-// TestRouter registers a test handler with all the routes and ensures that
-// each route returns the expected path variables. No method verification is
-// present. This is not meant to be exhaustive, but is a check to ensure that
-// the expected variables are extracted.
-//
-// This may go away as the application structure comes together.
-func TestRouter(t *testing.T) {
- testCases := []routeTestCase{
- {
- RouteName: RouteNameBase,
- RequestURI: "/v2/",
- Vars: map[string]string{},
- },
- {
- RouteName: RouteNameManifest,
- RequestURI: "/v2/foo/manifests/bar",
- Vars: map[string]string{
- "name": "foo",
- "reference": "bar",
- },
- },
- {
- RouteName: RouteNameManifest,
- RequestURI: "/v2/foo/bar/manifests/tag",
- Vars: map[string]string{
- "name": "foo/bar",
- "reference": "tag",
- },
- },
- {
- RouteName: RouteNameManifest,
- RequestURI: "/v2/foo/bar/manifests/sha256:abcdef01234567890",
- Vars: map[string]string{
- "name": "foo/bar",
- "reference": "sha256:abcdef01234567890",
- },
- },
- {
- RouteName: RouteNameTags,
- RequestURI: "/v2/foo/bar/tags/list",
- Vars: map[string]string{
- "name": "foo/bar",
- },
- },
- {
- RouteName: RouteNameTags,
- RequestURI: "/v2/docker.com/foo/tags/list",
- Vars: map[string]string{
- "name": "docker.com/foo",
- },
- },
- {
- RouteName: RouteNameTags,
- RequestURI: "/v2/docker.com/foo/bar/tags/list",
- Vars: map[string]string{
- "name": "docker.com/foo/bar",
- },
- },
- {
- RouteName: RouteNameTags,
- RequestURI: "/v2/docker.com/foo/bar/baz/tags/list",
- Vars: map[string]string{
- "name": "docker.com/foo/bar/baz",
- },
- },
- {
- RouteName: RouteNameBlob,
- RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234",
- Vars: map[string]string{
- "name": "foo/bar",
- "digest": "sha256:abcdef0919234",
- },
- },
- {
- RouteName: RouteNameBlobUpload,
- RequestURI: "/v2/foo/bar/blobs/uploads/",
- Vars: map[string]string{
- "name": "foo/bar",
- },
- },
- {
- RouteName: RouteNameBlobUploadChunk,
- RequestURI: "/v2/foo/bar/blobs/uploads/uuid",
- Vars: map[string]string{
- "name": "foo/bar",
- "uuid": "uuid",
- },
- },
- {
- // support uuid proper
- RouteName: RouteNameBlobUploadChunk,
- RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
- Vars: map[string]string{
- "name": "foo/bar",
- "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
- },
- },
- {
- RouteName: RouteNameBlobUploadChunk,
- RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==",
- Vars: map[string]string{
- "name": "foo/bar",
- "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==",
- },
- },
- {
- // supports urlsafe base64
- RouteName: RouteNameBlobUploadChunk,
- RequestURI: 
"/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", - Vars: map[string]string{ - "name": "foo/bar", - "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", - }, - }, - { - // does not match - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/bar/blobs/uploads/totalandcompletejunk++$$-==", - StatusCode: http.StatusNotFound, - }, - { - // Check ambiguity: ensure we can distinguish between tags for - // "foo/bar/image/image" and image for "foo/bar/image" with tag - // "tags" - RouteName: RouteNameManifest, - RequestURI: "/v2/foo/bar/manifests/manifests/tags", - Vars: map[string]string{ - "name": "foo/bar/manifests", - "reference": "tags", - }, - }, - { - // This case presents an ambiguity between foo/bar with tag="tags" - // and list tags for "foo/bar/manifest" - RouteName: RouteNameTags, - RequestURI: "/v2/foo/bar/manifests/tags/list", - Vars: map[string]string{ - "name": "foo/bar/manifests", - }, - }, - { - RouteName: RouteNameManifest, - RequestURI: "/v2/locahost:8080/foo/bar/baz/manifests/tag", - Vars: map[string]string{ - "name": "locahost:8080/foo/bar/baz", - "reference": "tag", - }, - }, - } - - checkTestRouter(t, testCases, "", true) - checkTestRouter(t, testCases, "/prefix/", true) -} - -func TestRouterWithPathTraversals(t *testing.T) { - testCases := []routeTestCase{ - { - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - ExpectedURI: "/blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - StatusCode: http.StatusNotFound, - }, - { - // Testing for path traversal attack handling - RouteName: RouteNameTags, - RequestURI: "/v2/foo/../bar/baz/tags/list", - ExpectedURI: "/v2/bar/baz/tags/list", - Vars: map[string]string{ - "name": "bar/baz", - }, - }, - } - checkTestRouter(t, testCases, "", false) -} - -func TestRouterWithBadCharacters(t *testing.T) { - if testing.Short() { - testCases := []routeTestCase{ - { - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/blob/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286", - StatusCode: http.StatusNotFound, - }, - { - // Testing for path traversal attack handling - RouteName: RouteNameTags, - RequestURI: "/v2/foo/不bar/tags/list", - StatusCode: http.StatusNotFound, - }, - } - checkTestRouter(t, testCases, "", true) - } else { - // in the long version we're going to fuzz the router - // with random UTF8 characters not in the 128 bit ASCII range. - // These are not valid characters for the router and we expect - // 404s on every test. 
- rand.Seed(time.Now().UTC().UnixNano()) - testCases := make([]routeTestCase, 1000) - for idx := range testCases { - testCases[idx] = routeTestCase{ - RouteName: RouteNameTags, - RequestURI: fmt.Sprintf("/v2/%v/%v/tags/list", randomString(10), randomString(10)), - StatusCode: http.StatusNotFound, - } - } - checkTestRouter(t, testCases, "", true) - } -} - -func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, deeplyEqual bool) { - router := RouterWithPrefix(prefix) - - testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - testCase := routeTestCase{ - RequestURI: r.RequestURI, - Vars: mux.Vars(r), - RouteName: mux.CurrentRoute(r).GetName(), - } - - enc := json.NewEncoder(w) - - if err := enc.Encode(testCase); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - }) - - // Startup test server - server := httptest.NewServer(router) - - for _, testcase := range testCases { - testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI - // Register the endpoint - route := router.GetRoute(testcase.RouteName) - if route == nil { - t.Fatalf("route for name %q not found", testcase.RouteName) - } - - route.Handler(testHandler) - - u := server.URL + testcase.RequestURI - - resp, err := http.Get(u) - - if err != nil { - t.Fatalf("error issuing get request: %v", err) - } - - if testcase.StatusCode == 0 { - // Override default, zero-value - testcase.StatusCode = http.StatusOK - } - if testcase.ExpectedURI == "" { - // Override default, zero-value - testcase.ExpectedURI = testcase.RequestURI - } - - if resp.StatusCode != testcase.StatusCode { - t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode) - } - - if testcase.StatusCode != http.StatusOK { - resp.Body.Close() - // We don't care about json response. - continue - } - - dec := json.NewDecoder(resp.Body) - - var actualRouteInfo routeTestCase - if err := dec.Decode(&actualRouteInfo); err != nil { - t.Fatalf("error reading json response: %v", err) - } - // Needs to be set out of band - actualRouteInfo.StatusCode = resp.StatusCode - - if actualRouteInfo.RequestURI != testcase.ExpectedURI { - t.Fatalf("URI %v incorrectly parsed, expected %v", actualRouteInfo.RequestURI, testcase.ExpectedURI) - } - - if actualRouteInfo.RouteName != testcase.RouteName { - t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName) - } - - // when testing deep equality, the actualRouteInfo has an empty ExpectedURI, we don't want - // that to make the comparison fail. We're otherwise done with the testcase so empty the - // testcase.ExpectedURI - testcase.ExpectedURI = "" - if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) { - t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase) - } - - resp.Body.Close() - } - -} - -// -------------- START LICENSED CODE -------------- -// The following code is derivative of https://github.com/google/gofuzz -// gofuzz is licensed under the Apache License, Version 2.0, January 2004, -// a copy of which can be found in the LICENSE file at the root of this -// repository. - -// These functions allow us to generate strings containing only multibyte -// characters that are invalid in our URLs. They are used above for fuzzing -// to ensure we always get 404s on these invalid strings -type charRange struct { - first, last rune -} - -// choose returns a random unicode character from the given range, using the -// given randomness source. 
-func (r *charRange) choose() rune {
- count := int64(r.last - r.first)
- return r.first + rune(rand.Int63n(count))
-}
-
-var unicodeRanges = []charRange{
- {'\u00a0', '\u02af'}, // Multi-byte encoded characters
- {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
-}
-
-func randomString(length int) string {
- runes := make([]rune, length)
- for i := range runes {
- runes[i] = unicodeRanges[rand.Intn(len(unicodeRanges))].choose()
- }
- return string(runes)
-}
-
-// -------------- END LICENSED CODE -------------- diff --git a/docs/api/v2/urls.go b/docs/api/v2/urls.go deleted file mode 100644 index a959aaa8..00000000 --- a/docs/api/v2/urls.go +++ /dev/null @@ -1,251 +0,0 @@ -package v2
-
-import (
- "net/http"
- "net/url"
- "strings"
-
- "github.com/docker/distribution/reference"
- "github.com/gorilla/mux"
-)
-
-// URLBuilder creates registry API urls from a single base endpoint. It can be
-// used to create urls for use in a registry client or server.
-//
-// All urls will be created from the given base, including the api version.
-// For example, if a root of "/foo/" is provided, urls generated will fall
-// under "/foo/v2/...". Most applications will only provide a scheme, host,
-// and port, such as "https://localhost:5000/".
-type URLBuilder struct {
- root *url.URL // url root (ie http://localhost/)
- router *mux.Router
- relative bool
-}
-
-// NewURLBuilder creates a URLBuilder with provided root url object.
-func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
- return &URLBuilder{
- root: root,
- router: Router(),
- relative: relative,
- }
-}
-
-// NewURLBuilderFromString works identically to NewURLBuilder except it takes
-// a string argument for the root, returning an error if it is not a valid
-// url.
-func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
- u, err := url.Parse(root)
- if err != nil {
- return nil, err
- }
-
- return NewURLBuilder(u, relative), nil
-}
-
-// NewURLBuilderFromRequest uses information from an *http.Request to
-// construct the root url.
-func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
- var scheme string
-
- forwardedProto := r.Header.Get("X-Forwarded-Proto")
-
- switch {
- case len(forwardedProto) > 0:
- scheme = forwardedProto
- case r.TLS != nil:
- scheme = "https"
- case len(r.URL.Scheme) > 0:
- scheme = r.URL.Scheme
- default:
- scheme = "http"
- }
-
- host := r.Host
- forwardedHost := r.Header.Get("X-Forwarded-Host")
- if len(forwardedHost) > 0 {
- // According to the Apache mod_proxy docs, X-Forwarded-Host can be a
- // comma-separated list of hosts, to which each proxy appends the
- // requested host. We want to grab the first from this comma-separated
- // list.
- hosts := strings.SplitN(forwardedHost, ",", 2)
- host = strings.TrimSpace(hosts[0])
- }
-
- basePath := routeDescriptorsMap[RouteNameBase].Path
-
- requestPath := r.URL.Path
- index := strings.Index(requestPath, basePath)
-
- u := &url.URL{
- Scheme: scheme,
- Host: host,
- }
-
- if index > 0 {
- // N.B. index+1 is important because we want to include the trailing /
- u.Path = requestPath[0 : index+1]
- }
-
- return NewURLBuilder(u, relative)
-}
-
-// BuildBaseURL constructs a base url for the API, typically just "/v2/". 
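-//
-// A minimal sketch of client-side use (the root value is hypothetical):
-//
-//	ub, _ := NewURLBuilderFromString("https://registry.example.com", false)
-//	base, _ := ub.BuildBaseURL()
-//	// base == "https://registry.example.com/v2/"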
-func (ub *URLBuilder) BuildBaseURL() (string, error) {
- route := ub.cloneRoute(RouteNameBase)
-
- baseURL, err := route.URL()
- if err != nil {
- return "", err
- }
-
- return baseURL.String(), nil
-}
-
-// BuildCatalogURL constructs a url to get a catalog of repositories.
-func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
- route := ub.cloneRoute(RouteNameCatalog)
-
- catalogURL, err := route.URL()
- if err != nil {
- return "", err
- }
-
- return appendValuesURL(catalogURL, values...).String(), nil
-}
-
-// BuildTagsURL constructs a url to list the tags in the named repository.
-func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
- route := ub.cloneRoute(RouteNameTags)
-
- tagsURL, err := route.URL("name", name.Name())
- if err != nil {
- return "", err
- }
-
- return tagsURL.String(), nil
-}
-
-// BuildManifestURL constructs a url for the manifest identified by name and
-// reference. The argument reference may be either a tag or digest.
-func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
- route := ub.cloneRoute(RouteNameManifest)
-
- tagOrDigest := ""
- switch v := ref.(type) {
- case reference.Tagged:
- tagOrDigest = v.Tag()
- case reference.Digested:
- tagOrDigest = v.Digest().String()
- }
-
- manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
- if err != nil {
- return "", err
- }
-
- return manifestURL.String(), nil
-}
-
-// BuildBlobURL constructs the url for the blob identified by name and dgst.
-func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
- route := ub.cloneRoute(RouteNameBlob)
-
- layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
- if err != nil {
- return "", err
- }
-
- return layerURL.String(), nil
-}
-
-// BuildBlobUploadURL constructs a url to begin a blob upload in the
-// repository identified by name.
-func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
- route := ub.cloneRoute(RouteNameBlobUpload)
-
- uploadURL, err := route.URL("name", name.Name())
- if err != nil {
- return "", err
- }
-
- return appendValuesURL(uploadURL, values...).String(), nil
-}
-
-// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
-// including any url values. This should generally not be used by clients, as
-// this url is provided by server implementations during the blob upload
-// process.
-func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
- route := ub.cloneRoute(RouteNameBlobUploadChunk)
-
- uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
- if err != nil {
- return "", err
- }
-
- return appendValuesURL(uploadURL, values...).String(), nil
-}
-
-// cloneRoute returns a clone of the named route from the router. Routes
-// must be cloned to avoid modifying them during url generation.
-func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
- route := new(mux.Route)
- root := new(url.URL)
-
- *route = *ub.router.GetRoute(name) // clone the route
- *root = *ub.root
-
- return clonedRoute{Route: route, root: root, relative: ub.relative}
-}
-
-type clonedRoute struct {
- *mux.Route
- root *url.URL
- relative bool
-}
-
-func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
- routeURL, err := cr.Route.URL(pairs...) 
- if err != nil { - return nil, err - } - - if cr.relative { - return routeURL, nil - } - - if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { - routeURL.Path = routeURL.Path[1:] - } - - url := cr.root.ResolveReference(routeURL) - url.Scheme = cr.root.Scheme - return url, nil -} - -// appendValuesURL appends the parameters to the url. -func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { - merged := u.Query() - - for _, v := range values { - for k, vv := range v { - merged[k] = append(merged[k], vv...) - } - } - - u.RawQuery = merged.Encode() - return u -} - -// appendValues appends the parameters to the url. Panics if the string is not -// a url. -func appendValues(u string, values ...url.Values) string { - up, err := url.Parse(u) - - if err != nil { - panic(err) // should never happen - } - - return appendValuesURL(up, values...).String() -} diff --git a/docs/api/v2/urls_test.go b/docs/api/v2/urls_test.go deleted file mode 100644 index 10aadd52..00000000 --- a/docs/api/v2/urls_test.go +++ /dev/null @@ -1,334 +0,0 @@ -package v2 - -import ( - "net/http" - "net/url" - "testing" - - "github.com/docker/distribution/reference" -) - -type urlBuilderTestCase struct { - description string - expectedPath string - build func() (string, error) -} - -func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { - fooBarRef, _ := reference.ParseNamed("foo/bar") - return []urlBuilderTestCase{ - { - description: "test base url", - expectedPath: "/v2/", - build: urlBuilder.BuildBaseURL, - }, - { - description: "test tags url", - expectedPath: "/v2/foo/bar/tags/list", - build: func() (string, error) { - return urlBuilder.BuildTagsURL(fooBarRef) - }, - }, - { - description: "test manifest url", - expectedPath: "/v2/foo/bar/manifests/tag", - build: func() (string, error) { - ref, _ := reference.WithTag(fooBarRef, "tag") - return urlBuilder.BuildManifestURL(ref) - }, - }, - { - description: "build blob url", - expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", - build: func() (string, error) { - ref, _ := reference.WithDigest(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") - return urlBuilder.BuildBlobURL(ref) - }, - }, - { - description: "build blob upload url", - expectedPath: "/v2/foo/bar/blobs/uploads/", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL(fooBarRef) - }, - }, - { - description: "build blob upload url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL(fooBarRef, url.Values{ - "size": []string{"10000"}, - "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, - }) - }, - }, - { - description: "build blob upload chunk url", - expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part") - }, - }, - { - description: "build blob upload chunk url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part", url.Values{ - "size": []string{"10000"}, - "digest": 
[]string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, - }) - }, - }, - } -} - -// TestURLBuilder tests the various url building functions, ensuring they are -// returning the expected values. -func TestURLBuilder(t *testing.T) { - roots := []string{ - "http://example.com", - "https://example.com", - "http://localhost:5000", - "https://localhost:5443", - } - - doTest := func(relative bool) { - for _, root := range roots { - urlBuilder, err := NewURLBuilderFromString(root, relative) - if err != nil { - t.Fatalf("unexpected error creating urlbuilder: %v", err) - } - - for _, testCase := range makeURLBuilderTestCases(urlBuilder) { - url, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - expectedURL := testCase.expectedPath - if !relative { - expectedURL = root + expectedURL - } - - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) - } - } - } - } - doTest(true) - doTest(false) -} - -func TestURLBuilderWithPrefix(t *testing.T) { - roots := []string{ - "http://example.com/prefix/", - "https://example.com/prefix/", - "http://localhost:5000/prefix/", - "https://localhost:5443/prefix/", - } - - doTest := func(relative bool) { - for _, root := range roots { - urlBuilder, err := NewURLBuilderFromString(root, relative) - if err != nil { - t.Fatalf("unexpected error creating urlbuilder: %v", err) - } - - for _, testCase := range makeURLBuilderTestCases(urlBuilder) { - url, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - - expectedURL := testCase.expectedPath - if !relative { - expectedURL = root[0:len(root)-1] + expectedURL - } - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) - } - } - } - } - doTest(true) - doTest(false) -} - -type builderFromRequestTestCase struct { - request *http.Request - base string -} - -func TestBuilderFromRequest(t *testing.T) { - u, err := url.Parse("http://example.com") - if err != nil { - t.Fatal(err) - } - - forwardedProtoHeader := make(http.Header, 1) - forwardedProtoHeader.Set("X-Forwarded-Proto", "https") - - forwardedHostHeader1 := make(http.Header, 1) - forwardedHostHeader1.Set("X-Forwarded-Host", "first.example.com") - - forwardedHostHeader2 := make(http.Header, 1) - forwardedHostHeader2.Set("X-Forwarded-Host", "first.example.com, proxy1.example.com") - - testRequests := []struct { - request *http.Request - base string - configHost url.URL - }{ - { - request: &http.Request{URL: u, Host: u.Host}, - base: "http://example.com", - }, - - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, - base: "http://example.com", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, - base: "https://example.com", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader1}, - base: "http://first.example.com", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, - base: "http://first.example.com", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, - base: "https://third.example.com:5000", - configHost: url.URL{ - Scheme: "https", - Host: "third.example.com:5000", - }, - }, - } - doTest := func(relative bool) { - for _, tr := range testRequests { - var builder *URLBuilder - if tr.configHost.Scheme != "" && tr.configHost.Host != "" { - builder = NewURLBuilder(&tr.configHost, relative) - 
} else { - builder = NewURLBuilderFromRequest(tr.request, relative) - } - - for _, testCase := range makeURLBuilderTestCases(builder) { - buildURL, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - - var expectedURL string - proto, ok := tr.request.Header["X-Forwarded-Proto"] - if !ok { - expectedURL = testCase.expectedPath - if !relative { - expectedURL = tr.base + expectedURL - } - } else { - urlBase, err := url.Parse(tr.base) - if err != nil { - t.Fatal(err) - } - urlBase.Scheme = proto[0] - expectedURL = testCase.expectedPath - if !relative { - expectedURL = urlBase.String() + expectedURL - } - } - - if buildURL != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) - } - } - } - } - doTest(true) - doTest(false) -} - -func TestBuilderFromRequestWithPrefix(t *testing.T) { - u, err := url.Parse("http://example.com/prefix/v2/") - if err != nil { - t.Fatal(err) - } - - forwardedProtoHeader := make(http.Header, 1) - forwardedProtoHeader.Set("X-Forwarded-Proto", "https") - - testRequests := []struct { - request *http.Request - base string - configHost url.URL - }{ - { - request: &http.Request{URL: u, Host: u.Host}, - base: "http://example.com/prefix/", - }, - - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, - base: "http://example.com/prefix/", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, - base: "https://example.com/prefix/", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, - base: "https://subdomain.example.com/prefix/", - configHost: url.URL{ - Scheme: "https", - Host: "subdomain.example.com", - Path: "/prefix/", - }, - }, - } - - var relative bool - for _, tr := range testRequests { - var builder *URLBuilder - if tr.configHost.Scheme != "" && tr.configHost.Host != "" { - builder = NewURLBuilder(&tr.configHost, false) - } else { - builder = NewURLBuilderFromRequest(tr.request, false) - } - - for _, testCase := range makeURLBuilderTestCases(builder) { - buildURL, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - - var expectedURL string - proto, ok := tr.request.Header["X-Forwarded-Proto"] - if !ok { - expectedURL = testCase.expectedPath - if !relative { - expectedURL = tr.base[0:len(tr.base)-1] + expectedURL - } - } else { - urlBase, err := url.Parse(tr.base) - if err != nil { - t.Fatal(err) - } - urlBase.Scheme = proto[0] - expectedURL = testCase.expectedPath - if !relative { - expectedURL = urlBase.String()[0:len(urlBase.String())-1] + expectedURL - } - - } - - if buildURL != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) - } - } - } -} diff --git a/docs/architecture.md b/docs/architecture.md new file mode 100644 index 00000000..39251760 --- /dev/null +++ b/docs/architecture.md @@ -0,0 +1,54 @@ + + +# Architecture + +## Design +**TODO(stevvooe):** Discuss the architecture of the registry, internally and externally, in a few different deployment scenarios. + +### Eventual Consistency + +> **NOTE:** This section belongs somewhere, perhaps in a design document. We +> are leaving this here so the information is not lost. + +Running the registry on eventually consistent backends has been part of the +design from the beginning. This section covers some of the approaches to +dealing with this reality. 
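+
+As a concrete illustration of the first class of issue listed below, a client
+of an eventually consistent backend may need to tolerate a write that is not
+yet visible to readers. This is only a sketch: the `store` interface is a
+hypothetical stand-in for the storage drivers discussed in this section, not
+part of the registry code.
+
+```go
+package consistency
+
+import (
+	"fmt"
+	"time"
+)
+
+// store is a hypothetical, minimal stand-in for a storage driver.
+type store interface {
+	PutContent(path string, content []byte) error
+	GetContent(path string) ([]byte, error)
+}
+
+// readAfterWrite writes content, then polls with backoff until the write
+// becomes visible, tolerating read-after-write lag.
+func readAfterWrite(s store, path string, content []byte) ([]byte, error) {
+	if err := s.PutContent(path, content); err != nil {
+		return nil, err
+	}
+	var lastErr error
+	for attempt := 1; attempt <= 5; attempt++ {
+		data, err := s.GetContent(path)
+		if err == nil {
+			return data, nil
+		}
+		lastErr = err
+		// Simple linear backoff; a real implementation would tune this
+		// per backend.
+		time.Sleep(time.Duration(attempt) * 100 * time.Millisecond)
+	}
+	return nil, fmt.Errorf("content at %s not visible after retries: %v", path, lastErr)
+}
+```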
+ +There are a few classes of issues that we need to worry about when +implementing something on top of the storage drivers: + +1. Read-After-Write consistency (see this [article on + s3](http://shlomoswidler.com/2009/12/read-after-write-consistency-in-amazon.html)). +2. [Write-Write Conflicts](http://en.wikipedia.org/wiki/Write%E2%80%93write_conflict). + +In reality, the registry must worry about these kinds of errors when doing the +following: + +1. Accepting data into a temporary upload file that may not yet contain the + latest data block (read-after-write). +2. Moving uploaded data into its blob location (write-write race). +3. Modifying the "current" manifest for a given tag (write-write race). +4. A whole slew of operations around deletes (read-after-write, delete-write + races, garbage collection, etc.). + +The backend path layout employs a few techniques to avoid these problems: + +1. Large writes are done to private upload directories. This alleviates most + of the corruption potential by ensuring each upload has a single writer. +2. Constraints in storage driver implementations, such as support for writing + after the end of a file to extend it. +3. Digest verification to avoid data corruption. +4. Manifest files are stored by digest and cannot change. +5. All other non-content files (links, hashes, etc.) are written as an atomic + unit. Anything that requires additions and deletions is broken out into + separate "files". Last writer still wins. + +Unfortunately, one must play this game when trying to build something like +this on top of eventually consistent storage systems. If we run into serious +problems, we can wrap the storage drivers in a shared consistency layer, but +that would increase complexity and hinder registry cluster performance.
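+
+As a concrete illustration of the digest-verification technique (3), the
+sketch below shows the idea in isolation: an upload is re-read from its
+private location, hashed, and only moved into a content-addressed blob path
+when the computed digest matches what the client claimed. This is a
+simplified, hypothetical sketch (the paths and function name are invented
+for illustration), not the registry's actual commit code:
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"os"
+)
+
+// verifyAndCommit hashes the uploaded file and renames it into a
+// content-addressed location only when the digest matches the value the
+// client claimed. A mismatch leaves the upload in its private directory,
+// so an inconsistent read can never become a visible blob.
+func verifyAndCommit(uploadPath, claimedDigest string) (string, error) {
+	f, err := os.Open(uploadPath)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	h := sha256.New()
+	if _, err := io.Copy(h, f); err != nil {
+		return "", err
+	}
+	actual := fmt.Sprintf("sha256:%x", h.Sum(nil))
+	if actual != claimedDigest {
+		return "", fmt.Errorf("digest mismatch: got %s, want %s", actual, claimedDigest)
+	}
+
+	// The destination is derived from the content itself, so concurrent
+	// writers of the same blob collide harmlessly: the last writer wins
+	// with identical bytes.
+	blobPath := "blobs/" + actual
+	return blobPath, os.Rename(uploadPath, blobPath)
+}
+```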
-// } -// } -// } -// -package auth - -import ( - "errors" - "fmt" - "net/http" - - "github.com/docker/distribution/context" -) - -const ( - // UserKey is used to get the user object from - // a user context - UserKey = "auth.user" - - // UserNameKey is used to get the user name from - // a user context - UserNameKey = "auth.user.name" -) - -var ( - // ErrInvalidCredential is returned when the auth token does not authenticate correctly. - ErrInvalidCredential = errors.New("invalid authorization credential") - - // ErrAuthenticationFailure returned when authentication fails. - ErrAuthenticationFailure = errors.New("authentication failure") -) - -// UserInfo carries information about -// an autenticated/authorized client. -type UserInfo struct { - Name string -} - -// Resource describes a resource by type and name. -type Resource struct { - Type string - Name string -} - -// Access describes a specific action that is -// requested or allowed for a given resource. -type Access struct { - Resource - Action string -} - -// Challenge is a special error type which is used for HTTP 401 Unauthorized -// responses and is able to write the response with WWW-Authenticate challenge -// header values based on the error. -type Challenge interface { - error - - // SetHeaders prepares the request to conduct a challenge response by - // adding the an HTTP challenge header on the response message. Callers - // are expected to set the appropriate HTTP status code (e.g. 401) - // themselves. - SetHeaders(w http.ResponseWriter) -} - -// AccessController controls access to registry resources based on a request -// and required access levels for a request. Implementations can support both -// complete denial and http authorization challenges. -type AccessController interface { - // Authorized returns a non-nil error if the context is granted access and - // returns a new authorized context. If one or more Access structs are - // provided, the requested access will be compared with what is available - // to the context. The given context will contain a "http.request" key with - // a `*http.Request` value. If the error is non-nil, access should always - // be denied. The error may be of type Challenge, in which case the caller - // may have the Challenge handle the request or choose what action to take - // based on the Challenge header or response status. The returned context - // object should have a "auth.user" value set to a UserInfo struct. - Authorized(ctx context.Context, access ...Access) (context.Context, error) -} - -// CredentialAuthenticator is an object which is able to authenticate credentials -type CredentialAuthenticator interface { - AuthenticateUser(username, password string) error -} - -// WithUser returns a context with the authorized user info. -func WithUser(ctx context.Context, user UserInfo) context.Context { - return userInfoContext{ - Context: ctx, - user: user, - } -} - -type userInfoContext struct { - context.Context - user UserInfo -} - -func (uic userInfoContext) Value(key interface{}) interface{} { - switch key { - case UserKey: - return uic.user - case UserNameKey: - return uic.user.Name - } - - return uic.Context.Value(key) -} - -// InitFunc is the type of an AccessController factory function and is used -// to register the constructor for different AccesController backends. 
-// InitFunc is the type of an AccessController factory function and is used -// to register the constructor for different AccessController backends. -type InitFunc func(options map[string]interface{}) (AccessController, error) - -var accessControllers map[string]InitFunc - -func init() { - accessControllers = make(map[string]InitFunc) -} - -// Register is used to register an InitFunc for -// an AccessController backend with the given name. -func Register(name string, initFunc InitFunc) error { - if _, exists := accessControllers[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - accessControllers[name] = initFunc - - return nil -} - -// GetAccessController constructs an AccessController -// with the given options using the named backend. -func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { - if initFunc, exists := accessControllers[name]; exists { - return initFunc(options) - } - - return nil, fmt.Errorf("no access controller registered with name: %s", name) -} diff --git a/docs/auth/htpasswd/access.go b/docs/auth/htpasswd/access.go deleted file mode 100644 index 4f71dc27..00000000 --- a/docs/auth/htpasswd/access.go +++ /dev/null @@ -1,97 +0,0 @@ -// Package htpasswd provides a simple authentication scheme that checks for the -// user credential hash in an htpasswd formatted file in a configuration-determined -// location. -// -// This authentication method MUST be used under TLS, as a simple token-replay attack is possible. -package htpasswd - -import ( - "fmt" - "net/http" - "os" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" -) - -type accessController struct { - realm string - htpasswd *htpasswd -} - -var _ auth.AccessController = &accessController{} - -func newAccessController(options map[string]interface{}) (auth.AccessController, error) { - realm, present := options["realm"] - if _, ok := realm.(string); !present || !ok { - return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`) - } - - path, present := options["path"] - if _, ok := path.(string); !present || !ok { - return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`) - } - - f, err := os.Open(path.(string)) - if err != nil { - return nil, err - } - defer f.Close() - - h, err := newHTPasswd(f) - if err != nil { - return nil, err - } - - return &accessController{realm: realm.(string), htpasswd: h}, nil -} - -func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { - req, err := context.GetRequest(ctx) - if err != nil { - return nil, err - } - - username, password, ok := req.BasicAuth() - if !ok { - return nil, &challenge{ - realm: ac.realm, - err: auth.ErrInvalidCredential, - } - } - - if err := ac.AuthenticateUser(username, password); err != nil { - context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) - return nil, &challenge{ - realm: ac.realm, - err: auth.ErrAuthenticationFailure, - } - } - - return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil -} - -func (ac *accessController) AuthenticateUser(username, password string) error { - return ac.htpasswd.authenticateUser(username, password) -} - -// challenge implements the auth.Challenge interface. -type challenge struct { - realm string - err error -} - -var _ auth.Challenge = challenge{} - -// SetHeaders sets the basic challenge header on the response.
-func (ch challenge) SetHeaders(w http.ResponseWriter) { - w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm)) -} - -func (ch challenge) Error() string { - return fmt.Sprintf("basic authentication challenge for realm %q: %s", ch.realm, ch.err) -} - -func init() { - auth.Register("htpasswd", auth.InitFunc(newAccessController)) -} diff --git a/docs/auth/htpasswd/access_test.go b/docs/auth/htpasswd/access_test.go deleted file mode 100644 index 553f05cf..00000000 --- a/docs/auth/htpasswd/access_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package htpasswd - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" -) - -func TestBasicAccessController(t *testing.T) { - testRealm := "The-Shire" - testUsers := []string{"bilbo", "frodo", "MiShil", "DeokMan"} - testPasswords := []string{"baggins", "baggins", "새주", "공주님"} - testHtpasswdContent := `bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= - frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W - MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 - DeokMan:공주님` - - tempFile, err := ioutil.TempFile("", "htpasswd-test") - if err != nil { - t.Fatal("could not create temporary htpasswd file") - } - if _, err = tempFile.WriteString(testHtpasswdContent); err != nil { - t.Fatal("could not write temporary htpasswd file") - } - - options := map[string]interface{}{ - "realm": testRealm, - "path": tempFile.Name(), - } - ctx := context.Background() - - accessController, err := newAccessController(options) - if err != nil { - t.Fatal("error creating access controller") - } - - tempFile.Close() - - var userNumber = 0 - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := context.WithRequest(ctx, r) - authCtx, err := accessController.Authorized(ctx) - if err != nil { - switch err := err.(type) { - case auth.Challenge: - err.SetHeaders(w) - w.WriteHeader(http.StatusUnauthorized) - return - default: - t.Fatalf("unexpected error authorizing request: %v", err) - } - } - - userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) - if !ok { - t.Fatal("basic accessController did not set auth.user context") - } - - if userInfo.Name != testUsers[userNumber] { - t.Fatalf("expected user name %q, got %q", testUsers[userNumber], userInfo.Name) - } - - w.WriteHeader(http.StatusNoContent) - })) - - client := &http.Client{ - CheckRedirect: nil, - } - - req, _ := http.NewRequest("GET", server.URL, nil) - resp, err := client.Do(req) - - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer resp.Body.Close() - - // Request should not be authorized - if resp.StatusCode != http.StatusUnauthorized { - t.Fatalf("unexpected non-fail response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) - } - - nonbcrypt := map[string]struct{}{ - "bilbo": {}, - "DeokMan": {}, - } - - for i := 0; i < len(testUsers); i++ { - userNumber = i - req, err := http.NewRequest("GET", server.URL, nil) - if err != nil { - t.Fatalf("error allocating new request: %v", err) - } - - req.SetBasicAuth(testUsers[i], testPasswords[i]) - - resp, err = client.Do(req) - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer resp.Body.Close() - - if _, ok := nonbcrypt[testUsers[i]]; ok { - // these are not allowed. 
- // Request should not be authorized - if resp.StatusCode != http.StatusUnauthorized { - t.Fatalf("unexpected non-fail response status: %v != %v for %s %s", resp.StatusCode, http.StatusUnauthorized, testUsers[i], testPasswords[i]) - } - } else { - // Request should be authorized - if resp.StatusCode != http.StatusNoContent { - t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i]) - } - } - } - -} diff --git a/docs/auth/htpasswd/htpasswd.go b/docs/auth/htpasswd/htpasswd.go deleted file mode 100644 index b10b256c..00000000 --- a/docs/auth/htpasswd/htpasswd.go +++ /dev/null @@ -1,82 +0,0 @@ -package htpasswd - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/docker/distribution/registry/auth" - - "golang.org/x/crypto/bcrypt" -) - -// htpasswd holds the credentials parsed from an htpasswd formatted file. -// Only bcrypt hash entries are supported. -type htpasswd struct { - entries map[string][]byte // maps username to password byte slice. } - -// newHTPasswd parses the reader and returns an htpasswd or an error. -func newHTPasswd(rd io.Reader) (*htpasswd, error) { - entries, err := parseHTPasswd(rd) - if err != nil { - return nil, err - } - - return &htpasswd{entries: entries}, nil -} - -// authenticateUser checks a given user:password credential against the -// receiving htpasswd's entries. If the check passes, nil is returned. -func (htpasswd *htpasswd) authenticateUser(username string, password string) error { - credentials, ok := htpasswd.entries[username] - if !ok { - // timing attack paranoia - bcrypt.CompareHashAndPassword([]byte{}, []byte(password)) - - return auth.ErrAuthenticationFailure - } - - err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password)) - if err != nil { - return auth.ErrAuthenticationFailure - } - - return nil -} - -// parseHTPasswd parses the contents of htpasswd. This will read all the -// entries in the file, whether or not they are needed. An error is returned -// if a syntax error is encountered or if the reader fails. -func parseHTPasswd(rd io.Reader) (map[string][]byte, error) { - entries := map[string][]byte{} - scanner := bufio.NewScanner(rd) - var line int - for scanner.Scan() { - line++ // 1-based line numbering - t := strings.TrimSpace(scanner.Text()) - - if len(t) < 1 { - continue - } - - // lines that *begin* with a '#' are considered comments - if t[0] == '#' { - continue - } - - i := strings.Index(t, ":") - if i < 0 || i >= len(t) { - return nil, fmt.Errorf("htpasswd: invalid entry at line %d: %q", line, scanner.Text()) - } - - entries[t[:i]] = []byte(t[i+1:]) - } - - if err := scanner.Err(); err != nil { - return nil, err - } - - return entries, nil -} diff --git a/docs/auth/htpasswd/htpasswd_test.go b/docs/auth/htpasswd/htpasswd_test.go deleted file mode 100644 index 309c359a..00000000 --- a/docs/auth/htpasswd/htpasswd_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package htpasswd - -import ( - "fmt" - "reflect" - "strings" - "testing" -) - -func TestParseHTPasswd(t *testing.T) { - - for _, tc := range []struct { - desc string - input string - err error - entries map[string][]byte - }{ - { - desc: "basic example", - input: ` -# This is a comment in a basic example.
-bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= -frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W -MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 -DeokMan:공주님 -`, - entries: map[string][]byte{ - "bilbo": []byte("{SHA}5siv5c0SHx681xU6GiSx9ZQryqs="), - "frodo": []byte("$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W"), - "MiShil": []byte("$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2"), - "DeokMan": []byte("공주님"), - }, - }, - { - desc: "ensures comments are filtered", - input: ` -# asdf:asdf -`, - }, - { - desc: "ensure midline hash is not comment", - input: ` -asdf:as#df -`, - entries: map[string][]byte{ - "asdf": []byte("as#df"), - }, - }, - { - desc: "ensure invalid entry returns a parse error", - input: ` -# A valid comment -valid:entry -asdf -`, - err: fmt.Errorf(`htpasswd: invalid entry at line 4: "asdf"`), - }, - } { - - entries, err := parseHTPasswd(strings.NewReader(tc.input)) - if err != tc.err { - if tc.err == nil { - t.Fatalf("%s: unexpected error: %v", tc.desc, err) - } else { - if err.Error() != tc.err.Error() { // use string equality here. - t.Fatalf("%s: expected error not returned: %v != %v", tc.desc, err, tc.err) - } - } - } - - if tc.err != nil { - continue // don't test output - } - - // allow empty and nil to be equal - if tc.entries == nil { - tc.entries = map[string][]byte{} - } - - if !reflect.DeepEqual(entries, tc.entries) { - t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries) - } - } - -} diff --git a/docs/auth/silly/access.go b/docs/auth/silly/access.go deleted file mode 100644 index 2b801d94..00000000 --- a/docs/auth/silly/access.go +++ /dev/null @@ -1,97 +0,0 @@ -// Package silly provides a simple authentication scheme that checks for the -// existence of an Authorization header and issues access if it is present and -// non-empty. -// -// This package is present as an example implementation of a minimal -// auth.AccessController and for testing. This is not suitable for any kind of -// production security. -package silly - -import ( - "fmt" - "net/http" - "strings" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" -) - -// accessController provides a simple implementation of auth.AccessController -// that checks for a non-empty Authorization header. It is useful for -// demonstration and testing. -type accessController struct { - realm string - service string -} - -var _ auth.AccessController = &accessController{} - -func newAccessController(options map[string]interface{}) (auth.AccessController, error) { - realm, present := options["realm"] - if _, ok := realm.(string); !present || !ok { - return nil, fmt.Errorf(`"realm" must be set for silly access controller`) - } - - service, present := options["service"] - if _, ok := service.(string); !present || !ok { - return nil, fmt.Errorf(`"service" must be set for silly access controller`) - } - - return &accessController{realm: realm.(string), service: service.(string)}, nil -} - -// Authorized simply checks for the existence of the authorization header, -// responding with a bearer challenge if it doesn't exist.
-func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { - req, err := context.GetRequest(ctx) - if err != nil { - return nil, err - } - - if req.Header.Get("Authorization") == "" { - challenge := challenge{ - realm: ac.realm, - service: ac.service, - } - - if len(accessRecords) > 0 { - var scopes []string - for _, access := range accessRecords { - scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action)) - } - challenge.scope = strings.Join(scopes, " ") - } - - return nil, &challenge - } - - return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil -} - -type challenge struct { - realm string - service string - scope string -} - -var _ auth.Challenge = challenge{} - -// SetHeaders sets a simple bearer challenge on the response. -func (ch challenge) SetHeaders(w http.ResponseWriter) { - header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) - - if ch.scope != "" { - header = fmt.Sprintf("%s,scope=%q", header, ch.scope) - } - - w.Header().Set("WWW-Authenticate", header) -} - -func (ch challenge) Error() string { - return fmt.Sprintf("silly authentication challenge: %#v", ch) -} - -// init registers the silly auth backend. -func init() { - auth.Register("silly", auth.InitFunc(newAccessController)) -} diff --git a/docs/auth/silly/access_test.go b/docs/auth/silly/access_test.go deleted file mode 100644 index a7c14cb9..00000000 --- a/docs/auth/silly/access_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package silly - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" -) - -func TestSillyAccessController(t *testing.T) { - ac := &accessController{ - realm: "test-realm", - service: "test-service", - } - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := context.WithValue(nil, "http.request", r) - authCtx, err := ac.Authorized(ctx) - if err != nil { - switch err := err.(type) { - case auth.Challenge: - err.SetHeaders(w) - w.WriteHeader(http.StatusUnauthorized) - return - default: - t.Fatalf("unexpected error authorizing request: %v", err) - } - } - - userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) - if !ok { - t.Fatal("silly accessController did not set auth.user context") - } - - if userInfo.Name != "silly" { - t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name) - } - - w.WriteHeader(http.StatusNoContent) - })) - - resp, err := http.Get(server.URL) - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer resp.Body.Close() - - // Request should not be authorized - if resp.StatusCode != http.StatusUnauthorized { - t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) - } - - req, err := http.NewRequest("GET", server.URL, nil) - if err != nil { - t.Fatalf("unexpected error creating new request: %v", err) - } - req.Header.Set("Authorization", "seriously, anything") - - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer resp.Body.Close() - - // Request should now be authorized - if resp.StatusCode != http.StatusNoContent { - t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent) - } -} diff --git a/docs/auth/token/accesscontroller.go b/docs/auth/token/accesscontroller.go deleted file mode 100644 index 5b1ff7ca..00000000 ---
a/docs/auth/token/accesscontroller.go +++ /dev/null @@ -1,268 +0,0 @@ -package token - -import ( - "crypto" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" - "github.com/docker/libtrust" -) - -// accessSet maps a typed, named resource to -// a set of actions requested or authorized. -type accessSet map[auth.Resource]actionSet - -// newAccessSet constructs an accessSet from -// a variable number of auth.Access items. -func newAccessSet(accessItems ...auth.Access) accessSet { - accessSet := make(accessSet, len(accessItems)) - - for _, access := range accessItems { - resource := auth.Resource{ - Type: access.Type, - Name: access.Name, - } - - set, exists := accessSet[resource] - if !exists { - set = newActionSet() - accessSet[resource] = set - } - - set.add(access.Action) - } - - return accessSet -} - -// contains returns whether or not the given access is in this accessSet. -func (s accessSet) contains(access auth.Access) bool { - actionSet, ok := s[access.Resource] - if ok { - return actionSet.contains(access.Action) - } - - return false -} - -// scopeParam returns a collection of scopes which can -// be used for a WWW-Authenticate challenge parameter. -// See https://tools.ietf.org/html/rfc6750#section-3 -func (s accessSet) scopeParam() string { - scopes := make([]string, 0, len(s)) - - for resource, actionSet := range s { - actions := strings.Join(actionSet.keys(), ",") - scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) - } - - return strings.Join(scopes, " ") -} - -// Errors used and exported by this package. -var ( - ErrInsufficientScope = errors.New("insufficient scope") - ErrTokenRequired = errors.New("authorization token required") -) - -// authChallenge implements the auth.Challenge interface. -type authChallenge struct { - err error - realm string - service string - accessSet accessSet -} - -var _ auth.Challenge = authChallenge{} - -// Error returns the internal error string for this authChallenge. -func (ac authChallenge) Error() string { - return ac.err.Error() -} - -// Status returns the HTTP Response Status Code for this authChallenge. -func (ac authChallenge) Status() int { - return http.StatusUnauthorized -} - -// challengeParams constructs the value to be used in -// the WWW-Authenticate response challenge header. -// See https://tools.ietf.org/html/rfc6750#section-3 -func (ac authChallenge) challengeParams() string { - str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) - - if scope := ac.accessSet.scopeParam(); scope != "" { - str = fmt.Sprintf("%s,scope=%q", str, scope) - } - - if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { - str = fmt.Sprintf("%s,error=%q", str, "invalid_token") - } else if ac.err == ErrInsufficientScope { - str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") - } - - return str -} - -// SetHeaders sets the WWW-Authenticate value for the response. -func (ac authChallenge) SetHeaders(w http.ResponseWriter) { - w.Header().Add("WWW-Authenticate", ac.challengeParams()) -} - -// accessController implements the auth.AccessController interface. -type accessController struct { - realm string - issuer string - service string - rootCerts *x509.CertPool - trustedKeys map[string]libtrust.PublicKey -}
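For reference, `challengeParams` above assembles header values of the following shape. The sketch below prints an illustrative example with invented realm, service and scope values; it mirrors the formatting logic but is not taken from the original file:

```go
package main

import "fmt"

func main() {
	// Mirrors challengeParams above: scheme, realm and service first,
	// then the optional scope and error parameters. All values here
	// are invented for illustration.
	realm := "https://auth.example.com/token"
	service := "registry.example.com"
	scope := "repository:foo/bar:pull,push"

	header := fmt.Sprintf("Bearer realm=%q,service=%q", realm, service)
	header = fmt.Sprintf("%s,scope=%q", header, scope)
	header = fmt.Sprintf("%s,error=%q", header, "insufficient_scope")

	fmt.Println("WWW-Authenticate:", header)
}
```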
-// tokenAccessOptions is a convenience type for handling -// options to the constructor of an accessController. -type tokenAccessOptions struct { - realm string - issuer string - service string - rootCertBundle string -} - -// checkOptions gathers the necessary options -// for an accessController from the given map. -func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { - var opts tokenAccessOptions - - keys := []string{"realm", "issuer", "service", "rootcertbundle"} - vals := make([]string, 0, len(keys)) - for _, key := range keys { - val, ok := options[key].(string) - if !ok { - return opts, fmt.Errorf("token auth requires a valid option string: %q", key) - } - vals = append(vals, val) - } - - opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] - - return opts, nil -} - -// newAccessController creates an accessController using the given options. -func newAccessController(options map[string]interface{}) (auth.AccessController, error) { - config, err := checkOptions(options) - if err != nil { - return nil, err - } - - fp, err := os.Open(config.rootCertBundle) - if err != nil { - return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) - } - defer fp.Close() - - rawCertBundle, err := ioutil.ReadAll(fp) - if err != nil { - return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) - } - - var rootCerts []*x509.Certificate - pemBlock, rawCertBundle := pem.Decode(rawCertBundle) - for pemBlock != nil { - cert, err := x509.ParseCertificate(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) - } - - rootCerts = append(rootCerts, cert) - - pemBlock, rawCertBundle = pem.Decode(rawCertBundle) - } - - if len(rootCerts) == 0 { - return nil, errors.New("token auth requires at least one token signing root certificate") - } - - rootPool := x509.NewCertPool() - trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) - for _, rootCert := range rootCerts { - rootPool.AddCert(rootCert) - pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) - if err != nil { - return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) - } - trustedKeys[pubKey.KeyID()] = pubKey - } - - return &accessController{ - realm: config.realm, - issuer: config.issuer, - service: config.service, - rootCerts: rootPool, - trustedKeys: trustedKeys, - }, nil -} - -// Authorized handles checking whether the given request is authorized -// for actions on resources described by the given access items.
-func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { - challenge := &authChallenge{ - realm: ac.realm, - service: ac.service, - accessSet: newAccessSet(accessItems...), - } - - req, err := context.GetRequest(ctx) - if err != nil { - return nil, err - } - - parts := strings.Split(req.Header.Get("Authorization"), " ") - - if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { - challenge.err = ErrTokenRequired - return nil, challenge - } - - rawToken := parts[1] - - token, err := NewToken(rawToken) - if err != nil { - challenge.err = err - return nil, challenge - } - - verifyOpts := VerifyOptions{ - TrustedIssuers: []string{ac.issuer}, - AcceptedAudiences: []string{ac.service}, - Roots: ac.rootCerts, - TrustedKeys: ac.trustedKeys, - } - - if err = token.Verify(verifyOpts); err != nil { - challenge.err = err - return nil, challenge - } - - accessSet := token.accessSet() - for _, access := range accessItems { - if !accessSet.contains(access) { - challenge.err = ErrInsufficientScope - return nil, challenge - } - } - - return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil -} - -// init handles registering the token auth backend. -func init() { - auth.Register("token", auth.InitFunc(newAccessController)) -} diff --git a/docs/auth/token/stringset.go b/docs/auth/token/stringset.go deleted file mode 100644 index 1d04f104..00000000 --- a/docs/auth/token/stringset.go +++ /dev/null @@ -1,35 +0,0 @@ -package token - -// StringSet is a useful type for looking up strings. -type stringSet map[string]struct{} - -// NewStringSet creates a new StringSet with the given strings. -func newStringSet(keys ...string) stringSet { - ss := make(stringSet, len(keys)) - ss.add(keys...) - return ss -} - -// Add inserts the given keys into this StringSet. -func (ss stringSet) add(keys ...string) { - for _, key := range keys { - ss[key] = struct{}{} - } -} - -// Contains returns whether the given key is in this StringSet. -func (ss stringSet) contains(key string) bool { - _, ok := ss[key] - return ok -} - -// Keys returns a slice of all keys in this StringSet. -func (ss stringSet) keys() []string { - keys := make([]string, 0, len(ss)) - - for key := range ss { - keys = append(keys, key) - } - - return keys -} diff --git a/docs/auth/token/token.go b/docs/auth/token/token.go deleted file mode 100644 index 2598f362..00000000 --- a/docs/auth/token/token.go +++ /dev/null @@ -1,343 +0,0 @@ -package token - -import ( - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/libtrust" - - "github.com/docker/distribution/registry/auth" -) - -const ( - // TokenSeparator is the value which separates the header, claims, and - // signature in the compact serialization of a JSON Web Token. - TokenSeparator = "." -) - -// Errors used by token parsing and verification. -var ( - ErrMalformedToken = errors.New("malformed token") - ErrInvalidToken = errors.New("invalid token") -) - -// ResourceActions stores allowed actions on a named and typed resource. -type ResourceActions struct { - Type string `json:"type"` - Name string `json:"name"` - Actions []string `json:"actions"` -} - -// ClaimSet describes the main section of a JSON Web Token. 
-type ClaimSet struct { - // Public claims - Issuer string `json:"iss"` - Subject string `json:"sub"` - Audience string `json:"aud"` - Expiration int64 `json:"exp"` - NotBefore int64 `json:"nbf"` - IssuedAt int64 `json:"iat"` - JWTID string `json:"jti"` - - // Private claims - Access []*ResourceActions `json:"access"` -} - -// Header describes the header section of a JSON Web Token. -type Header struct { - Type string `json:"typ"` - SigningAlg string `json:"alg"` - KeyID string `json:"kid,omitempty"` - X5c []string `json:"x5c,omitempty"` - RawJWK *json.RawMessage `json:"jwk,omitempty"` -} - -// Token describes a JSON Web Token. -type Token struct { - Raw string - Header *Header - Claims *ClaimSet - Signature []byte -} - -// VerifyOptions is used to specify -// options when verifying a JSON Web Token. -type VerifyOptions struct { - TrustedIssuers []string - AcceptedAudiences []string - Roots *x509.CertPool - TrustedKeys map[string]libtrust.PublicKey -} - -// NewToken parses the given raw token string -// and constructs an unverified JSON Web Token. -func NewToken(rawToken string) (*Token, error) { - parts := strings.Split(rawToken, TokenSeparator) - if len(parts) != 3 { - return nil, ErrMalformedToken - } - - var ( - rawHeader, rawClaims = parts[0], parts[1] - headerJSON, claimsJSON []byte - err error - ) - - defer func() { - if err != nil { - log.Errorf("error while unmarshalling raw token: %s", err) - } - }() - - if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil { - err = fmt.Errorf("unable to decode header: %s", err) - return nil, ErrMalformedToken - } - - if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil { - err = fmt.Errorf("unable to decode claims: %s", err) - return nil, ErrMalformedToken - } - - token := new(Token) - token.Header = new(Header) - token.Claims = new(ClaimSet) - - token.Raw = strings.Join(parts[:2], TokenSeparator) - if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil { - err = fmt.Errorf("unable to decode signature: %s", err) - return nil, ErrMalformedToken - } - - if err = json.Unmarshal(headerJSON, token.Header); err != nil { - return nil, ErrMalformedToken - } - - if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { - return nil, ErrMalformedToken - } - - return token, nil -} - -// Verify attempts to verify this token using the given options. -// Returns a nil error if the token is valid. -func (t *Token) Verify(verifyOpts VerifyOptions) error { - // Verify that the Issuer claim is a trusted authority. - if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) { - log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer) - return ErrInvalidToken - } - - // Verify that the Audience claim is allowed. - if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) { - log.Errorf("token intended for another audience: %q", t.Claims.Audience) - return ErrInvalidToken - } - - // Verify that the token is currently usable and not expired. - currentUnixTime := time.Now().Unix() - if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) { - log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime) - return ErrInvalidToken - } - - // Verify the token signature. - if len(t.Signature) == 0 { - log.Error("token has no signature") - return ErrInvalidToken - } - - // Verify that the signing key is trusted. 
- signingKey, err := t.VerifySigningKey(verifyOpts) - if err != nil { - log.Error(err) - return ErrInvalidToken - } - - // Finally, verify the signature of the token using the key which signed it. - if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil { - log.Errorf("unable to verify token signature: %s", err) - return ErrInvalidToken - } - - return nil -} - -// VerifySigningKey attempts to get the key which was used to sign this token. -// The token header should contain one of these three fields: -// `x5c` - The x509 certificate chain for the signing key. Needs to be -// verified. -// `jwk` - The JSON Web Key representation of the signing key. -// May contain its own `x5c` field which needs to be verified. -// `kid` - The unique identifier for the key. This library interprets it -// as a libtrust fingerprint. The key itself can be looked up in -// the trustedKeys field of the given verify options. -// These methods are tried in that order of preference until the -// signing key is found or an error is returned. -func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { - // First attempt to get an x509 certificate chain from the header. - var ( - x5c = t.Header.X5c - rawJWK = t.Header.RawJWK - keyID = t.Header.KeyID - ) - - switch { - case len(x5c) > 0: - signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) - case rawJWK != nil: - signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) - case len(keyID) > 0: - signingKey = verifyOpts.TrustedKeys[keyID] - if signingKey == nil { - err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID) - } - default: - err = errors.New("unable to get token signing key") - } - - return -} - -func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) { - if len(x5c) == 0 { - return nil, errors.New("empty x509 certificate chain") - } - - // Ensure the first element is encoded correctly. - leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0]) - if err != nil { - return nil, fmt.Errorf("unable to decode leaf certificate: %s", err) - } - - // And that it is a valid x509 certificate. - leafCert, err := x509.ParseCertificate(leafCertDer) - if err != nil { - return nil, fmt.Errorf("unable to parse leaf certificate: %s", err) - } - - // The rest of the certificate chain consists of intermediate certificates. - intermediates := x509.NewCertPool() - for i := 1; i < len(x5c); i++ { - intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) - if err != nil { - return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err) - } - - intermediateCert, err := x509.ParseCertificate(intermediateCertDer) - if err != nil { - return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err) - } - - intermediates.AddCert(intermediateCert) - } - - verifyOpts := x509.VerifyOptions{ - Intermediates: intermediates, - Roots: roots, - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - } - - // TODO: this call returns certificate chains which we ignore for now, but - // we should check them for revocations if we have the ability later. - if _, err = leafCert.Verify(verifyOpts); err != nil { - return nil, fmt.Errorf("unable to verify certificate chain: %s", err) - } - - // Get the public key from the leaf certificate.
- leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) - if !ok { - return nil, errors.New("unable to get leaf cert public key value") - } - - leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey) - if err != nil { - return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err) - } - - return -} - -func parseAndVerifyRawJWK(rawJWK *json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { - pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(*rawJWK)) - if err != nil { - return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) - } - - // Check to see if the key includes a certificate chain. - x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{}) - if !ok { - // The JWK should be one of the trusted root keys. - if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted { - return nil, errors.New("untrusted JWK with no certificate chain") - } - - // The JWK is one of the trusted keys. - return - } - - // Ensure each item in the chain is of the correct type. - x5c := make([]string, len(x5cVal)) - for i, val := range x5cVal { - certString, ok := val.(string) - if !ok || len(certString) == 0 { - return nil, errors.New("malformed certificate chain") - } - x5c[i] = certString - } - - // Ensure that the x509 certificate chain can - // be verified up to one of our trusted roots. - leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots) - if err != nil { - return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err) - } - - // Verify that the public key in the leaf cert *is* the signing key. - if pubKey.KeyID() != leafKey.KeyID() { - return nil, errors.New("leaf certificate public key ID does not match JWK key ID") - } - - return -} - -// accessSet returns a set of actions available for the resource -// actions listed in the `access` section of this token. -func (t *Token) accessSet() accessSet { - if t.Claims == nil { - return nil - } - - accessSet := make(accessSet, len(t.Claims.Access)) - - for _, resourceActions := range t.Claims.Access { - resource := auth.Resource{ - Type: resourceActions.Type, - Name: resourceActions.Name, - } - - set, exists := accessSet[resource] - if !exists { - set = newActionSet() - accessSet[resource] = set - } - - for _, action := range resourceActions.Actions { - set.add(action) - } - } - - return accessSet -} - -func (t *Token) compactRaw() string { - return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) -} diff --git a/docs/auth/token/token_test.go b/docs/auth/token/token_test.go deleted file mode 100644 index 827dbbd7..00000000 --- a/docs/auth/token/token_test.go +++ /dev/null @@ -1,387 +0,0 @@ -package token - -import ( - "crypto" - "crypto/rand" - "crypto/x509" - "encoding/base64" - "encoding/json" - "encoding/pem" - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" - "testing" - "time" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" - "github.com/docker/libtrust" -) - -func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) { - keys := make([]libtrust.PrivateKey, 0, numKeys) - - for i := 0; i < numKeys; i++ { - key, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, err - } - keys = append(keys, key) - } - - return keys, nil -} - -func makeSigningKeyWithChain(rootKey libtrust.PrivateKey, depth int) (libtrust.PrivateKey, error) { - if depth == 0 { - // Don't need to build a chain. 
- return rootKey, nil - } - - var ( - x5c = make([]string, depth) - parentKey = rootKey - key libtrust.PrivateKey - cert *x509.Certificate - err error - ) - - for depth > 0 { - if key, err = libtrust.GenerateECP256PrivateKey(); err != nil { - return nil, err - } - - if cert, err = libtrust.GenerateCACert(parentKey, key); err != nil { - return nil, err - } - - depth-- - x5c[depth] = base64.StdEncoding.EncodeToString(cert.Raw) - parentKey = key - } - - key.AddExtendedField("x5c", x5c) - - return key, nil -} - -func makeRootCerts(rootKeys []libtrust.PrivateKey) ([]*x509.Certificate, error) { - certs := make([]*x509.Certificate, 0, len(rootKeys)) - - for _, key := range rootKeys { - cert, err := libtrust.GenerateCACert(key, key) - if err != nil { - return nil, err - } - certs = append(certs, cert) - } - - return certs, nil -} - -func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.PublicKey { - trustedKeys := make(map[string]libtrust.PublicKey, len(rootKeys)) - - for _, key := range rootKeys { - trustedKeys[key.KeyID()] = key.PublicKey() - } - - return trustedKeys -} - -func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int) (*Token, error) { - signingKey, err := makeSigningKeyWithChain(rootKey, depth) - if err != nil { - return nil, fmt.Errorf("unable to make signing key with chain: %s", err) - } - - var rawJWK json.RawMessage - rawJWK, err = signingKey.PublicKey().MarshalJSON() - if err != nil { - return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err) - } - - joseHeader := &Header{ - Type: "JWT", - SigningAlg: "ES256", - RawJWK: &rawJWK, - } - - now := time.Now() - - randomBytes := make([]byte, 15) - if _, err = rand.Read(randomBytes); err != nil { - return nil, fmt.Errorf("unable to read random bytes for jwt id: %s", err) - } - - claimSet := &ClaimSet{ - Issuer: issuer, - Subject: "foo", - Audience: audience, - Expiration: now.Add(5 * time.Minute).Unix(), - NotBefore: now.Unix(), - IssuedAt: now.Unix(), - JWTID: base64.URLEncoding.EncodeToString(randomBytes), - Access: access, - } - - var joseHeaderBytes, claimSetBytes []byte - - if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil { - return nil, fmt.Errorf("unable to marshal jose header: %s", err) - } - if claimSetBytes, err = json.Marshal(claimSet); err != nil { - return nil, fmt.Errorf("unable to marshal claim set: %s", err) - } - - encodedJoseHeader := joseBase64UrlEncode(joseHeaderBytes) - encodedClaimSet := joseBase64UrlEncode(claimSetBytes) - encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet) - - var signatureBytes []byte - if signatureBytes, _, err = signingKey.Sign(strings.NewReader(encodingToSign), crypto.SHA256); err != nil { - return nil, fmt.Errorf("unable to sign jwt payload: %s", err) - } - - signature := joseBase64UrlEncode(signatureBytes) - tokenString := fmt.Sprintf("%s.%s", encodingToSign, signature) - - return NewToken(tokenString) -} - -// This test makes 4 tokens with a varying number of intermediate -// certificates ranging from no intermediate chain to a length of 3 -// intermediates. 
-func TestTokenVerify(t *testing.T) { - var ( - numTokens = 4 - issuer = "test-issuer" - audience = "test-audience" - access = []*ResourceActions{ - { - Type: "repository", - Name: "foo/bar", - Actions: []string{"pull", "push"}, - }, - } - ) - - rootKeys, err := makeRootKeys(numTokens) - if err != nil { - t.Fatal(err) - } - - rootCerts, err := makeRootCerts(rootKeys) - if err != nil { - t.Fatal(err) - } - - rootPool := x509.NewCertPool() - for _, rootCert := range rootCerts { - rootPool.AddCert(rootCert) - } - - trustedKeys := makeTrustedKeyMap(rootKeys) - - tokens := make([]*Token, 0, numTokens) - - for i := 0; i < numTokens; i++ { - token, err := makeTestToken(issuer, audience, access, rootKeys[i], i) - if err != nil { - t.Fatal(err) - } - tokens = append(tokens, token) - } - - verifyOps := VerifyOptions{ - TrustedIssuers: []string{issuer}, - AcceptedAudiences: []string{audience}, - Roots: rootPool, - TrustedKeys: trustedKeys, - } - - for _, token := range tokens { - if err := token.Verify(verifyOps); err != nil { - t.Fatal(err) - } - } -} - -func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err error) { - rootCerts, err := makeRootCerts(rootKeys) - if err != nil { - return "", err - } - - tempFile, err := ioutil.TempFile("", "rootCertBundle") - if err != nil { - return "", err - } - defer tempFile.Close() - - for _, cert := range rootCerts { - if err = pem.Encode(tempFile, &pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Raw, - }); err != nil { - os.Remove(tempFile.Name()) - return "", err - } - } - - return tempFile.Name(), nil -} - -// TestAccessController tests complete integration of the token auth package. -// It starts by mocking the options for a token auth accessController which -// it creates. It then tries a few mock requests: -// - don't supply a token; should error with challenge -// - supply an invalid token; should error with challenge -// - supply a token with insufficient access; should error with challenge -// - supply a valid token; should not error -func TestAccessController(t *testing.T) { - // Make 2 keys; only the first is to be a trusted root key. - rootKeys, err := makeRootKeys(2) - if err != nil { - t.Fatal(err) - } - - rootCertBundleFilename, err := writeTempRootCerts(rootKeys[:1]) - if err != nil { - t.Fatal(err) - } - defer os.Remove(rootCertBundleFilename) - - realm := "https://auth.example.com/token/" - issuer := "test-issuer.example.com" - service := "test-service.example.com" - - options := map[string]interface{}{ - "realm": realm, - "issuer": issuer, - "service": service, - "rootcertbundle": rootCertBundleFilename, - } - - accessController, err := newAccessController(options) - if err != nil { - t.Fatal(err) - } - - // 1. Make a mock http.Request with no token. - req, err := http.NewRequest("GET", "http://example.com/foo", nil) - if err != nil { - t.Fatal(err) - } - - testAccess := auth.Access{ - Resource: auth.Resource{ - Type: "foo", - Name: "bar", - }, - Action: "baz", - } - - ctx := context.WithValue(nil, "http.request", req) - authCtx, err := accessController.Authorized(ctx, testAccess) - challenge, ok := err.(auth.Challenge) - if !ok { - t.Fatal("accessController did not return a challenge") - } - - if challenge.Error() != ErrTokenRequired.Error() { - t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) - } - - if authCtx != nil { - t.Fatalf("expected nil auth context but got %s", authCtx) - } - - // 2. Supply an invalid token. 
- token, err := makeTestToken( - issuer, service, - []*ResourceActions{{ - Type: testAccess.Type, - Name: testAccess.Name, - Actions: []string{testAccess.Action}, - }}, - rootKeys[1], 1, // Everything is valid except the key which signed it. - ) - if err != nil { - t.Fatal(err) - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) - - authCtx, err = accessController.Authorized(ctx, testAccess) - challenge, ok = err.(auth.Challenge) - if !ok { - t.Fatal("accessController did not return a challenge") - } - - if challenge.Error() != ErrInvalidToken.Error() { - t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInvalidToken) - } - - if authCtx != nil { - t.Fatalf("expected nil auth context but got %s", authCtx) - } - - // 3. Supply a token with insufficient access. - token, err = makeTestToken( - issuer, service, - []*ResourceActions{}, // No access specified. - rootKeys[0], 1, - ) - if err != nil { - t.Fatal(err) - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) - - authCtx, err = accessController.Authorized(ctx, testAccess) - challenge, ok = err.(auth.Challenge) - if !ok { - t.Fatal("accessController did not return a challenge") - } - - if challenge.Error() != ErrInsufficientScope.Error() { - t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope) - } - - if authCtx != nil { - t.Fatalf("expected nil auth context but got %s", authCtx) - } - - // 4. Supply the token we need, or deserve, or whatever. - token, err = makeTestToken( - issuer, service, - []*ResourceActions{{ - Type: testAccess.Type, - Name: testAccess.Name, - Actions: []string{testAccess.Action}, - }}, - rootKeys[0], 1, - ) - if err != nil { - t.Fatal(err) - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) - - authCtx, err = accessController.Authorized(ctx, testAccess) - if err != nil { - t.Fatalf("accessController returned unexpected error: %s", err) - } - - userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) - if !ok { - t.Fatal("token accessController did not set auth.user context") - } - - if userInfo.Name != "foo" { - t.Fatalf("expected user name %q, got %q", "foo", userInfo.Name) - } -} diff --git a/docs/auth/token/util.go b/docs/auth/token/util.go deleted file mode 100644 index d7f95be4..00000000 --- a/docs/auth/token/util.go +++ /dev/null @@ -1,58 +0,0 @@ -package token - -import ( - "encoding/base64" - "errors" - "strings" -) - -// joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters omitted in accordance -// with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlEncode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -}
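The padding rule in `joseBase64UrlDecode` above is easy to verify in isolation: the length of an unpadded base64url string modulo 4 determines how many `=` characters were stripped. A minimal, self-contained sketch of the same rule using only the standard library (independent of this package):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	payload := []byte(`{"hello":"jose"}`) // 16 bytes, so encoding ends in "=="

	// Encode, then strip the trailing '=' padding as JOSE requires.
	unpadded := strings.TrimRight(base64.URLEncoding.EncodeToString(payload), "=")

	// Restore the padding from len % 4 before decoding, mirroring the
	// switch statement in joseBase64UrlDecode above.
	s := unpadded
	switch len(s) % 4 {
	case 2:
		s += "=="
	case 3:
		s += "="
	}
	decoded, err := base64.URLEncoding.DecodeString(s)
	fmt.Println(string(decoded), err == nil) // {"hello":"jose"} true
}
```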
-// actionSet is a special type of stringSet. -type actionSet struct { - stringSet -} - -func newActionSet(actions ...string) actionSet { - return actionSet{newStringSet(actions...)} -} - -// contains calls stringSet.contains() for -// either "*" or the given action string. -func (s actionSet) contains(action string) bool { - return s.stringSet.contains("*") || s.stringSet.contains(action) -} - -// contains returns true if q is found in ss. -func contains(ss []string, q string) bool { - for _, s := range ss { - if s == q { - return true - } - } - - return false -} diff --git a/docs/client/auth/api_version.go b/docs/client/auth/api_version.go deleted file mode 100644 index 7d8f1d95..00000000 --- a/docs/client/auth/api_version.go +++ /dev/null @@ -1,58 +0,0 @@ -package auth - -import ( - "net/http" - "strings" -) - -// APIVersion represents a version of an API including its -// type and version number. -type APIVersion struct { - // Type refers to the name of a specific API specification - // such as "registry" - Type string - - // Version is the version of the API specification implemented. - // This may omit the revision number and only include - // the major and minor version, such as "2.0" - Version string -} - -// String returns the string formatted API Version -func (v APIVersion) String() string { - return v.Type + "/" + v.Version -} - -// APIVersions gets the API versions out of an HTTP response using the provided -// version header as the key for the HTTP header. -func APIVersions(resp *http.Response, versionHeader string) []APIVersion { - versions := []APIVersion{} - if versionHeader != "" { - for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { - for _, version := range strings.Fields(supportedVersions) { - versions = append(versions, ParseAPIVersion(version)) - } - } - } - return versions -} - -// ParseAPIVersion parses an API version string into an APIVersion -// Format (Expected, not enforced): -// API version string = <API type> '/' <API version> -// API type = [a-z][a-z0-9]* -// API version = [0-9]+(\.[0-9]+)? -// TODO(dmcgowan): Enforce format, add error condition, remove unknown type -func ParseAPIVersion(versionStr string) APIVersion { - idx := strings.IndexRune(versionStr, '/') - if idx == -1 { - return APIVersion{ - Type: "unknown", - Version: versionStr, - } - } - return APIVersion{ - Type: strings.ToLower(versionStr[:idx]), - Version: versionStr[idx+1:], - } -}
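A quick sketch of how these helpers fit together (hypothetical usage, assuming the package is imported from `github.com/docker/distribution/registry/client/auth`; `Docker-Distribution-API-Version` is the header conventionally passed here, and the values are invented):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client/auth"
)

func main() {
	resp := &http.Response{Header: make(http.Header)}
	resp.Header.Add("Docker-Distribution-API-Version", "registry/2.0")

	// APIVersions splits each header value on whitespace and parses
	// every field as a "type/version" pair via ParseAPIVersion.
	for _, v := range auth.APIVersions(resp, "Docker-Distribution-API-Version") {
		fmt.Println(v.Type, v.Version) // registry 2.0
	}
}
```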
diff --git a/docs/client/auth/authchallenge.go b/docs/client/auth/authchallenge.go deleted file mode 100644 index c8cd83bb..00000000 --- a/docs/client/auth/authchallenge.go +++ /dev/null @@ -1,220 +0,0 @@ -package auth - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -// Challenge carries information from a WWW-Authenticate response header. -// See RFC 2617. -type Challenge struct { - // Scheme is the auth-scheme according to RFC 2617 - Scheme string - - // Parameters are the auth-params according to RFC 2617 - Parameters map[string]string -} - -// ChallengeManager manages the challenges for endpoints. -// The challenges are pulled out of HTTP responses. Only -// responses which expect challenges should be added to -// the manager, since a non-unauthorized request will be -// viewed as not requiring challenges. -type ChallengeManager interface { - // GetChallenges returns the challenges for the given - // endpoint URL. - GetChallenges(endpoint url.URL) ([]Challenge, error) - - // AddResponse adds the response to the challenge - // manager. The challenges will be parsed out of - // the WWW-Authenticate headers and associated with - // the URL which produced the response. If the - // response was authorized, any challenges for the - // endpoint will be cleared. - AddResponse(resp *http.Response) error -} - -// NewSimpleChallengeManager returns an instance of -// ChallengeManager which only maps endpoints to challenges -// based on the responses which have been added to the -// manager. The simple manager will make no attempt to -// perform requests on the endpoints or cache the responses -// to a backend. -func NewSimpleChallengeManager() ChallengeManager { - return simpleChallengeManager{} -} - -type simpleChallengeManager map[string][]Challenge - -func (m simpleChallengeManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { - endpoint.Host = strings.ToLower(endpoint.Host) - - challenges := m[endpoint.String()] - return challenges, nil -} - -func (m simpleChallengeManager) AddResponse(resp *http.Response) error { - challenges := ResponseChallenges(resp) - if resp.Request == nil { - return fmt.Errorf("missing request reference") - } - urlCopy := url.URL{ - Path: resp.Request.URL.Path, - Host: strings.ToLower(resp.Request.URL.Host), - Scheme: resp.Request.URL.Scheme, - } - m[urlCopy.String()] = challenges - return nil -} - -// Octet types from RFC 2616. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET      = <any 8-bit sequence of data> - // CHAR       = <any US-ASCII character (octets 0 - 127)> - // CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)> - // CR         = <US-ASCII CR, carriage return (13)> - // LF         = <US-ASCII LF, linefeed (10)> - // SP         = <US-ASCII SP, space (32)> - // HT         = <US-ASCII HT, horizontal-tab (9)> - // <">        = <US-ASCII double-quote mark (34)> - // CRLF       = CR LF - // LWS        = [CRLF] 1*( SP | HT ) - // TEXT       = <any OCTET except CTLs, but including LWS> - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - //              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token      = 1*<any CHAR except CTLs or separators> - // qdtext     = <any TEXT except <">> - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -// ResponseChallenges returns a list of authorization challenges -// for the given http Response. Challenges are only checked if -// the response status code was a 401. -func ResponseChallenges(resp *http.Response) []Challenge { - if resp.StatusCode == http.StatusUnauthorized { - // Parse the WWW-Authenticate Header and store the challenges - // on this endpoint object.
- return parseAuthHeader(resp.Header) - } - - return nil -} - -func parseAuthHeader(header http.Header) []Challenge { - challenges := []Challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) - } - } - return challenges -} - -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/docs/client/auth/authchallenge_test.go b/docs/client/auth/authchallenge_test.go deleted file mode 100644 index 953ed5b4..00000000 --- a/docs/client/auth/authchallenge_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package auth - -import ( - "fmt" - "net/http" - "net/url" - "strings" - "testing" -) - -func TestAuthChallengeParse(t *testing.T) { - header := http.Header{} - header.Add("WWW-Authenticate", `Bearer realm="https://auth.example.com/token",service="registry.example.com",other=fun,slashed="he\"\l\lo"`) - - challenges := parseAuthHeader(header) - if len(challenges) != 1 { - t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges)) - } - challenge := challenges[0] - - if expected := "bearer"; challenge.Scheme != expected { - t.Fatalf("Unexpected scheme: %s, expected: %s", challenge.Scheme, expected) - } - - if expected := "https://auth.example.com/token"; challenge.Parameters["realm"] != expected { - t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["realm"], expected) - } - - if expected := "registry.example.com"; challenge.Parameters["service"] != expected { - t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["service"], expected) - } - - if expected := "fun"; challenge.Parameters["other"] != expected { - t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["other"], expected) - } - - if expected := "he\"llo"; challenge.Parameters["slashed"] != expected { - t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected) - } - -} - -func TestAuthChallengeNormalization(t *testing.T) { - testAuthChallengeNormalization(t, "reg.EXAMPLE.com") - 
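// NOTE: the second case below repeats the check with a non-ASCII
// ("mirror text") hostname to confirm that the ToLower-based host
// normalization in the challenge manager is byte-safe for multibyte
// UTF-8 input, not just for ASCII uppercase letters.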
testAuthChallengeNormalization(t, "bɿɒʜɔiɿ-ɿɘƚƨim-ƚol-ɒ-ƨʞnɒʜƚ.com") -} - -func testAuthChallengeNormalization(t *testing.T, host string) { - - scm := NewSimpleChallengeManager() - - url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) - if err != nil { - t.Fatal(err) - } - - resp := &http.Response{ - Request: &http.Request{ - URL: url, - }, - Header: make(http.Header), - StatusCode: http.StatusUnauthorized, - } - resp.Header.Add("WWW-Authenticate", fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host)) - - err = scm.AddResponse(resp) - if err != nil { - t.Fatal(err) - } - - lowered := *url - lowered.Host = strings.ToLower(lowered.Host) - c, err := scm.GetChallenges(lowered) - if err != nil { - t.Fatal(err) - } - - if len(c) == 0 { - t.Fatal("Expected challenge for lower-cased-host URL") - } -} diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go deleted file mode 100644 index f3497b17..00000000 --- a/docs/client/auth/session.go +++ /dev/null @@ -1,480 +0,0 @@ -package auth - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/transport" -) - -var ( - // ErrNoBasicAuthCredentials is returned if a request can't be authorized with - // basic auth due to lack of credentials. - ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") - - // ErrNoToken is returned if a request is successful but the body does not - // contain an authorization token. - ErrNoToken = errors.New("authorization server did not include a token in the response") -) - -const defaultClientID = "registry-client" - -// AuthenticationHandler is an interface for authorizing a request from -// params from a "WWW-Authenicate" header for a single scheme. -type AuthenticationHandler interface { - // Scheme returns the scheme as expected from the "WWW-Authenicate" header. - Scheme() string - - // AuthorizeRequest adds the authorization header to a request (if needed) - // using the parameters from "WWW-Authenticate" method. The parameters - // values depend on the scheme. - AuthorizeRequest(req *http.Request, params map[string]string) error -} - -// CredentialStore is an interface for getting credentials for -// a given URL -type CredentialStore interface { - // Basic returns basic auth for the given URL - Basic(*url.URL) (string, string) - - // RefreshToken returns a refresh token for the - // given URL and service - RefreshToken(*url.URL, string) string - - // SetRefreshToken sets the refresh token if none - // is provided for the given url and service - SetRefreshToken(realm *url.URL, service, token string) -} - -// NewAuthorizer creates an authorizer which can handle multiple authentication -// schemes. The handlers are tried in order, the higher priority authentication -// methods should be first. The challengeMap holds a list of challenges for -// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). 
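As an illustration of the CredentialStore contract above, a minimal in-memory implementation might look like the following sketch (staticCredentials is a hypothetical type, not part of this package; the testCredentialStore in the tests further down is the same idea):

// staticCredentials is a hypothetical CredentialStore that always returns
// the same username/password and keeps refresh tokens per service in a map.
type staticCredentials struct {
    username, password string
    refreshTokens      map[string]string // keyed by service name
}

func (s *staticCredentials) Basic(*url.URL) (string, string) {
    return s.username, s.password
}

func (s *staticCredentials) RefreshToken(_ *url.URL, service string) string {
    return s.refreshTokens[service]
}

func (s *staticCredentials) SetRefreshToken(_ *url.URL, service, token string) {
    if s.refreshTokens != nil {
        s.refreshTokens[service] = token
    }
}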
-func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier { - return &endpointAuthorizer{ - challenges: manager, - handlers: handlers, - } -} - -type endpointAuthorizer struct { - challenges ChallengeManager - handlers []AuthenticationHandler - transport http.RoundTripper -} - -func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { - v2Root := strings.Index(req.URL.Path, "/v2/") - if v2Root == -1 { - return nil - } - - ping := url.URL{ - Host: req.URL.Host, - Scheme: req.URL.Scheme, - Path: req.URL.Path[:v2Root+4], - } - - challenges, err := ea.challenges.GetChallenges(ping) - if err != nil { - return err - } - - if len(challenges) > 0 { - for _, handler := range ea.handlers { - for _, challenge := range challenges { - if challenge.Scheme != handler.Scheme() { - continue - } - if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil { - return err - } - } - } - } - - return nil -} - -// This is the minimum duration a token can last (in seconds). -// A token must not live less than 60 seconds because older versions -// of the Docker client didn't read their expiration from the token -// response and assumed 60 seconds. So to remain compatible with -// those implementations, a token must live at least this long. -const minimumTokenLifetimeSeconds = 60 - -// Private interface for time used by this package to enable tests to provide their own implementation. -type clock interface { - Now() time.Time -} - -type tokenHandler struct { - header http.Header - creds CredentialStore - transport http.RoundTripper - clock clock - - offlineAccess bool - forceOAuth bool - clientID string - scopes []Scope - - tokenLock sync.Mutex - tokenCache string - tokenExpiration time.Time -} - -// Scope is a type which is serializable to a string -// using the allow scope grammar. -type Scope interface { - String() string -} - -// RepositoryScope represents a token scope for access -// to a repository. -type RepositoryScope struct { - Repository string - Actions []string -} - -// String returns the string representation of the repository -// using the scope grammar -func (rs RepositoryScope) String() string { - return fmt.Sprintf("repository:%s:%s", rs.Repository, strings.Join(rs.Actions, ",")) -} - -// TokenHandlerOptions is used to configure a new token handler -type TokenHandlerOptions struct { - Transport http.RoundTripper - Credentials CredentialStore - - OfflineAccess bool - ForceOAuth bool - ClientID string - Scopes []Scope -} - -// An implementation of clock for providing real time data. -type realClock struct{} - -// Now implements clock -func (realClock) Now() time.Time { return time.Now() } - -// NewTokenHandler creates a new AuthenicationHandler which supports -// fetching tokens from a remote token server. -func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { - // Create options... - return NewTokenHandlerWithOptions(TokenHandlerOptions{ - Transport: transport, - Credentials: creds, - Scopes: []Scope{ - RepositoryScope{ - Repository: scope, - Actions: actions, - }, - }, - }) -} - -// NewTokenHandlerWithOptions creates a new token handler using the provided -// options structure. 
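To see how these pieces compose, here is a hedged wiring sketch using only names defined in this package and in registry/client/transport; registryURL and creds are assumed to exist and are not part of the original code:

// Discover the endpoint's challenges once, then let the authorizer
// answer them on every subsequent request.
manager := NewSimpleChallengeManager()
resp, err := http.Get(registryURL + "/v2/")
if err != nil {
    // handle error
}
resp.Body.Close()
if err := manager.AddResponse(resp); err != nil {
    // handle error
}

// Handlers are tried in order: token ("bearer") auth first, basic second.
// The scope grammar is "repository:<name>:<actions>", for example
// "repository:some/registry:pull,push".
authorizer := NewAuthorizer(manager,
    NewTokenHandler(nil, creds, "some/registry", "pull", "push"),
    NewBasicHandler(creds))
client := &http.Client{Transport: transport.NewTransport(nil, authorizer)}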
-func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { - handler := &tokenHandler{ - transport: options.Transport, - creds: options.Credentials, - offlineAccess: options.OfflineAccess, - forceOAuth: options.ForceOAuth, - clientID: options.ClientID, - scopes: options.Scopes, - clock: realClock{}, - } - - return handler -} - -func (th *tokenHandler) client() *http.Client { - return &http.Client{ - Transport: th.transport, - Timeout: 15 * time.Second, - } -} - -func (th *tokenHandler) Scheme() string { - return "bearer" -} - -func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - var additionalScopes []string - if fromParam := req.URL.Query().Get("from"); fromParam != "" { - additionalScopes = append(additionalScopes, RepositoryScope{ - Repository: fromParam, - Actions: []string{"pull"}, - }.String()) - } - - token, err := th.getToken(params, additionalScopes...) - if err != nil { - return err - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - - return nil -} - -func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { - th.tokenLock.Lock() - defer th.tokenLock.Unlock() - scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) - for _, scope := range th.scopes { - scopes = append(scopes, scope.String()) - } - var addedScopes bool - for _, scope := range additionalScopes { - scopes = append(scopes, scope) - addedScopes = true - } - - now := th.clock.Now() - if now.After(th.tokenExpiration) || addedScopes { - token, expiration, err := th.fetchToken(params, scopes) - if err != nil { - return "", err - } - - // do not update cache for added scope tokens - if !addedScopes { - th.tokenCache = token - th.tokenExpiration = expiration - } - - return token, nil - } - - return th.tokenCache, nil -} - -type postTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { - form := url.Values{} - form.Set("scope", strings.Join(scopes, " ")) - form.Set("service", service) - - clientID := th.clientID - if clientID == "" { - // Use default client, this is a required field - clientID = defaultClientID - } - form.Set("client_id", clientID) - - if refreshToken != "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", refreshToken) - } else if th.creds != nil { - form.Set("grant_type", "password") - username, password := th.creds.Basic(realm) - form.Set("username", username) - form.Set("password", password) - - // attempt to get a refresh token - form.Set("access_type", "offline") - } else { - // refuse to do oauth without a grant type - return "", time.Time{}, fmt.Errorf("no supported grant type") - } - - resp, err := th.client().PostForm(realm.String(), form) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr postTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { - 
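// NOTE: a refresh token different from the one sent means the server
// rotated it; persist the new value so later fetches use it
// (TestEndpointAuthorizeRefreshToken below relies on this).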
th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. - tr.IssuedAt = th.clock.Now().UTC() - } - - return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil -} - -type getTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` -} - -func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) { - - req, err := http.NewRequest("GET", realm.String(), nil) - if err != nil { - return "", time.Time{}, err - } - - reqParams := req.URL.Query() - - if service != "" { - reqParams.Add("service", service) - } - - for _, scope := range scopes { - reqParams.Add("scope", scope) - } - - if th.offlineAccess { - reqParams.Add("offline_token", "true") - clientID := th.clientID - if clientID == "" { - clientID = defaultClientID - } - reqParams.Add("client_id", clientID) - } - - if th.creds != nil { - username, password := th.creds.Basic(realm) - if username != "" && password != "" { - reqParams.Add("account", username) - req.SetBasicAuth(username, password) - } - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := th.client().Do(req) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr getTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && th.creds != nil { - th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - // `access_token` is equivalent to `token` and if both are specified - // the choice is undefined. Canonicalize `access_token` by sticking - // things in `token`. - if tr.AccessToken != "" { - tr.Token = tr.AccessToken - } - - if tr.Token == "" { - return "", time.Time{}, ErrNoToken - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. 
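// NOTE: falling back to the local clock makes the expiry window start
// at receipt time rather than at server-side issue time.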
- tr.IssuedAt = th.clock.Now().UTC() - } - - return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil -} - -func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) { - realm, ok := params["realm"] - if !ok { - return "", time.Time{}, errors.New("no realm specified for token auth challenge") - } - - // TODO(dmcgowan): Handle empty scheme and relative realm - realmURL, err := url.Parse(realm) - if err != nil { - return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err) - } - - service := params["service"] - - var refreshToken string - - if th.creds != nil { - refreshToken = th.creds.RefreshToken(realmURL, service) - } - - if refreshToken != "" || th.forceOAuth { - return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) - } - - return th.fetchTokenWithBasicAuth(realmURL, service, scopes) -} - -type basicHandler struct { - creds CredentialStore -} - -// NewBasicHandler creaters a new authentiation handler which adds -// basic authentication credentials to a request. -func NewBasicHandler(creds CredentialStore) AuthenticationHandler { - return &basicHandler{ - creds: creds, - } -} - -func (*basicHandler) Scheme() string { - return "basic" -} - -func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - if bh.creds != nil { - username, password := bh.creds.Basic(req.URL) - if username != "" && password != "" { - req.SetBasicAuth(username, password) - return nil - } - } - return ErrNoBasicAuthCredentials -} diff --git a/docs/client/auth/session_test.go b/docs/client/auth/session_test.go deleted file mode 100644 index 96c62990..00000000 --- a/docs/client/auth/session_test.go +++ /dev/null @@ -1,787 +0,0 @@ -package auth - -import ( - "encoding/base64" - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "testing" - "time" - - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/testutil" -) - -// An implementation of clock for providing fake time data. -type fakeClock struct { - current time.Time -} - -// Now implements clock -func (fc *fakeClock) Now() time.Time { return fc.current } - -func testServer(rrm testutil.RequestResponseMap) (string, func()) { - h := testutil.NewHandler(rrm) - s := httptest.NewServer(h) - return s.URL, s.Close -} - -type testAuthenticationWrapper struct { - headers http.Header - authCheck func(string) bool - next http.Handler -} - -func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - auth := r.Header.Get("Authorization") - if auth == "" || !w.authCheck(auth) { - h := rw.Header() - for k, values := range w.headers { - h[k] = values - } - rw.WriteHeader(http.StatusUnauthorized) - return - } - w.next.ServeHTTP(rw, r) -} - -func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) { - h := testutil.NewHandler(rrm) - wrapper := &testAuthenticationWrapper{ - - headers: http.Header(map[string][]string{ - "X-API-Version": {"registry/2.0"}, - "X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"}, - "WWW-Authenticate": {authenticate}, - }), - authCheck: authCheck, - next: h, - } - - s := httptest.NewServer(wrapper) - return s.URL, s.Close -} - -// ping pings the provided endpoint to determine its required authorization challenges. -// If a version header is provided, the versions will be returned. 
-func ping(manager ChallengeManager, endpoint, versionHeader string) ([]APIVersion, error) { - resp, err := http.Get(endpoint) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := manager.AddResponse(resp); err != nil { - return nil, err - } - - return APIVersions(resp, versionHeader), err -} - -type testCredentialStore struct { - username string - password string - refreshTokens map[string]string -} - -func (tcs *testCredentialStore) Basic(*url.URL) (string, string) { - return tcs.username, tcs.password -} - -func (tcs *testCredentialStore) RefreshToken(u *url.URL, service string) string { - return tcs.refreshTokens[service] -} - -func (tcs *testCredentialStore) SetRefreshToken(u *url.URL, service string, token string) { - if tcs.refreshTokens != nil { - tcs.refreshTokens[service] = token - } -} - -func TestEndpointAuthorizeToken(t *testing.T) { - service := "localhost.localdomain" - repo1 := "some/registry" - repo2 := "other/registry" - scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) - scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) - tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope1), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"token":"statictoken"}`), - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope2), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"token":"badtoken"}`), - }, - }, - }) - te, tc := testServer(tokenMap) - defer tc() - - m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - }) - - authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) - validCheck := func(a string) bool { - return a == "Bearer statictoken" - } - e, c := testServerWithAuth(m, authenicate, validCheck) - defer c() - - challengeManager1 := NewSimpleChallengeManager() - versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") - if err != nil { - t.Fatal(err) - } - if len(versions) != 1 { - t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) - } - if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { - t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) - } - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, nil, repo1, "pull", "push"))) - client := &http.Client{Transport: transport1} - - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } - - e2, c2 := testServerWithAuth(m, authenicate, validCheck) - defer c2() - - challengeManager2 := NewSimpleChallengeManager() - versions, err = ping(challengeManager2, e2+"/v2/", "x-multi-api-version") - if err != nil { - t.Fatal(err) - } - if len(versions) != 3 { - t.Fatalf("Unexpected version count: %d, expected 3", len(versions)) - } - if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { - t.Fatalf("Unexpected api version: %#v, 
expected %#v", versions[0], check) - } - if check := (APIVersion{Type: "registry", Version: "2.1"}); versions[1] != check { - t.Fatalf("Unexpected api version: %#v, expected %#v", versions[1], check) - } - if check := (APIVersion{Type: "trust", Version: "1.0"}); versions[2] != check { - t.Fatalf("Unexpected api version: %#v, expected %#v", versions[2], check) - } - transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, nil, repo2, "pull", "push"))) - client2 := &http.Client{Transport: transport2} - - req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) - resp, err = client2.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - - if resp.StatusCode != http.StatusUnauthorized { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) - } -} - -func TestEndpointAuthorizeRefreshToken(t *testing.T) { - service := "localhost.localdomain" - repo1 := "some/registry" - repo2 := "other/registry" - scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) - scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) - refreshToken1 := "0123456790abcdef" - refreshToken2 := "0123456790fedcba" - tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "POST", - Route: "/token", - Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope1), service)), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken1)), - }, - }, - { - // In the future this test may fail and require using basic auth to get a different refresh token - Request: testutil.Request{ - Method: "POST", - Route: "/token", - Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope2), service)), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken2)), - }, - }, - { - Request: testutil.Request{ - Method: "POST", - Route: "/token", - Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken2, url.QueryEscape(scope2), service)), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"access_token":"badtoken","refresh_token":"%s"}`), - }, - }, - }) - te, tc := testServer(tokenMap) - defer tc() - - m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - }) - - authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) - validCheck := func(a string) bool { - return a == "Bearer statictoken" - } - e, c := testServerWithAuth(m, authenicate, validCheck) - defer c() - - challengeManager1 := NewSimpleChallengeManager() - versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") - if err != nil { - t.Fatal(err) - } - if len(versions) != 1 { - t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) - } - if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { - t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) - } - creds := &testCredentialStore{ - 
refreshTokens: map[string]string{ - service: refreshToken1, - }, - } - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, creds, repo1, "pull", "push"))) - client := &http.Client{Transport: transport1} - - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } - - // Try with refresh token setting - e2, c2 := testServerWithAuth(m, authenicate, validCheck) - defer c2() - - challengeManager2 := NewSimpleChallengeManager() - versions, err = ping(challengeManager2, e2+"/v2/", "x-api-version") - if err != nil { - t.Fatal(err) - } - if len(versions) != 1 { - t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) - } - if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { - t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) - } - - transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, creds, repo2, "pull", "push"))) - client2 := &http.Client{Transport: transport2} - - req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) - resp, err = client2.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) - } - - if creds.refreshTokens[service] != refreshToken2 { - t.Fatalf("Refresh token not set after change") - } - - // Try with bad token - e3, c3 := testServerWithAuth(m, authenicate, validCheck) - defer c3() - - challengeManager3 := NewSimpleChallengeManager() - versions, err = ping(challengeManager3, e3+"/v2/", "x-api-version") - if err != nil { - t.Fatal(err) - } - if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { - t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) - } - - transport3 := transport.NewTransport(nil, NewAuthorizer(challengeManager3, NewTokenHandler(nil, creds, repo2, "pull", "push"))) - client3 := &http.Client{Transport: transport3} - - req, _ = http.NewRequest("GET", e3+"/v2/hello", nil) - resp, err = client3.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - - if resp.StatusCode != http.StatusUnauthorized { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) - } -} - -func basicAuth(username, password string) string { - auth := username + ":" + password - return base64.StdEncoding.EncodeToString([]byte(auth)) -} - -func TestEndpointAuthorizeTokenBasic(t *testing.T) { - service := "localhost.localdomain" - repo := "some/fun/registry" - scope := fmt.Sprintf("repository:%s:pull,push", repo) - username := "tokenuser" - password := "superSecretPa$$word" - - tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"access_token":"statictoken"}`), - }, - }, - }) - - authenicate1 := fmt.Sprintf("Basic realm=localhost") - basicCheck := func(a string) bool { - return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) - } - te, tc := 
testServerWithAuth(tokenMap, authenicate1, basicCheck) - defer tc() - - m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - }) - - authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) - bearerCheck := func(a string) bool { - return a == "Bearer statictoken" - } - e, c := testServerWithAuth(m, authenicate2, bearerCheck) - defer c() - - creds := &testCredentialStore{ - username: username, - password: password, - } - - challengeManager := NewSimpleChallengeManager() - _, err := ping(challengeManager, e+"/v2/", "") - if err != nil { - t.Fatal(err) - } - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewTokenHandler(nil, creds, repo, "pull", "push"), NewBasicHandler(creds))) - client := &http.Client{Transport: transport1} - - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } -} - -func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) { - service := "localhost.localdomain" - repo := "some/fun/registry" - scope := fmt.Sprintf("repository:%s:pull,push", repo) - username := "tokenuser" - password := "superSecretPa$$word" - - tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"token":"statictoken", "expires_in": 3001}`), - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"access_token":"statictoken", "expires_in": 3001}`), - }, - }, - }) - - authenicate1 := fmt.Sprintf("Basic realm=localhost") - tokenExchanges := 0 - basicCheck := func(a string) bool { - tokenExchanges = tokenExchanges + 1 - return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) - } - te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) - defer tc() - - m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - }) - - authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) - bearerCheck := func(a string) bool { - return a == "Bearer statictoken" - } - e, c := testServerWithAuth(m, authenicate2, 
bearerCheck) - defer c() - - creds := &testCredentialStore{ - username: username, - password: password, - } - - challengeManager := NewSimpleChallengeManager() - _, err := ping(challengeManager, e+"/v2/", "") - if err != nil { - t.Fatal(err) - } - clock := &fakeClock{current: time.Now()} - options := TokenHandlerOptions{ - Transport: nil, - Credentials: creds, - Scopes: []Scope{ - RepositoryScope{ - Repository: repo, - Actions: []string{"pull", "push"}, - }, - }, - } - tHandler := NewTokenHandlerWithOptions(options) - tHandler.(*tokenHandler).clock = clock - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds))) - client := &http.Client{Transport: transport1} - - // First call should result in a token exchange - // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. - timeIncrement := 1000 * time.Second - for i := 0; i < 4; i++ { - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } - if tokenExchanges != 1 { - t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) - } - clock.current = clock.current.Add(timeIncrement) - } - - // After we've exceeded the expiration, we should see a second token exchange. - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } - if tokenExchanges != 2 { - t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) - } -} - -func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) { - service := "localhost.localdomain" - repo := "some/fun/registry" - scope := fmt.Sprintf("repository:%s:pull,push", repo) - username := "tokenuser" - password := "superSecretPa$$word" - - // This test sets things up such that the token was issued one increment - // earlier than its sibling in TestEndpointAuthorizeTokenBasicWithExpiresIn. - // This will mean that the token expires after 3 increments instead of 4. 
- clock := &fakeClock{current: time.Now()} - timeIncrement := 1000 * time.Second - firstIssuedAt := clock.Now() - clock.current = clock.current.Add(timeIncrement) - secondIssuedAt := clock.current.Add(2 * timeIncrement) - tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"token":"statictoken", "issued_at": "` + firstIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: []byte(`{"access_token":"statictoken", "issued_at": "` + secondIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), - }, - }, - }) - - authenicate1 := fmt.Sprintf("Basic realm=localhost") - tokenExchanges := 0 - basicCheck := func(a string) bool { - tokenExchanges = tokenExchanges + 1 - return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) - } - te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) - defer tc() - - m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - }) - - authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) - bearerCheck := func(a string) bool { - return a == "Bearer statictoken" - } - e, c := testServerWithAuth(m, authenicate2, bearerCheck) - defer c() - - creds := &testCredentialStore{ - username: username, - password: password, - } - - challengeManager := NewSimpleChallengeManager() - _, err := ping(challengeManager, e+"/v2/", "") - if err != nil { - t.Fatal(err) - } - - options := TokenHandlerOptions{ - Transport: nil, - Credentials: creds, - Scopes: []Scope{ - RepositoryScope{ - Repository: repo, - Actions: []string{"pull", "push"}, - }, - }, - } - tHandler := NewTokenHandlerWithOptions(options) - tHandler.(*tokenHandler).clock = clock - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds))) - client := &http.Client{Transport: transport1} - - // First call should result in a token exchange - // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. - // We shaved one increment off of the equivalent logic in TestEndpointAuthorizeTokenBasicWithExpiresIn - // so this loop should have one fewer iteration. 
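// NOTE (worked timeline): issued_at is t=0 but the clock has already
// advanced to t=1000s. With expires_in=3001s, the first request (t=1000s)
// does the initial exchange, the t=2000s and t=3000s requests reuse its
// token, and the t=4000s request exceeds the t=3001s expiry and forces
// the second exchange.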
- for i := 0; i < 3; i++ { - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } - if tokenExchanges != 1 { - t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) - } - clock.current = clock.current.Add(timeIncrement) - } - - // After we've exceeded the expiration, we should see a second token exchange. - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } - if tokenExchanges != 2 { - t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) - } -} - -func TestEndpointAuthorizeBasic(t *testing.T) { - m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/hello", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - }, - }, - }) - - username := "user1" - password := "funSecretPa$$word" - authenicate := fmt.Sprintf("Basic realm=localhost") - validCheck := func(a string) bool { - return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) - } - e, c := testServerWithAuth(m, authenicate, validCheck) - defer c() - creds := &testCredentialStore{ - username: username, - password: password, - } - - challengeManager := NewSimpleChallengeManager() - _, err := ping(challengeManager, e+"/v2/", "") - if err != nil { - t.Fatal(err) - } - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewBasicHandler(creds))) - client := &http.Client{Transport: transport1} - - req, _ := http.NewRequest("GET", e+"/v2/hello", nil) - resp, err := client.Do(req) - if err != nil { - t.Fatalf("Error sending get request: %s", err) - } - - if resp.StatusCode != http.StatusAccepted { - t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) - } -} diff --git a/docs/client/blob_writer.go b/docs/client/blob_writer.go deleted file mode 100644 index e3ffcb00..00000000 --- a/docs/client/blob_writer.go +++ /dev/null @@ -1,162 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" -) - -type httpBlobUpload struct { - statter distribution.BlobStatter - client *http.Client - - uuid string - startedAt time.Time - - location string // always the last value of the location header. 
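// offset drives Size() and the Content-Range header built in Write below.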
- offset int64 - closed bool -} - -func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { - panic("Not implemented") -} - -func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUploadUnknown - } - return HandleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) - if err != nil { - return 0, err - } - defer req.Body.Close() - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int64 - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { - req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) - if err != nil { - return 0, err - } - req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) - req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Size() int64 { - return hbu.offset -} - -func (hbu *httpBlobUpload) ID() string { - return hbu.uuid -} - -func (hbu *httpBlobUpload) StartedAt() time.Time { - return hbu.startedAt -} - -func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequest("PUT", hbu.location, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - values := req.URL.Query() - values.Set("digest", desc.Digest.String()) - req.URL.RawQuery = values.Encode() - - resp, err := hbu.client.Do(req) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if !SuccessStatus(resp.StatusCode) { - return distribution.Descriptor{}, hbu.handleErrorResponse(resp) - } - - return hbu.statter.Stat(ctx, desc.Digest) -} - -func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { - req, err := http.NewRequest("DELETE", hbu.location, nil) - if err != nil { - return err - } - resp, err := hbu.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { - return nil - } - return hbu.handleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) Close() error { - hbu.closed = true - return nil -} diff 
--git a/docs/client/blob_writer_test.go b/docs/client/blob_writer_test.go deleted file mode 100644 index 099dca4f..00000000 --- a/docs/client/blob_writer_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "net/http" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/testutil" -) - -// Test implements distribution.BlobWriter -var _ distribution.BlobWriter = &httpBlobUpload{} - -func TestUploadReadFrom(t *testing.T) { - _, b := newRandomBlob(64) - repo := "test/upload/readfrom" - locationPath := fmt.Sprintf("/v2/%s/uploads/testid", repo) - - m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/", - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - "Docker-Distribution-API-Version": {"registry/2.0"}, - }), - }, - }, - // Test Valid case - { - Request: testutil.Request{ - Method: "PATCH", - Route: locationPath, - Body: b, - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, - "Location": {locationPath}, - "Range": {"0-63"}, - }), - }, - }, - // Test invalid range - { - Request: testutil.Request{ - Method: "PATCH", - Route: locationPath, - Body: b, - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, - "Location": {locationPath}, - "Range": {""}, - }), - }, - }, - // Test 404 - { - Request: testutil.Request{ - Method: "PATCH", - Route: locationPath, - Body: b, - }, - Response: testutil.Response{ - StatusCode: http.StatusNotFound, - }, - }, - // Test 400 valid json - { - Request: testutil.Request{ - Method: "PATCH", - Route: locationPath, - Body: b, - }, - Response: testutil.Response{ - StatusCode: http.StatusBadRequest, - Body: []byte(` - { "errors": - [ - { - "code": "BLOB_UPLOAD_INVALID", - "message": "blob upload invalid", - "detail": "more detail" - } - ] - } `), - }, - }, - // Test 400 invalid json - { - Request: testutil.Request{ - Method: "PATCH", - Route: locationPath, - Body: b, - }, - Response: testutil.Response{ - StatusCode: http.StatusBadRequest, - Body: []byte("something bad happened"), - }, - }, - // Test 500 - { - Request: testutil.Request{ - Method: "PATCH", - Route: locationPath, - Body: b, - }, - Response: testutil.Response{ - StatusCode: http.StatusInternalServerError, - }, - }, - }) - - e, c := testServer(m) - defer c() - - blobUpload := &httpBlobUpload{ - client: &http.Client{}, - } - - // Valid case - blobUpload.location = e + locationPath - n, err := blobUpload.ReadFrom(bytes.NewReader(b)) - if err != nil { - t.Fatalf("Error calling ReadFrom: %s", err) - } - if n != 64 { - t.Fatalf("Wrong length returned from ReadFrom: %d, expected 64", n) - } - - // Bad range - blobUpload.location = e + locationPath - _, err = blobUpload.ReadFrom(bytes.NewReader(b)) - if err == nil { - t.Fatalf("Expected error when bad range received") - } - - // 404 - blobUpload.location = e + locationPath - _, err = blobUpload.ReadFrom(bytes.NewReader(b)) - if err == nil { - t.Fatalf("Expected error when not found") - } - if err != distribution.ErrBlobUploadUnknown { - t.Fatalf("Wrong error thrown: %s, expected %s", err, 
distribution.ErrBlobUploadUnknown) - } - - // 400 valid json - blobUpload.location = e + locationPath - _, err = blobUpload.ReadFrom(bytes.NewReader(b)) - if err == nil { - t.Fatalf("Expected error when not found") - } - if uploadErr, ok := err.(errcode.Errors); !ok { - t.Fatalf("Wrong error type %T: %s", err, err) - } else if len(uploadErr) != 1 { - t.Fatalf("Unexpected number of errors: %d, expected 1", len(uploadErr)) - } else { - v2Err, ok := uploadErr[0].(errcode.Error) - if !ok { - t.Fatalf("Not an 'Error' type: %#v", uploadErr[0]) - } - if v2Err.Code != v2.ErrorCodeBlobUploadInvalid { - t.Fatalf("Unexpected error code: %s, expected %d", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid) - } - if expected := "blob upload invalid"; v2Err.Message != expected { - t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Message, expected) - } - if expected := "more detail"; v2Err.Detail.(string) != expected { - t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Detail.(string), expected) - } - } - - // 400 invalid json - blobUpload.location = e + locationPath - _, err = blobUpload.ReadFrom(bytes.NewReader(b)) - if err == nil { - t.Fatalf("Expected error when not found") - } - if uploadErr, ok := err.(*UnexpectedHTTPResponseError); !ok { - t.Fatalf("Wrong error type %T: %s", err, err) - } else { - respStr := string(uploadErr.Response) - if expected := "something bad happened"; respStr != expected { - t.Fatalf("Unexpected response string: %s, expected: %s", respStr, expected) - } - } - - // 500 - blobUpload.location = e + locationPath - _, err = blobUpload.ReadFrom(bytes.NewReader(b)) - if err == nil { - t.Fatalf("Expected error when not found") - } - if uploadErr, ok := err.(*UnexpectedHTTPStatusError); !ok { - t.Fatalf("Wrong error type %T: %s", err, err) - } else if expected := "500 " + http.StatusText(http.StatusInternalServerError); uploadErr.Status != expected { - t.Fatalf("Unexpected response status: %s, expected %s", uploadErr.Status, expected) - } -} diff --git a/docs/client/errors.go b/docs/client/errors.go deleted file mode 100644 index f73e3c23..00000000 --- a/docs/client/errors.go +++ /dev/null @@ -1,107 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty -// errcode.Errors slice. -var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") - -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is -// returned when making a registry api call. -type UnexpectedHTTPStatusError struct { - Status string -} - -func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) -} - -// UnexpectedHTTPResponseError is returned when an expected HTTP status code -// is returned, but the content was unexpected and failed to be parsed. -type UnexpectedHTTPResponseError struct { - ParseErr error - StatusCode int - Response []byte -} - -func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) -} - -func parseHTTPErrorResponse(statusCode int, r io.Reader) error { - var errors errcode.Errors - body, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - // For backward compatibility, handle irregularly formatted - // messages that contain a "details" field. 
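// A legacy body looks like {"details": "access denied"} (hypothetical
// example) rather than the structured errcode.Errors format parsed below.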
- var detailsErr struct { - Details string `json:"details"` - } - err = json.Unmarshal(body, &detailsErr) - if err == nil && detailsErr.Details != "" { - switch statusCode { - case http.StatusUnauthorized: - return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) - case http.StatusTooManyRequests: - return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) - default: - return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) - } - } - - if err := json.Unmarshal(body, &errors); err != nil { - return &UnexpectedHTTPResponseError{ - ParseErr: err, - StatusCode: statusCode, - Response: body, - } - } - - if len(errors) == 0 { - // If there was no error specified in the body, return - // UnexpectedHTTPResponseError. - return &UnexpectedHTTPResponseError{ - ParseErr: ErrNoErrorsInBody, - StatusCode: statusCode, - Response: body, - } - } - - return errors -} - -// HandleErrorResponse returns error parsed from HTTP response for an -// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An -// UnexpectedHTTPStatusError returned for response code outside of expected -// range. -func HandleErrorResponse(resp *http.Response) error { - if resp.StatusCode == 401 { - err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) - if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { - return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) - } - return err - } - if resp.StatusCode >= 400 && resp.StatusCode < 500 { - return parseHTTPErrorResponse(resp.StatusCode, resp.Body) - } - return &UnexpectedHTTPStatusError{Status: resp.Status} -} - -// SuccessStatus returns true if the argument is a successful HTTP response -// code (in the range 200 - 399 inclusive). -func SuccessStatus(status int) bool { - return status >= 200 && status <= 399 -} diff --git a/docs/client/errors_test.go b/docs/client/errors_test.go deleted file mode 100644 index ca9dddd1..00000000 --- a/docs/client/errors_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package client - -import ( - "bytes" - "io" - "net/http" - "strings" - "testing" -) - -type nopCloser struct { - io.Reader -} - -func (nopCloser) Close() error { return nil } - -func TestHandleErrorResponse401ValidBody(t *testing.T) { - json := "{\"errors\":[{\"code\":\"UNAUTHORIZED\",\"message\":\"action requires authentication\"}]}" - response := &http.Response{ - Status: "401 Unauthorized", - StatusCode: 401, - Body: nopCloser{bytes.NewBufferString(json)}, - } - err := HandleErrorResponse(response) - - expectedMsg := "unauthorized: action requires authentication" - if !strings.Contains(err.Error(), expectedMsg) { - t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) - } -} - -func TestHandleErrorResponse401WithInvalidBody(t *testing.T) { - json := "{invalid json}" - response := &http.Response{ - Status: "401 Unauthorized", - StatusCode: 401, - Body: nopCloser{bytes.NewBufferString(json)}, - } - err := HandleErrorResponse(response) - - expectedMsg := "unauthorized: authentication required" - if !strings.Contains(err.Error(), expectedMsg) { - t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) - } -} - -func TestHandleErrorResponseExpectedStatusCode400ValidBody(t *testing.T) { - json := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest does not match\"}]}" - response := &http.Response{ - Status: "400 Bad Request", - StatusCode: 400, - Body: nopCloser{bytes.NewBufferString(json)}, - } - err := HandleErrorResponse(response) - - expectedMsg := "digest invalid: provided digest does not 
match" - if !strings.Contains(err.Error(), expectedMsg) { - t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) - } -} - -func TestHandleErrorResponseExpectedStatusCode404EmptyErrorSlice(t *testing.T) { - json := `{"randomkey": "randomvalue"}` - response := &http.Response{ - Status: "404 Not Found", - StatusCode: 404, - Body: nopCloser{bytes.NewBufferString(json)}, - } - err := HandleErrorResponse(response) - - expectedMsg := `error parsing HTTP 404 response body: no error details found in HTTP response body: "{\"randomkey\": \"randomvalue\"}"` - if !strings.Contains(err.Error(), expectedMsg) { - t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) - } -} - -func TestHandleErrorResponseExpectedStatusCode404InvalidBody(t *testing.T) { - json := "{invalid json}" - response := &http.Response{ - Status: "404 Not Found", - StatusCode: 404, - Body: nopCloser{bytes.NewBufferString(json)}, - } - err := HandleErrorResponse(response) - - expectedMsg := "error parsing HTTP 404 response body: invalid character 'i' looking for beginning of object key string: \"{invalid json}\"" - if !strings.Contains(err.Error(), expectedMsg) { - t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) - } -} - -func TestHandleErrorResponseUnexpectedStatusCode501(t *testing.T) { - response := &http.Response{ - Status: "501 Not Implemented", - StatusCode: 501, - Body: nopCloser{bytes.NewBufferString("{\"Error Encountered\" : \"Function not implemented.\"}")}, - } - err := HandleErrorResponse(response) - - expectedMsg := "received unexpected HTTP status: 501 Not Implemented" - if !strings.Contains(err.Error(), expectedMsg) { - t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) - } -} diff --git a/docs/client/repository.go b/docs/client/repository.go deleted file mode 100644 index 97312556..00000000 --- a/docs/client/repository.go +++ /dev/null @@ -1,863 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/cache/memory" -) - -// Registry provides an interface for calling Repositories, which returns a catalog of repositories. -type Registry interface { - Repositories(ctx context.Context, repos []string, last string) (n int, err error) -} - -// checkHTTPRedirect is a callback that can manipulate redirected HTTP -// requests. It is used to preserve Accept and Range headers. -func checkHTTPRedirect(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - - if len(via) > 0 { - for headerName, headerVals := range via[0].Header { - if headerName != "Accept" && headerName != "Range" { - continue - } - for _, val := range headerVals { - // Don't add to redirected request if redirected - // request already has a header with the same - // name and value. 
- hasValue := false - for _, existingVal := range req.Header[headerName] { - if existingVal == val { - hasValue = true - break - } - } - if !hasValue { - req.Header.Add(headerName, val) - } - } - } - } - - return nil -} - -// NewRegistry creates a registry namespace which can be used to get a listing of repositories -func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, - CheckRedirect: checkHTTPRedirect, - } - - return ®istry{ - client: client, - ub: ub, - context: ctx, - }, nil -} - -type registry struct { - client *http.Client - ub *v2.URLBuilder - context context.Context -} - -// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size -// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there -// are no more entries -func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { - var numFilled int - var returnErr error - - values := buildCatalogValues(len(entries), last) - u, err := r.ub.BuildCatalogURL(values) - if err != nil { - return 0, err - } - - resp, err := r.client.Get(u) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - var ctlg struct { - Repositories []string `json:"repositories"` - } - decoder := json.NewDecoder(resp.Body) - - if err := decoder.Decode(&ctlg); err != nil { - return 0, err - } - - for cnt := range ctlg.Repositories { - entries[cnt] = ctlg.Repositories[cnt] - } - numFilled = len(ctlg.Repositories) - - link := resp.Header.Get("Link") - if link == "" { - returnErr = io.EOF - } - } else { - return 0, HandleErrorResponse(resp) - } - - return numFilled, returnErr -} - -// NewRepository creates a new Repository for the given repository name and base URL. 
-func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - CheckRedirect: checkHTTPRedirect, - // TODO(dmcgowan): create cookie jar - } - - return &repository{ - client: client, - ub: ub, - name: name, - context: ctx, - }, nil -} - -type repository struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named -} - -func (r *repository) Named() reference.Named { - return r.name -} - -func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { - statter := &blobStatter{ - name: r.name, - ub: r.ub, - client: r.client, - } - return &blobs{ - name: r.name, - ub: r.ub, - client: r.client, - statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), - } -} - -func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // todo(richardscothern): options should be sent over the wire - return &manifests{ - name: r.name, - ub: r.ub, - client: r.client, - etags: make(map[string]string), - }, nil -} - -func (r *repository) Tags(ctx context.Context) distribution.TagService { - return &tags{ - client: r.client, - ub: r.ub, - context: r.context, - name: r.Named(), - } -} - -// tags implements remote tagging operations. -type tags struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named -} - -// All returns all tags -func (t *tags) All(ctx context.Context) ([]string, error) { - var tags []string - - u, err := t.ub.BuildTagsURL(t.name) - if err != nil { - return tags, err - } - - for { - resp, err := t.client.Get(u) - if err != nil { - return tags, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return tags, err - } - - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return tags, err - } - tags = append(tags, tagsResponse.Tags...) 
- if link := resp.Header.Get("Link"); link != "" { - u = strings.Trim(strings.Split(link, ";")[0], "<>") - } else { - return tags, nil - } - } else { - return tags, HandleErrorResponse(resp) - } - } -} - -func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { - desc := distribution.Descriptor{} - headers := response.Header - - ctHeader := headers.Get("Content-Type") - if ctHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") - } - desc.MediaType = ctHeader - - digestHeader := headers.Get("Docker-Content-Digest") - if digestHeader == "" { - bytes, err := ioutil.ReadAll(response.Body) - if err != nil { - return distribution.Descriptor{}, err - } - _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil - } - - dgst, err := digest.ParseDigest(digestHeader) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Digest = dgst - - lengthHeader := headers.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") - } - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Size = length - - return desc, nil - -} - -// Get issues a HEAD request for a Manifest against its named endpoint in order -// to construct a descriptor for the tag. If the registry doesn't support HEADing -// a manifest, fallback to GET. -func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - ref, err := reference.WithTag(t.name, tag) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := t.ub.BuildManifestURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - req, err := http.NewRequest("HEAD", u, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - - var attempts int - resp, err := t.client.Do(req) -check: - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400: - return descriptorFromResponse(resp) - case resp.StatusCode == http.StatusMethodNotAllowed: - req, err = http.NewRequest("GET", u, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - - resp, err = t.client.Do(req) - attempts++ - if attempts > 1 { - return distribution.Descriptor{}, err - } - goto check - default: - return distribution.Descriptor{}, HandleErrorResponse(resp) - } -} - -func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { - panic("not implemented") -} - -func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - panic("not implemented") -} - -func (t *tags) Untag(ctx context.Context, tag string) error { - panic("not implemented") -} - -type manifests struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - etags map[string]string -} - -func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return false, err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return false, err - } - - resp, err := ms.client.Head(u) - if err != nil { - return 
false, err - } - - if SuccessStatus(resp.StatusCode) { - return true, nil - } else if resp.StatusCode == http.StatusNotFound { - return false, nil - } - return false, HandleErrorResponse(resp) -} - -// AddEtagToTag allows a client to supply an eTag to Get which will be -// used for a conditional HTTP request. If the eTag matches, a nil manifest -// and ErrManifestNotModified error will be returned. etag is automatically -// quoted when added to this map. -func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { - return etagOption{tag, etag} -} - -type etagOption struct{ tag, etag string } - -func (o etagOption) Apply(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifests); ok { - ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) - return nil - } - return fmt.Errorf("etag options is a client-only option") -} - -// ReturnContentDigest allows a client to set a the content digest on -// a successful request from the 'Docker-Content-Digest' header. This -// returned digest is represents the digest which the registry uses -// to refer to the content and can be used to delete the content. -func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { - return contentDigestOption{dgst} -} - -type contentDigestOption struct{ digest *digest.Digest } - -func (o contentDigestOption) Apply(ms distribution.ManifestService) error { - return nil -} - -func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - var ( - digestOrTag string - ref reference.Named - err error - contentDgst *digest.Digest - ) - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - digestOrTag = opt.Tag - ref, err = reference.WithTag(ms.name, opt.Tag) - if err != nil { - return nil, err - } - } else if opt, ok := option.(contentDigestOption); ok { - contentDgst = opt.digest - } else { - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - } - - if digestOrTag == "" { - digestOrTag = dgst.String() - ref, err = reference.WithDigest(ms.name, dgst) - if err != nil { - return nil, err - } - } - - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - - if _, ok := ms.etags[digestOrTag]; ok { - req.Header.Set("If-None-Match", ms.etags[digestOrTag]) - } - - resp, err := ms.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { - return nil, distribution.ErrManifestNotModified - } else if SuccessStatus(resp.StatusCode) { - if contentDgst != nil { - dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest")) - if err == nil { - *contentDgst = dgst - } - } - mt := resp.Header.Get("Content-Type") - body, err := ioutil.ReadAll(resp.Body) - - if err != nil { - return nil, err - } - m, _, err := distribution.UnmarshalManifest(mt, body) - if err != nil { - return nil, err - } - return m, nil - } - return nil, HandleErrorResponse(resp) -} - -// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL. 
-func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - ref := ms.name - var tagged bool - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - var err error - ref, err = reference.WithTag(ref, opt.Tag) - if err != nil { - return "", err - } - tagged = true - } else { - err := option.Apply(ms) - if err != nil { - return "", err - } - } - } - mediaType, p, err := m.Payload() - if err != nil { - return "", err - } - - if !tagged { - // generate a canonical digest and Put by digest - _, d, err := distribution.UnmarshalManifest(mediaType, p) - if err != nil { - return "", err - } - ref, err = reference.WithDigest(ref, d.Digest) - if err != nil { - return "", err - } - } - - manifestURL, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return "", err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) - if err != nil { - return "", err - } - - putRequest.Header.Set("Content-Type", mediaType) - - resp, err := ms.client.Do(putRequest) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - dgstHeader := resp.Header.Get("Docker-Content-Digest") - dgst, err := digest.ParseDigest(dgstHeader) - if err != nil { - return "", err - } - - return dgst, nil - } - - return "", HandleErrorResponse(resp) -} - -func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return err - } - req, err := http.NewRequest("DELETE", u, nil) - if err != nil { - return err - } - - resp, err := ms.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -// todo(richardscothern): Restore interface and implementation with merge of #1050 -/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - panic("not supported") -}*/ - -type blobs struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - - statter distribution.BlobDescriptorService - distribution.BlobDeleter -} - -func sanitizeLocation(location, base string) (string, error) { - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - - locationURL, err := url.Parse(location) - if err != nil { - return "", err - } - - return baseURL.ResolveReference(locationURL).String(), nil -} - -func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return bs.statter.Stat(ctx, dgst) - -} - -func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - reader, err := bs.Open(ctx, dgst) - if err != nil { - return nil, err - } - defer reader.Close() - - return ioutil.ReadAll(reader) -} - -func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return nil, err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return nil, err - } - - return transport.NewHTTPReadSeeker(bs.client, blobURL, - func(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUnknown - } - return HandleErrorResponse(resp) - }), nil -} - -func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - panic("not implemented") -} - -func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - writer, err := bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, err - } - dgstr := digest.Canonical.New() - n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) - if err != nil { - return distribution.Descriptor{}, err - } - if n < int64(len(p)) { - return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) - } - - desc := distribution.Descriptor{ - MediaType: mediaType, - Size: int64(len(p)), - Digest: dgstr.Digest(), - } - - return writer.Commit(ctx, desc) -} - -// createOptions is a collection of blob creation modifiers relevant to general -// blob storage intended to be configured by the BlobCreateOption.Apply method. -type createOptions struct { - Mount struct { - ShouldMount bool - From reference.Canonical - } -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*createOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - var opts createOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - var values []url.Values - - if opts.Mount.ShouldMount { - values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) - } - - u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
- if err != nil { - return nil, err - } - - resp, err := bs.client.Post(u, "", nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusCreated: - desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) - if err != nil { - return nil, err - } - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - case http.StatusAccepted: - // TODO(dmcgowan): Check for invalid UUID - uuid := resp.Header.Get("Docker-Upload-UUID") - location, err := sanitizeLocation(resp.Header.Get("Location"), u) - if err != nil { - return nil, err - } - - return &httpBlobUpload{ - statter: bs.statter, - client: bs.client, - uuid: uuid, - startedAt: time.Now(), - location: location, - }, nil - default: - return nil, HandleErrorResponse(resp) - } -} - -func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - panic("not implemented") -} - -func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { - return bs.statter.Clear(ctx, dgst) -} - -type blobStatter struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client -} - -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - resp, err := bs.client.Head(u) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - lengthHeader := resp.Header.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) - } - - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) - } - - return distribution.Descriptor{ - MediaType: resp.Header.Get("Content-Type"), - Size: length, - Digest: dgst, - }, nil - } else if resp.StatusCode == http.StatusNotFound { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - return distribution.Descriptor{}, HandleErrorResponse(resp) -} - -func buildCatalogValues(maxEntries int, last string) url.Values { - values := url.Values{} - - if maxEntries > 0 { - values.Add("n", strconv.Itoa(maxEntries)) - } - - if last != "" { - values.Add("last", last) - } - - return values -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return err - } - - req, err := http.NewRequest("DELETE", blobURL, nil) - if err != nil { - return err - } - - resp, err := bs.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return nil -} diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go deleted file mode 100644 index d945596b..00000000 --- a/docs/client/repository_test.go +++ /dev/null @@ -1,1182 +0,0 @@ -package client - -import ( - "bytes" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "log" - "net/http" - "net/http/httptest" - "strconv" - "strings" - "testing" 
- "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/testutil" - "github.com/docker/distribution/uuid" - "github.com/docker/libtrust" -) - -func testServer(rrm testutil.RequestResponseMap) (string, func()) { - h := testutil.NewHandler(rrm) - s := httptest.NewServer(h) - return s.URL, s.Close -} - -func newRandomBlob(size int) (digest.Digest, []byte) { - b := make([]byte, size) - if n, err := rand.Read(b); err != nil { - panic(err) - } else if n != size { - panic("unable to read enough bytes") - } - - return digest.FromBytes(b), b -} - -func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { - *m = append(*m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: content, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - - *m = append(*m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "HEAD", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) -} - -func addTestCatalog(route string, content []byte, link string, m *testutil.RequestResponseMap) { - headers := map[string][]string{ - "Content-Length": {strconv.Itoa(len(content))}, - "Content-Type": {"application/json; charset=utf-8"}, - } - if link != "" { - headers["Link"] = append(headers["Link"], link) - } - - *m = append(*m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: route, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: content, - Headers: http.Header(headers), - }, - }) -} - -func TestBlobDelete(t *testing.T) { - dgst, _ := newRandomBlob(1024) - var m testutil.RequestResponseMap - repo, _ := reference.ParseNamed("test.example.com/repo1") - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "DELETE", - Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - }), - }, - }) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - l := r.Blobs(ctx) - err = l.Delete(ctx, dgst) - if err != nil { - t.Errorf("Error deleting blob: %s", err.Error()) - } - -} - -func TestBlobFetch(t *testing.T) { - d1, b1 := newRandomBlob(1024) - var m testutil.RequestResponseMap - addTestFetch("test.example.com/repo1", d1, b1, &m) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - repo, _ := reference.ParseNamed("test.example.com/repo1") - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - l := r.Blobs(ctx) - - b, err := l.Get(ctx, d1) - if err != nil { - t.Fatal(err) - } - if 
bytes.Compare(b, b1) != 0 { - t.Fatalf("Wrong bytes values fetched: [%d]byte != [%d]byte", len(b), len(b1)) - } - - // TODO(dmcgowan): Test for unknown blob case -} - -func TestBlobExistsNoContentLength(t *testing.T) { - var m testutil.RequestResponseMap - - repo, _ := reference.ParseNamed("biff") - dgst, content := newRandomBlob(1024) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: content, - Headers: http.Header(map[string][]string{ - // "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "HEAD", - Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - // "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - e, c := testServer(m) - defer c() - - ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - l := r.Blobs(ctx) - - _, err = l.Stat(ctx, dgst) - if err == nil { - t.Fatal(err) - } - if !strings.Contains(err.Error(), "missing content-length heade") { - t.Fatalf("Expected missing content-length error message") - } - -} - -func TestBlobExists(t *testing.T) { - d1, b1 := newRandomBlob(1024) - var m testutil.RequestResponseMap - addTestFetch("test.example.com/repo1", d1, b1, &m) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - repo, _ := reference.ParseNamed("test.example.com/repo1") - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - l := r.Blobs(ctx) - - stat, err := l.Stat(ctx, d1) - if err != nil { - t.Fatal(err) - } - - if stat.Digest != d1 { - t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1) - } - - if stat.Size != int64(len(b1)) { - t.Fatalf("Unexpected length: %d, expected %d", stat.Size, len(b1)) - } - - // TODO(dmcgowan): Test error cases and ErrBlobUnknown case -} - -func TestBlobUploadChunked(t *testing.T) { - dgst, b1 := newRandomBlob(1024) - var m testutil.RequestResponseMap - chunks := [][]byte{ - b1[0:256], - b1[256:512], - b1[512:513], - b1[513:1024], - } - repo, _ := reference.ParseNamed("test.example.com/uploadrepo") - uuids := []string{uuid.Generate().String()} - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "POST", - Route: "/v2/" + repo.Name() + "/blobs/uploads/", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[0]}, - "Docker-Upload-UUID": {uuids[0]}, - "Range": {"0-0"}, - }), - }, - }) - offset := 0 - for i, chunk := range chunks { - uuids = append(uuids, uuid.Generate().String()) - newOffset := offset + len(chunk) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PATCH", - Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i], - Body: chunk, - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i+1]}, - 
"Docker-Upload-UUID": {uuids[i+1]}, - "Range": {fmt.Sprintf("%d-%d", offset, newOffset-1)}, - }), - }, - }) - offset = newOffset - } - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PUT", - Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[len(uuids)-1], - QueryParams: map[string][]string{ - "digest": {dgst.String()}, - }, - }, - Response: testutil.Response{ - StatusCode: http.StatusCreated, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Docker-Content-Digest": {dgst.String()}, - "Content-Range": {fmt.Sprintf("0-%d", offset-1)}, - }), - }, - }) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "HEAD", - Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(offset)}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - l := r.Blobs(ctx) - - upload, err := l.Create(ctx) - if err != nil { - t.Fatal(err) - } - - if upload.ID() != uuids[0] { - log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uuids[0]) - } - - for _, chunk := range chunks { - n, err := upload.Write(chunk) - if err != nil { - t.Fatal(err) - } - if n != len(chunk) { - t.Fatalf("Unexpected length returned from write: %d; expected: %d", n, len(chunk)) - } - } - - blob, err := upload.Commit(ctx, distribution.Descriptor{ - Digest: dgst, - Size: int64(len(b1)), - }) - if err != nil { - t.Fatal(err) - } - - if blob.Size != int64(len(b1)) { - t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) - } -} - -func TestBlobUploadMonolithic(t *testing.T) { - dgst, b1 := newRandomBlob(1024) - var m testutil.RequestResponseMap - repo, _ := reference.ParseNamed("test.example.com/uploadrepo") - uploadID := uuid.Generate().String() - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "POST", - Route: "/v2/" + repo.Name() + "/blobs/uploads/", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, - "Docker-Upload-UUID": {uploadID}, - "Range": {"0-0"}, - }), - }, - }) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PATCH", - Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, - Body: b1, - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, - "Docker-Upload-UUID": {uploadID}, - "Content-Length": {"0"}, - "Docker-Content-Digest": {dgst.String()}, - "Range": {fmt.Sprintf("0-%d", len(b1)-1)}, - }), - }, - }) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PUT", - Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, - QueryParams: map[string][]string{ - "digest": {dgst.String()}, - }, - }, - Response: testutil.Response{ - StatusCode: http.StatusCreated, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Docker-Content-Digest": {dgst.String()}, - "Content-Range": {fmt.Sprintf("0-%d", len(b1)-1)}, - }), - }, - }) - m = append(m, 
testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "HEAD", - Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(b1))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - l := r.Blobs(ctx) - - upload, err := l.Create(ctx) - if err != nil { - t.Fatal(err) - } - - if upload.ID() != uploadID { - log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uploadID) - } - - n, err := upload.ReadFrom(bytes.NewReader(b1)) - if err != nil { - t.Fatal(err) - } - if n != int64(len(b1)) { - t.Fatalf("Unexpected ReadFrom length: %d; expected: %d", n, len(b1)) - } - - blob, err := upload.Commit(ctx, distribution.Descriptor{ - Digest: dgst, - Size: int64(len(b1)), - }) - if err != nil { - t.Fatal(err) - } - - if blob.Size != int64(len(b1)) { - t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) - } -} - -func TestBlobMount(t *testing.T) { - dgst, content := newRandomBlob(1024) - var m testutil.RequestResponseMap - repo, _ := reference.ParseNamed("test.example.com/uploadrepo") - - sourceRepo, _ := reference.ParseNamed("test.example.com/sourcerepo") - canonicalRef, _ := reference.WithDigest(sourceRepo, dgst) - - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "POST", - Route: "/v2/" + repo.Name() + "/blobs/uploads/", - QueryParams: map[string][]string{"from": {sourceRepo.Name()}, "mount": {dgst.String()}}, - }, - Response: testutil.Response{ - StatusCode: http.StatusCreated, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Location": {"/v2/" + repo.Name() + "/blobs/" + dgst.String()}, - "Docker-Content-Digest": {dgst.String()}, - }), - }, - }) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "HEAD", - Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - - l := r.Blobs(ctx) - - bw, err := l.Create(ctx, WithMountFrom(canonicalRef)) - if bw != nil { - t.Fatalf("Expected blob writer to be nil, was %v", bw) - } - - if ebm, ok := err.(distribution.ErrBlobMounted); ok { - if ebm.From.Digest() != dgst { - t.Fatalf("Unexpected digest: %s, expected %s", ebm.From.Digest(), dgst) - } - if ebm.From.Name() != sourceRepo.Name() { - t.Fatalf("Unexpected from: %s, expected %s", ebm.From.Name(), sourceRepo) - } - } else { - t.Fatalf("Unexpected error: %v, expected an ErrBlobMounted", err) - } -} - -func newRandomSchemaV1Manifest(name reference.Named, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { - blobs := make([]schema1.FSLayer, blobCount) - history := make([]schema1.History, blobCount) - - for i := 0; i < blobCount; i++ { - dgst, blob := newRandomBlob((i % 5) * 16) - - blobs[i] = schema1.FSLayer{BlobSum: dgst} - history[i] = schema1.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} - } - - m := 
schema1.Manifest{ - Name: name.String(), - Tag: tag, - Architecture: "x86", - FSLayers: blobs, - History: history, - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - } - - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - panic(err) - } - - sm, err := schema1.Sign(&m, pk) - if err != nil { - panic(err) - } - - return sm, digest.FromBytes(sm.Canonical), sm.Canonical -} - -func addTestManifestWithEtag(repo reference.Named, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { - actualDigest := digest.FromBytes(content) - getReqWithEtag := testutil.Request{ - Method: "GET", - Route: "/v2/" + repo.Name() + "/manifests/" + reference, - Headers: http.Header(map[string][]string{ - "If-None-Match": {fmt.Sprintf(`"%s"`, dgst)}, - }), - } - - var getRespWithEtag testutil.Response - if actualDigest.String() == dgst { - getRespWithEtag = testutil.Response{ - StatusCode: http.StatusNotModified, - Body: []byte{}, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {schema1.MediaTypeSignedManifest}, - }), - } - } else { - getRespWithEtag = testutil.Response{ - StatusCode: http.StatusOK, - Body: content, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {schema1.MediaTypeSignedManifest}, - }), - } - - } - *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) -} - -func contentDigestString(mediatype string, content []byte) string { - if mediatype == schema1.MediaTypeSignedManifest { - m, _, _ := distribution.UnmarshalManifest(mediatype, content) - content = m.(*schema1.SignedManifest).Canonical - } - return digest.Canonical.FromBytes(content).String() -} - -func addTestManifest(repo reference.Named, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { - *m = append(*m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo.Name() + "/manifests/" + reference, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: content, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {mediatype}, - "Docker-Content-Digest": {contentDigestString(mediatype, content)}, - }), - }, - }) - *m = append(*m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "HEAD", - Route: "/v2/" + repo.Name() + "/manifests/" + reference, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(content))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {mediatype}, - "Docker-Content-Digest": {digest.Canonical.FromBytes(content).String()}, - }), - }, - }) - -} - -func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { - if m1.Name != m2.Name { - return fmt.Errorf("name does not match %q != %q", m1.Name, m2.Name) - } - if m1.Tag != m2.Tag { - return fmt.Errorf("tag does not match %q != %q", m1.Tag, m2.Tag) - } - if len(m1.FSLayers) != len(m2.FSLayers) { - return fmt.Errorf("fs blob length does not match %d != %d", len(m1.FSLayers), len(m2.FSLayers)) - } - for i := range m1.FSLayers { - if m1.FSLayers[i].BlobSum != 
m2.FSLayers[i].BlobSum { - return fmt.Errorf("blobsum does not match %q != %q", m1.FSLayers[i].BlobSum, m2.FSLayers[i].BlobSum) - } - } - if len(m1.History) != len(m2.History) { - return fmt.Errorf("history length does not match %d != %d", len(m1.History), len(m2.History)) - } - for i := range m1.History { - if m1.History[i].V1Compatibility != m2.History[i].V1Compatibility { - return fmt.Errorf("blobsum does not match %q != %q", m1.History[i].V1Compatibility, m2.History[i].V1Compatibility) - } - } - return nil -} - -func TestV1ManifestFetch(t *testing.T) { - ctx := context.Background() - repo, _ := reference.ParseNamed("test.example.com/repo") - m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) - var m testutil.RequestResponseMap - _, pl, err := m1.Payload() - if err != nil { - t.Fatal(err) - } - addTestManifest(repo, dgst.String(), schema1.MediaTypeSignedManifest, pl, &m) - addTestManifest(repo, "latest", schema1.MediaTypeSignedManifest, pl, &m) - addTestManifest(repo, "badcontenttype", "text/html", pl, &m) - - e, c := testServer(m) - defer c() - - r, err := NewRepository(context.Background(), repo, e, nil) - if err != nil { - t.Fatal(err) - } - ms, err := r.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - - ok, err := ms.Exists(ctx, dgst) - if err != nil { - t.Fatal(err) - } - if !ok { - t.Fatal("Manifest does not exist") - } - - manifest, err := ms.Get(ctx, dgst) - if err != nil { - t.Fatal(err) - } - v1manifest, ok := manifest.(*schema1.SignedManifest) - if !ok { - t.Fatalf("Unexpected manifest type from Get: %T", manifest) - } - - if err := checkEqualManifest(v1manifest, m1); err != nil { - t.Fatal(err) - } - - var contentDigest digest.Digest - manifest, err = ms.Get(ctx, dgst, distribution.WithTag("latest"), ReturnContentDigest(&contentDigest)) - if err != nil { - t.Fatal(err) - } - v1manifest, ok = manifest.(*schema1.SignedManifest) - if !ok { - t.Fatalf("Unexpected manifest type from Get: %T", manifest) - } - - if err = checkEqualManifest(v1manifest, m1); err != nil { - t.Fatal(err) - } - - if contentDigest != dgst { - t.Fatalf("Unexpected returned content digest %v, expected %v", contentDigest, dgst) - } - - manifest, err = ms.Get(ctx, dgst, distribution.WithTag("badcontenttype")) - if err != nil { - t.Fatal(err) - } - v1manifest, ok = manifest.(*schema1.SignedManifest) - if !ok { - t.Fatalf("Unexpected manifest type from Get: %T", manifest) - } - - if err = checkEqualManifest(v1manifest, m1); err != nil { - t.Fatal(err) - } -} - -func TestManifestFetchWithEtag(t *testing.T) { - repo, _ := reference.ParseNamed("test.example.com/repo/by/tag") - _, d1, p1 := newRandomSchemaV1Manifest(repo, "latest", 6) - var m testutil.RequestResponseMap - addTestManifestWithEtag(repo, "latest", p1, &m, d1.String()) - - e, c := testServer(m) - defer c() - - ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) - if err != nil { - t.Fatal(err) - } - - ms, err := r.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - - clientManifestService, ok := ms.(*manifests) - if !ok { - panic("wrong type for client manifest service") - } - _, err = clientManifestService.Get(ctx, d1, distribution.WithTag("latest"), AddEtagToTag("latest", d1.String())) - if err != distribution.ErrManifestNotModified { - t.Fatal(err) - } -} - -func TestManifestDelete(t *testing.T) { - repo, _ := reference.ParseNamed("test.example.com/repo/delete") - _, dgst1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) - _, dgst2, _ := newRandomSchemaV1Manifest(repo, "latest", 6) - var m 
testutil.RequestResponseMap - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "DELETE", - Route: "/v2/" + repo.Name() + "/manifests/" + dgst1.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - }), - }, - }) - - e, c := testServer(m) - defer c() - - r, err := NewRepository(context.Background(), repo, e, nil) - if err != nil { - t.Fatal(err) - } - ctx := context.Background() - ms, err := r.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - - if err := ms.Delete(ctx, dgst1); err != nil { - t.Fatal(err) - } - if err := ms.Delete(ctx, dgst2); err == nil { - t.Fatal("Expected error deleting unknown manifest") - } - // TODO(dmcgowan): Check for specific unknown error -} - -func TestManifestPut(t *testing.T) { - repo, _ := reference.ParseNamed("test.example.com/repo/delete") - m1, dgst, _ := newRandomSchemaV1Manifest(repo, "other", 6) - - _, payload, err := m1.Payload() - if err != nil { - t.Fatal(err) - } - - var m testutil.RequestResponseMap - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PUT", - Route: "/v2/" + repo.Name() + "/manifests/other", - Body: payload, - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Docker-Content-Digest": {dgst.String()}, - }), - }, - }) - - putDgst := digest.FromBytes(m1.Canonical) - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PUT", - Route: "/v2/" + repo.Name() + "/manifests/" + putDgst.String(), - Body: payload, - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Content-Length": {"0"}, - "Docker-Content-Digest": {putDgst.String()}, - }), - }, - }) - - e, c := testServer(m) - defer c() - - r, err := NewRepository(context.Background(), repo, e, nil) - if err != nil { - t.Fatal(err) - } - ctx := context.Background() - ms, err := r.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - - if _, err := ms.Put(ctx, m1, distribution.WithTag(m1.Tag)); err != nil { - t.Fatal(err) - } - - if _, err := ms.Put(ctx, m1); err != nil { - t.Fatal(err) - } - - // TODO(dmcgowan): Check for invalid input error -} - -func TestManifestTags(t *testing.T) { - repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") - tagsList := []byte(strings.TrimSpace(` -{ - "name": "test.example.com/repo/tags/list", - "tags": [ - "tag1", - "tag2", - "funtag" - ] -} - `)) - var m testutil.RequestResponseMap - for i := 0; i < 3; i++ { - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo.Name() + "/tags/list", - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: tagsList, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(tagsList))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - } - e, c := testServer(m) - defer c() - - r, err := NewRepository(context.Background(), repo, e, nil) - if err != nil { - t.Fatal(err) - } - - ctx := context.Background() - tagService := r.Tags(ctx) - - tags, err := tagService.All(ctx) - if err != nil { - t.Fatal(err) - } - if len(tags) != 3 { - t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) - } - - expected := map[string]struct{}{ - "tag1": {}, - "tag2": {}, - "funtag": {}, - } - for _, t := range tags { - 
delete(expected, t) - } - if len(expected) != 0 { - t.Fatalf("unexpected tags returned: %v", expected) - } - // TODO(dmcgowan): Check for error cases -} - -func TestManifestTagsPaginated(t *testing.T) { - s := httptest.NewServer(http.NotFoundHandler()) - defer s.Close() - - repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") - tagsList := []string{"tag1", "tag2", "funtag"} - var m testutil.RequestResponseMap - for i := 0; i < 3; i++ { - body, err := json.Marshal(map[string]interface{}{ - "name": "test.example.com/repo/tags/list", - "tags": []string{tagsList[i]}, - }) - if err != nil { - t.Fatal(err) - } - queryParams := make(map[string][]string) - if i > 0 { - queryParams["n"] = []string{"1"} - queryParams["last"] = []string{tagsList[i-1]} - } - headers := http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(body))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }) - if i < 2 { - headers.Set("Link", "<"+s.URL+"/v2/"+repo.Name()+"/tags/list?n=1&last="+tagsList[i]+`>; rel="next"`) - } - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo.Name() + "/tags/list", - QueryParams: queryParams, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: body, - Headers: headers, - }, - }) - } - - s.Config.Handler = testutil.NewHandler(m) - - r, err := NewRepository(context.Background(), repo, s.URL, nil) - if err != nil { - t.Fatal(err) - } - - ctx := context.Background() - tagService := r.Tags(ctx) - - tags, err := tagService.All(ctx) - if err != nil { - t.Fatal(tags, err) - } - if len(tags) != 3 { - t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) - } - - expected := map[string]struct{}{ - "tag1": {}, - "tag2": {}, - "funtag": {}, - } - for _, t := range tags { - delete(expected, t) - } - if len(expected) != 0 { - t.Fatalf("unexpected tags returned: %v", expected) - } -} - -func TestManifestUnauthorized(t *testing.T) { - repo, _ := reference.ParseNamed("test.example.com/repo") - _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) - var m testutil.RequestResponseMap - - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo.Name() + "/manifests/" + dgst.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusUnauthorized, - Body: []byte("garbage"), - }, - }) - - e, c := testServer(m) - defer c() - - r, err := NewRepository(context.Background(), repo, e, nil) - if err != nil { - t.Fatal(err) - } - ctx := context.Background() - ms, err := r.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - - _, err = ms.Get(ctx, dgst) - if err == nil { - t.Fatal("Expected error fetching manifest") - } - v2Err, ok := err.(errcode.Error) - if !ok { - t.Fatalf("Unexpected error type: %#v", err) - } - if v2Err.Code != errcode.ErrorCodeUnauthorized { - t.Fatalf("Unexpected error code: %s", v2Err.Code.String()) - } - if expected := errcode.ErrorCodeUnauthorized.Message(); v2Err.Message != expected { - t.Fatalf("Unexpected message value: %q, expected %q", v2Err.Message, expected) - } -} - -func TestCatalog(t *testing.T) { - var m testutil.RequestResponseMap - addTestCatalog( - "/v2/_catalog?n=5", - []byte("{\"repositories\":[\"foo\", \"bar\", \"baz\"]}"), "", &m) - - e, c := testServer(m) - defer c() - - entries := make([]string, 5) - - r, err := NewRegistry(context.Background(), e, nil) - if err != nil { - t.Fatal(err) - } - - ctx := context.Background() - numFilled, err := 
r.Repositories(ctx, entries, "") - if err != io.EOF { - t.Fatal(err) - } - - if numFilled != 3 { - t.Fatalf("Got wrong number of repos") - } -} - -func TestCatalogInParts(t *testing.T) { - var m testutil.RequestResponseMap - addTestCatalog( - "/v2/_catalog?n=2", - []byte("{\"repositories\":[\"bar\", \"baz\"]}"), - "", &m) - addTestCatalog( - "/v2/_catalog?last=baz&n=2", - []byte("{\"repositories\":[\"foo\"]}"), - "", &m) - - e, c := testServer(m) - defer c() - - entries := make([]string, 2) - - r, err := NewRegistry(context.Background(), e, nil) - if err != nil { - t.Fatal(err) - } - - ctx := context.Background() - numFilled, err := r.Repositories(ctx, entries, "") - if err != nil { - t.Fatal(err) - } - - if numFilled != 2 { - t.Fatalf("Got wrong number of repos") - } - - numFilled, err = r.Repositories(ctx, entries, "baz") - if err != io.EOF { - t.Fatal(err) - } - - if numFilled != 1 { - t.Fatalf("Got wrong number of repos") - } -} - -func TestSanitizeLocation(t *testing.T) { - for _, testcase := range []struct { - description string - location string - source string - expected string - err error - }{ - { - description: "ensure relative location correctly resolved", - location: "/v2/foo/baasdf", - source: "http://blahalaja.com/v1", - expected: "http://blahalaja.com/v2/foo/baasdf", - }, - { - description: "ensure parameters are preserved", - location: "/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", - source: "http://blahalaja.com/v1", - expected: "http://blahalaja.com/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", - }, - { - description: "ensure new hostname overidden", - location: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", - source: "http://blahalaja.com/v1", - expected: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", - }, - } { - fatalf := func(format string, args ...interface{}) { - t.Fatalf(testcase.description+": "+format, args...) - } - - s, err := sanitizeLocation(testcase.location, testcase.source) - if err != testcase.err { - if testcase.err != nil { - fatalf("expected error: %v != %v", err, testcase) - } else { - fatalf("unexpected error sanitizing: %v", err) - } - } - - if s != testcase.expected { - fatalf("bad sanitize: %q != %q", s, testcase.expected) - } - } -} diff --git a/docs/client/transport/http_reader.go b/docs/client/transport/http_reader.go deleted file mode 100644 index e1b17a03..00000000 --- a/docs/client/transport/http_reader.go +++ /dev/null @@ -1,250 +0,0 @@ -package transport - -import ( - "errors" - "fmt" - "io" - "net/http" - "os" - "regexp" - "strconv" -) - -var ( - contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`) - - // ErrWrongCodeForByteRange is returned if the client sends a request - // with a Range header but the server returns a 2xx or 3xx code other - // than 206 Partial Content. - ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") -) - -// ReadSeekCloser combines io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET -// request. When seeking and starting a read from a non-zero offset -// the a "Range" header will be added which sets the offset. 
-// TODO(dmcgowan): Move this into a separate utility package -func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { - return &httpReadSeeker{ - client: client, - url: url, - errorHandler: errorHandler, - } -} - -type httpReadSeeker struct { - client *http.Client - url string - - // errorHandler creates an error from an unsuccessful HTTP response. - // This allows the error to be created with the HTTP response body - // without leaking the body through a returned error. - errorHandler func(*http.Response) error - - size int64 - - // rc is the remote read closer. - rc io.ReadCloser - // readerOffset tracks the offset as of the last read. - readerOffset int64 - // seekOffset allows Seek to override the offset. Seek changes - // seekOffset instead of changing readOffset directly so that - // connection resets can be delayed and possibly avoided if the - // seek is undone (i.e. seeking to the end and then back to the - // beginning). - seekOffset int64 - err error -} - -func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { - if hrs.err != nil { - return 0, hrs.err - } - - // If we sought to a different position, we need to reset the - // connection. This logic is here instead of Seek so that if - // a seek is undone before the next read, the connection doesn't - // need to be closed and reopened. A common example of this is - // seeking to the end to determine the length, and then seeking - // back to the original position. - if hrs.readerOffset != hrs.seekOffset { - hrs.reset() - } - - hrs.readerOffset = hrs.seekOffset - - rd, err := hrs.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - hrs.seekOffset += int64(n) - hrs.readerOffset += int64(n) - - return n, err -} - -func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { - if hrs.err != nil { - return 0, hrs.err - } - - lastReaderOffset := hrs.readerOffset - - if whence == os.SEEK_SET && hrs.rc == nil { - // If no request has been made yet, and we are seeking to an - // absolute position, set the read offset as well to avoid an - // unnecessary request. - hrs.readerOffset = offset - } - - _, err := hrs.reader() - if err != nil { - hrs.readerOffset = lastReaderOffset - return 0, err - } - - newOffset := hrs.seekOffset - - switch whence { - case os.SEEK_CUR: - newOffset += offset - case os.SEEK_END: - if hrs.size < 0 { - return 0, errors.New("content length not known") - } - newOffset = hrs.size + offset - case os.SEEK_SET: - newOffset = offset - } - - if newOffset < 0 { - err = errors.New("cannot seek to negative position") - } else { - hrs.seekOffset = newOffset - } - - return hrs.seekOffset, err -} - -func (hrs *httpReadSeeker) Close() error { - if hrs.err != nil { - return hrs.err - } - - // close and release reader chain - if hrs.rc != nil { - hrs.rc.Close() - } - - hrs.rc = nil - - hrs.err = errors.New("httpLayer: closed") - - return nil -} - -func (hrs *httpReadSeeker) reset() { - if hrs.err != nil { - return - } - if hrs.rc != nil { - hrs.rc.Close() - hrs.rc = nil - } -} - -func (hrs *httpReadSeeker) reader() (io.Reader, error) { - if hrs.err != nil { - return nil, hrs.err - } - - if hrs.rc != nil { - return hrs.rc, nil - } - - req, err := http.NewRequest("GET", hrs.url, nil) - if err != nil { - return nil, err - } - - if hrs.readerOffset > 0 { - // If we are at different offset, issue a range request from there. 
- req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) - // TODO: get context in here - // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) - } - - resp, err := hrs.client.Do(req) - if err != nil { - return nil, err - } - - // Normally would use client.SuccessStatus, but that would be a cyclic - // import - if resp.StatusCode >= 200 && resp.StatusCode <= 399 { - if hrs.readerOffset > 0 { - if resp.StatusCode != http.StatusPartialContent { - return nil, ErrWrongCodeForByteRange - } - - contentRange := resp.Header.Get("Content-Range") - if contentRange == "" { - return nil, errors.New("no Content-Range header found in HTTP 206 response") - } - - submatches := contentRangeRegexp.FindStringSubmatch(contentRange) - if len(submatches) < 4 { - return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) - } - - startByte, err := strconv.ParseUint(submatches[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) - } - - if startByte != uint64(hrs.readerOffset) { - return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) - } - - endByte, err := strconv.ParseUint(submatches[2], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) - } - - if submatches[3] == "*" { - hrs.size = -1 - } else { - size, err := strconv.ParseUint(submatches[3], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) - } - - if endByte+1 != size { - return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) - } - - hrs.size = int64(size) - } - } else if resp.StatusCode == http.StatusOK { - hrs.size = resp.ContentLength - } else { - hrs.size = -1 - } - hrs.rc = resp.Body - } else { - defer resp.Body.Close() - if hrs.errorHandler != nil { - return nil, hrs.errorHandler(resp) - } - return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) - } - - return hrs.rc, nil -} diff --git a/docs/client/transport/transport.go b/docs/client/transport/transport.go deleted file mode 100644 index 30e45fab..00000000 --- a/docs/client/transport/transport.go +++ /dev/null @@ -1,147 +0,0 @@ -package transport - -import ( - "io" - "net/http" - "sync" -) - -// RequestModifier represents an object which will do an inplace -// modification of an HTTP request. -type RequestModifier interface { - ModifyRequest(*http.Request) error -} - -type headerModifier http.Header - -// NewHeaderRequestModifier returns a new RequestModifier which will -// add the given headers to a request. -func NewHeaderRequestModifier(header http.Header) RequestModifier { - return headerModifier(header) -} - -func (h headerModifier) ModifyRequest(req *http.Request) error { - for k, s := range http.Header(h) { - req.Header[k] = append(req.Header[k], s...) - } - - return nil -} - -// NewTransport creates a new transport which will apply modifiers to -// the request on a RoundTrip call. 
-func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { - return &transport{ - Modifiers: modifiers, - Base: base, - } -} - -// transport is an http.RoundTripper that makes HTTP requests after -// copying and modifying the request -type transport struct { - Modifiers []RequestModifier - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. -func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { - req2 := cloneRequest(req) - for _, modifier := range t.Modifiers { - if err := modifier.ModifyRequest(req2); err != nil { - return nil, err - } - } - - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/docs/compatibility.md b/docs/compatibility.md new file mode 100644 index 00000000..cba7e378 --- /dev/null +++ b/docs/compatibility.md @@ -0,0 +1,84 @@ + + +# Registry Compatibility + +## Synopsis +*If a manifest is pulled by _digest_ from a registry 2.3 with Docker Engine 1.9 +and older, and the manifest was pushed with Docker Engine 1.10, a security check +will cause the Engine to receive a manifest it cannot use and the pull will fail.* + +## Registry Manifest Support + +Historically, the registry has supported a [single manifest type](./spec/manifest-v2-1.md) +known as _Schema 1_. + +With the move toward multiple architecture images the distribution project +introduced two new manifest types: Schema 2 manifests and manifest lists. 
The
+registry 2.3 supports all three manifest types and, in order to be compatible
+with older Docker engines, will in certain cases do an on-the-fly
+transformation of a manifest before serving the JSON in the response.
+
+This conversion has some implications for pulling manifests by digest, and this
+document enumerates those implications.
+
+
+## Content Addressable Storage (CAS)
+
+Manifests are stored and retrieved in the registry by keying off a digest
+representing a hash of the contents. One of the advantages provided by CAS is
+security: if the contents are changed, then the digest will no longer match.
+This prevents any modification of the manifest by a MITM attack or an untrusted
+third party.
+
+When a manifest is stored by the registry, this digest is returned in the HTTP
+response headers and, if events are configured, delivered within the event. The
+manifest can be retrieved either by tag or by this digest.
+
+For registry versions 2.2.1 and below, the registry will always store and
+serve _Schema 1_ manifests. The Docker Engine 1.10 will first
+attempt to send a _Schema 2_ manifest, falling back to sending a
+Schema 1 type manifest when it detects that the registry does not
+support the new version.
+
+
+## Registry v2.3
+
+### Manifest Push with Docker 1.9 and Older
+
+The Docker Engine will construct a _Schema 1_ manifest which the
+registry will persist to disk.
+
+When the manifest is pulled by digest or tag with any Docker version, a
+_Schema 1_ manifest will be returned.
+
+### Manifest Push with Docker 1.10
+
+The Docker Engine will construct a _Schema 2_ manifest which the
+registry will persist to disk.
+
+When the manifest is pulled by digest or tag with Docker Engine 1.10, a
+_Schema 2_ manifest will be returned. The Docker Engine 1.10
+understands the new manifest format.
+
+When the manifest is pulled by *tag* with Docker Engine 1.9 and older, the
+manifest is converted on-the-fly to _Schema 1_ and sent in the
+response. The Docker Engine 1.9 is compatible with this older format.
+
+*When the manifest is pulled by _digest_ with Docker Engine 1.9 and older, the
+same rewriting process will not happen in the registry. If this were to happen,
+the digest would no longer match the hash of the manifest and would violate the
+constraints of CAS.*
+
+For this reason, if a manifest is pulled by _digest_ from a registry 2.3 with Docker
+Engine 1.9 and older, and the manifest was pushed with Docker Engine 1.10, a
+security check will cause the Engine to receive a manifest it cannot use and the
+pull will fail.
diff --git a/docs/configuration.md b/docs/configuration.md
new file mode 100644
index 00000000..1ef680f5
--- /dev/null
+++ b/docs/configuration.md
@@ -0,0 +1,1877 @@
+
+
+# Registry Configuration Reference
+
+The Registry configuration is based on a YAML file, detailed below. While it comes with sane default values out of the box, you are heavily encouraged to review it exhaustively before moving your systems to production.
+
+## Override specific configuration options
+
+In a typical setup where you run your Registry from the official image, you can specify a configuration variable from the environment by passing `-e` arguments to your `docker run` stanza, or from within a Dockerfile using the `ENV` instruction.
+
+To override a configuration option, create an environment variable named
+`REGISTRY_variable` where *`variable`* is the name of the configuration option
+and the `_` (underscore) represents indentation levels.
For example, you can
+configure the `rootdirectory` of the `filesystem` storage backend:
+
+    storage:
+      filesystem:
+        rootdirectory: /var/lib/registry
+
+To override this value, set an environment variable like this:
+
+    REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere
+
+This variable overrides the `/var/lib/registry` value with the `/somewhere`
+directory.
+
+>**NOTE**: It is highly recommended to create a base configuration file, and
+then use environment variables to tweak individual values. Overriding
+configuration sections with environment variables is not recommended.
+
+## Overriding the entire configuration file
+
+If the default configuration is not a sound basis for your usage, or if you are having issues overriding keys from the environment, you can specify an alternate YAML configuration file by mounting it as a volume in the container.
+
+Typically, create a new configuration file from scratch, and call it `config.yml`, then:
+
+    docker run -d -p 5000:5000 --restart=always --name registry \
+      -v `pwd`/config.yml:/etc/docker/registry/config.yml \
+      registry:2
+
+You can (and probably should) use [this as a starting point](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml).
+
+## List of configuration options
+
+This section lists all the registry configuration options. Some options in
+the list are mutually exclusive, so make sure to read the detailed reference
+information about each option that appears later in this page.
+
+    version: 0.1
+    log:
+      level: debug
+      formatter: text
+      fields:
+        service: registry
+        environment: staging
+      hooks:
+        - type: mail
+          disabled: true
+          levels:
+            - panic
+          options:
+            smtp:
+              addr: mail.example.com:25
+              username: mailuser
+              password: password
+              insecure: true
+            from: sender@example.com
+            to:
+              - errors@example.com
+    loglevel: debug # deprecated: use "log"
+    storage:
+      filesystem:
+        rootdirectory: /var/lib/registry
+        maxthreads: 100
+      azure:
+        accountname: accountname
+        accountkey: base64encodedaccountkey
+        container: containername
+      gcs:
+        bucket: bucketname
+        keyfile: /path/to/keyfile
+        rootdirectory: /gcs/object/name/prefix
+        chunksize: 5242880
+      s3:
+        accesskey: awsaccesskey
+        secretkey: awssecretkey
+        region: us-west-1
+        regionendpoint: http://myobjects.local
+        bucket: bucketname
+        encrypt: true
+        keyid: mykeyid
+        secure: true
+        v4auth: true
+        chunksize: 5242880
+        rootdirectory: /s3/object/name/prefix
+      swift:
+        username: username
+        password: password
+        authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth
+        tenant: tenantname
+        tenantid: tenantid
+        domain: domain name for Openstack Identity v3 API
+        domainid: domain id for Openstack Identity v3 API
+        insecureskipverify: true
+        region: fr
+        container: containername
+        rootdirectory: /swift/object/name/prefix
+      oss:
+        accesskeyid: accesskeyid
+        accesskeysecret: accesskeysecret
+        region: OSS region name
+        endpoint: optional endpoints
+        internal: optional internal endpoint
+        bucket: OSS bucket
+        encrypt: optional data encryption setting
+        secure: optional ssl setting
+        chunksize: optional size value
+        rootdirectory: optional root directory
+      inmemory:  # This driver takes no parameters
+      delete:
+        enabled: false
+      redirect:
+        disable: false
+      cache:
+        blobdescriptor: redis
+      maintenance:
+        uploadpurging:
+          enabled: true
+          age: 168h
+          interval: 24h
+          dryrun: false
+        readonly:
+          enabled: false
+    auth:
+      silly:
+        realm: silly-realm
+        service: silly-service
+      token:
+        realm: token-realm
+        service: token-service
+        issuer: registry-token-issuer
+        rootcertbundle: /root/certs/bundle
+      htpasswd:
+        realm: basic-realm
+        path: /path/to/htpasswd
+    middleware:
+      registry:
+        - name: ARegistryMiddleware
+          options:
+            foo: bar
+      repository:
+        - name: ARepositoryMiddleware
+          options:
+            foo: bar
+      storage:
+        - name: cloudfront
+          options:
+            baseurl: https://my.cloudfronted.domain.com/
+            privatekey: /path/to/pem
+            keypairid: cloudfrontkeypairid
+            duration: 3000s
+      storage:
+        - name: redirect
+          options:
+            baseurl: https://example.com/
+    reporting:
+      bugsnag:
+        apikey: bugsnagapikey
+        releasestage: bugsnagreleasestage
+        endpoint: bugsnagendpoint
+      newrelic:
+        licensekey: newreliclicensekey
+        name: newrelicname
+        verbose: true
+    http:
+      addr: localhost:5000
+      prefix: /my/nested/registry/
+      host: https://myregistryaddress.org:5000
+      secret: asecretforlocaldevelopment
+      relativeurls: false
+      tls:
+        certificate: /path/to/x509/public
+        key: /path/to/x509/private
+        clientcas:
+          - /path/to/ca.pem
+          - /path/to/another/ca.pem
+        letsencrypt:
+          cachefile: /path/to/cache-file
+          email: emailused@letsencrypt.com
+      debug:
+        addr: localhost:5001
+      headers:
+        X-Content-Type-Options: [nosniff]
+    notifications:
+      endpoints:
+        - name: alistener
+          disabled: false
+          url: https://my.listener.com/event
+          headers:
+          timeout: 500
+          threshold: 5
+          backoff: 1000
+    redis:
+      addr: localhost:6379
+      password: asecret
+      db: 0
+      dialtimeout: 10ms
+      readtimeout: 10ms
+      writetimeout: 10ms
+      pool:
+        maxidle: 16
+        maxactive: 64
+        idletimeout: 300s
+    health:
+      storagedriver:
+        enabled: true
+        interval: 10s
+        threshold: 3
+      file:
+        - file: /path/to/checked/file
+          interval: 10s
+      http:
+        - uri: http://server.to.check/must/return/200
+          headers:
+            Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==]
+          statuscode: 200
+          timeout: 3s
+          interval: 10s
+          threshold: 3
+      tcp:
+        - addr: redis-server.domain.com:6379
+          timeout: 3s
+          interval: 10s
+          threshold: 3
+    proxy:
+      remoteurl: https://registry-1.docker.io
+      username: [username]
+      password: [password]
+    compatibility:
+      schema1:
+        signingkeyfile: /etc/registry/key.json
+
+In some instances a configuration option is **optional** but it contains child
+options marked as **required**. This indicates that you can omit the parent with
+all its children. However, if the parent is included, you must also include all
+the children marked **required**.
+
+## version
+
+    version: 0.1
+
+The `version` option is **required**. It specifies the configuration's version.
+It is expected to remain a top-level field, to allow for a consistent version
+check before parsing the remainder of the configuration file.
+
+## log
+
+The `log` subsection configures the behavior of the logging system. The logging
+system outputs everything to stdout. You can adjust the granularity and format
+with this configuration section.
+
+    log:
+      level: debug
+      formatter: text
+      fields:
+        service: registry
+        environment: staging
+
ParameterRequiredDescription
+ level + + no + + Sets the sensitivity of logging output. Permitted values are + error, warn, info and + debug. The default is info. +
+ formatter + + no + + This selects the format of logging output. The format primarily affects how keyed + attributes for a log line are encoded. Options are text, json or + logstash. The default is text. +
+ fields + + no + + A map of field names to values. These are added to every log line for + the context. This is useful for identifying the source of log messages + after they are mixed into other systems. +
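+
+As a concrete illustration of the environment-variable override scheme
+described at the beginning of this document, the log level can be changed
+without editing the configuration file. This is a minimal sketch; the variable
+name simply follows the documented `REGISTRY_` naming rule:
+
+    log:
+      level: info
+
+To override the level from the environment:
+
+    REGISTRY_LOG_LEVEL=debug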
+
+## hooks
+
+    hooks:
+      - type: mail
+        levels:
+          - panic
+        options:
+          smtp:
+            addr: smtp.sendhost.com:25
+            username: sendername
+            password: password
+            insecure: true
+          from: name@sendhost.com
+          to:
+            - name@receivehost.com
+
+The `hooks` subsection configures the logging hooks' behavior. This subsection
+includes a sequence handler which you can use for sending mail, for example.
+Refer to `loglevel` to configure the level of messages printed.
+
+## loglevel
+
+> **DEPRECATED:** Please use [log](#log) instead.
+
+    loglevel: debug
+
+Permitted values are `error`, `warn`, `info` and `debug`. The default is
+`info`.
+
+## storage
+
+    storage:
+      filesystem:
+        rootdirectory: /var/lib/registry
+      azure:
+        accountname: accountname
+        accountkey: base64encodedaccountkey
+        container: containername
+      gcs:
+        bucket: bucketname
+        keyfile: /path/to/keyfile
+        rootdirectory: /gcs/object/name/prefix
+      s3:
+        accesskey: awsaccesskey
+        secretkey: awssecretkey
+        region: us-west-1
+        regionendpoint: http://myobjects.local
+        bucket: bucketname
+        encrypt: true
+        keyid: mykeyid
+        secure: true
+        v4auth: true
+        chunksize: 5242880
+        rootdirectory: /s3/object/name/prefix
+      swift:
+        username: username
+        password: password
+        authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth
+        tenant: tenantname
+        tenantid: tenantid
+        domain: domain name for Openstack Identity v3 API
+        domainid: domain id for Openstack Identity v3 API
+        insecureskipverify: true
+        region: fr
+        container: containername
+        rootdirectory: /swift/object/name/prefix
+      oss:
+        accesskeyid: accesskeyid
+        accesskeysecret: accesskeysecret
+        region: OSS region name
+        endpoint: optional endpoints
+        internal: optional internal endpoint
+        bucket: OSS bucket
+        encrypt: optional data encryption setting
+        secure: optional ssl setting
+        chunksize: optional size value
+        rootdirectory: optional root directory
+      inmemory:
+      delete:
+        enabled: false
+      cache:
+        blobdescriptor: inmemory
+      maintenance:
+        uploadpurging:
+          enabled: true
+          age: 168h
+          interval: 24h
+          dryrun: false
+      redirect:
+        disable: false
+
+The storage option is **required** and defines which storage backend is in use.
+You must configure one backend; if you configure more, the registry returns an error.
+You can choose any of these backend storage drivers:
+
+| Storage driver      | Description |
+| ------------------- | ----------- |
+| `filesystem`        | Uses the local disk to store registry files. It is ideal for development and may be appropriate for some small-scale production applications. See the [driver's reference documentation](storage-drivers/filesystem.md). |
+| `azure`             | Uses Microsoft's Azure Blob Storage. See the [driver's reference documentation](storage-drivers/azure.md). |
+| `gcs`               | Uses Google Cloud Storage. See the [driver's reference documentation](storage-drivers/gcs.md). |
+| `s3`                | Uses Amazon's Simple Storage Service (S3) and compatible Storage Services. See the [driver's reference documentation](storage-drivers/s3.md). |
+| `swift`             | Uses Openstack Swift object storage. See the [driver's reference documentation](storage-drivers/swift.md). |
+| `oss`               | Uses Aliyun OSS for object storage. See the [driver's reference documentation](storage-drivers/oss.md). |
+
+For testing purposes only, you can use the [`inmemory` storage
+driver](storage-drivers/inmemory.md). If you would like to run a registry from
+volatile memory, use the [`filesystem` driver](storage-drivers/filesystem.md) on
+a ramdisk.
+
+If you are deploying a registry on Windows, be aware that a Windows volume
+mounted from the host is not recommended. Instead, you can use an S3 or Azure
+backing data-store. If you do use a Windows volume, you must ensure that the
+`PATH` to the mount point is within Windows' `MAX_PATH` limits (typically 255
+characters). Failure to do so can result in the following error message, and
+your registry will not function properly:
+
+    mkdir /XXX protocol error
+
+### Maintenance
+
+Currently upload purging and read-only mode are the only maintenance functions available.
+These and future maintenance functions which are related to storage can be configured under
+the maintenance section.
+
+### Upload Purging
+
+Upload purging is a background process that periodically removes orphaned files from the upload
+directories of the registry. Upload purging is enabled by default. To
+configure upload directory purging, the following parameters
+must be set.
+
+| Parameter  | Required | Description |
+| ---------- | -------- | ----------- |
+| `enabled`  | yes      | Set to true to enable upload purging. Default=true. |
+| `age`      | yes      | Upload directories which are older than this age will be deleted. Default=168h (1 week). |
+| `interval` | yes      | The interval between upload directory purging. Default=24h. |
+| `dryrun`   | yes      | Set dryrun to true to obtain a summary of what directories will be deleted. Default=false. |
+
+Note: `age` and `interval` are strings containing a number with optional fraction and a unit suffix: e.g. 45m, 2h10m, 168h (1 week).
+
+### Read-only mode
+
+If the `readonly` section under `maintenance` has `enabled` set to `true`,
+clients will not be allowed to write to the registry. This mode is useful to
+temporarily prevent writes to the backend storage so a garbage collection pass
+can be run. Before running garbage collection, the registry should be
+restarted with readonly's `enabled` set to true. After the garbage collection
+pass finishes, the registry may be restarted again, this time with `readonly`
+removed from the configuration (or set to false).
+
+### delete
+
+Use the `delete` subsection to enable the deletion of image blobs and manifests
+by digest. It defaults to false, but it can be enabled by writing the following
+in the configuration file:
+
+    delete:
+      enabled: true
+
+### cache
+
+Use the `cache` subsection to enable caching of data accessed in the storage
+backend. Currently, the only available cache provides fast access to layer
+metadata; if configured, it uses the `blobdescriptor` field.
+
+You can set the `blobdescriptor` field to `redis` or `inmemory`. The `redis` value uses
+a Redis pool to cache layer metadata. The `inmemory` value uses an in-memory
+map.
+
+>**NOTE**: Formerly, `blobdescriptor` was known as `layerinfo`. While these
+>are equivalent, `layerinfo` has been deprecated, in favor of
+>`blobdescriptor`.
+
+### redirect
+
+The `redirect` subsection provides configuration for managing redirects from
+content backends. For backends that support it, redirecting is enabled by
+default. Certain deployment scenarios may prefer to route all data through the
+Registry, rather than redirecting to the backend.
This may be more efficient +when using a backend that is not co-located or when a registry instance is +doing aggressive caching. + +Redirects can be disabled by adding a single flag `disable`, set to `true` +under the `redirect` section: + + redirect: + disable: true + + +## auth + + auth: + silly: + realm: silly-realm + service: silly-service + token: + realm: token-realm + service: token-service + issuer: registry-token-issuer + rootcertbundle: /root/certs/bundle + htpasswd: + realm: basic-realm + path: /path/to/htpasswd + +The `auth` option is **optional**. There are +currently 3 possible auth providers, `silly`, `token` and `htpasswd`. You can configure only +one `auth` provider. + +### silly + +The `silly` auth is only for development purposes. It simply checks for the +existence of the `Authorization` header in the HTTP request. It has no regard for +the header's value. If the header does not exist, the `silly` auth responds with a +challenge response, echoing back the realm, service, and scope that access was +denied for. + +The following values are used to configure the response: + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ realm + + yes + + The realm in which the registry server authenticates. +
+ service + + yes + + The service being authenticated. +
+
+
+
+### token
+
+Token based authentication allows the authentication system to be decoupled from
+the registry. It is a well-established authentication paradigm with a high
+degree of security.
+
ParameterRequiredDescription
+ realm + + yes + + The realm in which the registry server authenticates. +
+ service + + yes + + The service being authenticated. +
+ issuer + + yes + +The name of the token issuer. The issuer inserts this into +the token so it must match the value configured for the issuer. +
+ rootcertbundle + + yes + +The absolute path to the root certificate bundle. This bundle contains the +public part of the certificates that is used to sign authentication tokens. +
+
+For more information about Token based authentication configuration, see the [specification](spec/auth/token.md).
+
+### htpasswd
+
+The _htpasswd_ authentication backend allows you to configure basic auth using an
+[Apache htpasswd
+file](https://httpd.apache.org/docs/2.4/programs/htpasswd.html). Only
+[`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt) format passwords are supported.
+Entries with other hash types will be ignored. The htpasswd file is loaded once,
+at startup. If the file is invalid, the registry will display an error and will
+not start.
+
+> __WARNING:__ This authentication scheme should only be used with TLS
+> configured, since basic authentication sends passwords as part of the HTTP
+> header.
+
ParameterRequiredDescription
+ realm + + yes + + The realm in which the registry server authenticates. +
+ path + + yes + + Path to htpasswd file to load at startup. +
+ +## middleware + +The `middleware` option is **optional**. Use this option to inject middleware at +named hook points. All middleware must implement the same interface as the +object they're wrapping. This means a registry middleware must implement the +`distribution.Namespace` interface, repository middleware must implement +`distribution.Repository`, and storage middleware must implement +`driver.StorageDriver`. + +An example configuration of the `cloudfront` middleware, a storage middleware: + + middleware: + registry: + - name: ARegistryMiddleware + options: + foo: bar + repository: + - name: ARepositoryMiddleware + options: + foo: bar + storage: + - name: cloudfront + options: + baseurl: https://my.cloudfronted.domain.com/ + privatekey: /path/to/pem + keypairid: cloudfrontkeypairid + duration: 3000s + +Each middleware entry has `name` and `options` entries. The `name` must +correspond to the name under which the middleware registers itself. The +`options` field is a map that details custom configuration required to +initialize the middleware. It is treated as a `map[string]interface{}`. As such, +it supports any interesting structures desired, leaving it up to the middleware +initialization function to best determine how to handle the specific +interpretation of the options. + +### cloudfront + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ baseurl + + yes + + SCHEME://HOST[/PATH] at which Cloudfront is served. +
+ privatekey + + yes + + Private Key for Cloudfront provided by AWS. +
+ keypairid + + yes + + Key pair ID provided by AWS. +
+ duration + + no + + Specify a `duration` by providing an integer and a unit. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. For example, `3000s` is a valid duration; there should be no space between the integer and unit. If you do not specify a `duration` or specify an integer without a time unit, this defaults to 20 minutes. +
+
+### redirect
+
+In place of the `cloudfront` storage middleware, the `redirect`
+storage middleware can be used to specify a custom URL for a proxy
+that serves the layers stored by the S3 storage driver.
+
+| Parameter | Required | Description |
+| --- | --- | --- |
+| baseurl | yes | `SCHEME://HOST` at which layers are served. Can also contain port. For example, `https://example.com:5443`. |
+
+## reporting
+
+    reporting:
+      bugsnag:
+        apikey: bugsnagapikey
+        releasestage: bugsnagreleasestage
+        endpoint: bugsnagendpoint
+      newrelic:
+        licensekey: newreliclicensekey
+        name: newrelicname
+        verbose: true
+
+The `reporting` option is **optional** and configures error and metrics
+reporting tools. At the moment only two services are supported, [New
+Relic](http://newrelic.com/) and [Bugsnag](http://bugsnag.com); a valid
+configuration may contain both.
+
+### bugsnag
+
ParameterRequiredDescription
+ apikey + + yes + + API Key provided by Bugsnag +
+ releasestage + + no + + Tracks where the registry is deployed, for example, + production, staging, or + development. +
+ endpoint + + no + + Specify the enterprise Bugsnag endpoint. +
+ + +### newrelic + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ licensekey + + yes + + License key provided by New Relic. +
+ name + + no + + New Relic application name. +
+ verbose + + no + + Enable New Relic debugging output on stdout. +
+ +## http + + http: + addr: localhost:5000 + net: tcp + prefix: /my/nested/registry/ + host: https://myregistryaddress.org:5000 + secret: asecretforlocaldevelopment + relativeurls: false + tls: + certificate: /path/to/x509/public + key: /path/to/x509/private + clientcas: + - /path/to/ca.pem + - /path/to/another/ca.pem + letsencrypt: + cachefile: /path/to/cache-file + email: emailused@letsencrypt.com + debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] + +The `http` option details the configuration for the HTTP server that hosts the registry. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ addr + + yes + + The address for which the server should accept connections. The form depends on a network type (see net option): + HOST:PORT for tcp and FILE for a unix socket. +
+ net + + no + + The network which is used to create a listening socket. Known networks are unix and tcp. + The default empty value means tcp. +
+ prefix + + no + +If the server does not run at the root path use this value to specify the +prefix. The root path is the section before v2. It +should have both preceding and trailing slashes, for example /path/. +
+ host + + no + +This parameter specifies an externally-reachable address for the registry, as a +fully qualified URL. If present, it is used when creating generated URLs. +Otherwise, these URLs are derived from client requests. +
+ secret + + yes + +A random piece of data. This is used to sign state that may be stored with the +client to protect against tampering. For production environments you should generate a +random piece of data using a cryptographically secure random generator. This +configuration parameter may be omitted, in which case the registry will automatically +generate a secret at launch. +

+WARNING: If you are building a cluster of registries behind a load balancer, you MUST +ensure the secret is the same for all registries. +

+ relativeurls + + no + + Specifies that the registry should return relative URLs in Location headers. + The client is responsible for resolving the correct URL. This option is not + compatible with Docker 1.7 and earlier. +
+ + +### tls + +The `tls` struct within `http` is **optional**. Use this to configure TLS +for the server. If you already have a server such as Nginx or Apache running on +the same host as the registry, you may prefer to configure TLS termination there +and proxy connections to the registry server. + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ certificate + + yes + + Absolute path to x509 cert file +
+ key + + yes + + Absolute path to x509 private key file. +
+ clientcas + + no + + An array of absolute paths to an x509 CA file +
+ +### letsencrypt + +The `letsencrypt` struct within `tls` is **optional**. Use this to configure TLS +certificates provided by [Let's Encrypt](https://letsencrypt.org/how-it-works/). + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ cachefile + + yes + + Absolute path to a file for the Let's Encrypt agent to cache data +
+ email + + yes + + Email used to register with Let's Encrypt. +
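+
+A minimal sketch of a `tls` section that delegates certificate management to
+Let's Encrypt. This assumes the static `certificate` and `key` parameters are
+left unset; the cache path and email address are placeholders:
+
+    http:
+      tls:
+        letsencrypt:
+          cachefile: /path/to/cache-file
+          email: emailused@letsencrypt.com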
+
+### debug
+
+The `debug` option is **optional**. Use it to configure a debug server that
+can be helpful in diagnosing problems. The debug endpoint can be used for
+monitoring registry metrics and health, as well as profiling. Sensitive
+information may be available via the debug endpoint. Please be certain that
+access to the debug endpoint is locked down in a production environment.
+
+The `debug` section takes a single, required `addr` parameter. This parameter
+specifies the `HOST:PORT` on which the debug server should accept connections.
+
+
+### headers
+
+The `headers` option is **optional**. Use it to specify headers that the HTTP
+server should include in responses. This can be used for security headers such
+as `Strict-Transport-Security`.
+
+The `headers` option should contain an option for each header to include, where
+the parameter name is the header's name, and the parameter value is a list of the
+header's payload values.
+
+Including `X-Content-Type-Options: [nosniff]` is recommended, so that browsers
+will not interpret content as HTML if they are directed to load a page from the
+registry. This header is included in the example configuration files.
+
+
+## notifications
+
+    notifications:
+      endpoints:
+        - name: alistener
+          disabled: false
+          url: https://my.listener.com/event
+          headers:
+          timeout: 500
+          threshold: 5
+          backoff: 1000
+
+The notifications option is **optional** and currently may contain a single
+option, `endpoints`.
+
+### endpoints
+
+Endpoints is a list of named services (URLs) that can accept event notifications.
+
ParameterRequiredDescription
+ name + + yes + +A human-readable name for the service. +
+ disabled + + no + +A boolean to enable/disable notifications for a service. +
+ url + + yes + +The URL to which events should be published. +
+ headers + + yes + + Static headers to add to each request. Each header's name should be a key + underneath headers, and each value is a list of payloads for that + header name. Note that values must always be lists. +
+ timeout + + yes + + An HTTP timeout value. This field takes a positive integer and an optional + suffix indicating the unit of time. Possible units are: +
+  • ns (nanoseconds)
+  • us (microseconds)
+  • ms (milliseconds)
+  • s (seconds)
+  • m (minutes)
+  • h (hours)
+ If you omit the suffix, the system interprets the value as nanoseconds. +
+ threshold + + yes + + An integer specifying the number of failures allowed before the system + backs off. +
+ backoff + + yes + + How long the system backs off before retrying. This field takes a positive + integer and an optional suffix indicating the unit of time. Possible units + are: +
+  • ns (nanoseconds)
+  • us (microseconds)
+  • ms (milliseconds)
+  • s (seconds)
+  • m (minutes)
+  • h (hours)
+ If you omit the suffix, the system interprets the value as nanoseconds. +
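+
+Because a bare integer is interpreted as nanoseconds, it is usually worth
+spelling out the units on `timeout` and `backoff`. A minimal sketch with
+explicit suffixes (the endpoint name and URL are placeholders):
+
+    notifications:
+      endpoints:
+        - name: alistener
+          url: https://my.listener.com/event
+          timeout: 500ms
+          threshold: 5
+          backoff: 1s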
+ + +## redis + + redis: + addr: localhost:6379 + password: asecret + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + +Declare parameters for constructing the redis connections. Registry instances +may use the Redis instance for several applications. The current purpose is +caching information about immutable blobs. Most of the options below control +how the registry connects to redis. You can control the pool's behavior +with the [pool](#pool) subsection. + +It's advisable to configure Redis itself with the **allkeys-lru** eviction policy +as the registry does not set an expire value on keys. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ addr + + yes + + Address (host and port) of redis instance. +
+ password + + no + + A password used to authenticate to the redis instance. +
+ db + + no + + Selects the db for each connection. +
+ dialtimeout + + no + + Timeout for connecting to a redis instance. +
+ readtimeout + + no + + Timeout for reading from redis connections. +
+ writetimeout + + no + + Timeout for writing to redis connections. +
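+
+Note that this section only establishes the connection. For the registry to
+actually use Redis for layer metadata, the cache must also be pointed at it
+under `storage`, as described in the [cache](#cache) section. A minimal
+fragment combining the two (the rest of the `storage` block is omitted):
+
+    redis:
+      addr: localhost:6379
+    storage:
+      cache:
+        blobdescriptor: redis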
+ + +### pool + + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + +Configure the behavior of the Redis connection pool. + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ maxidle + + no + + Sets the maximum number of idle connections. +
+ maxactive + + no + + Sets the maximum number of connections that should + be opened before blocking a connection request. +
+ idletimeout + + no + + Sets the amount of time to wait before closing + inactive connections. +
+ +## health + + health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + file: + - file: /path/to/checked/file + interval: 10s + http: + - uri: http://server.to.check/must/return/200 + headers: + Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] + statuscode: 200 + timeout: 3s + interval: 10s + threshold: 3 + tcp: + - addr: redis-server.domain.com:6379 + timeout: 3s + interval: 10s + threshold: 3 + +The health option is **optional**. It may contain preferences for a periodic +health check on the storage driver's backend storage, and optional periodic +checks on local files, HTTP URIs, and/or TCP servers. The results of the health +checks are available at /debug/health on the debug HTTP server if the debug +HTTP server is enabled (see http section). + +### storagedriver + +storagedriver contains options for a health check on the configured storage +driver's backend storage. enabled must be set to true for this health check to +be active. + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ enabled + + yes + +"true" to enable the storage driver health check or "false" to disable it. +
+ interval + + no + + The length of time to wait between repetitions of the check. This field + takes a positive integer and an optional suffix indicating the unit of + time. Possible units are: +
+  • ns (nanoseconds)
+  • us (microseconds)
+  • ms (milliseconds)
+  • s (seconds)
+  • m (minutes)
+  • h (hours)
+ If you omit the suffix, the system interprets the value as nanoseconds. + The default value is 10 seconds if this field is omitted. +
+ threshold + + no + + An integer specifying the number of times the check must fail before the + check triggers an unhealthy state. If this field is not specified, a + single failure will trigger an unhealthy state. +
+ +### file + +file is a list of paths to be periodically checked for the existence of a file. +If a file exists at the given path, the health check will fail. This can be +used as a way of bringing a registry out of rotation by creating a file. + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ file + + yes + +The path to check for the existence of a file. +
+ interval + + no + + The length of time to wait between repetitions of the check. This field + takes a positive integer and an optional suffix indicating the unit of + time. Possible units are: +
+  • ns (nanoseconds)
+  • us (microseconds)
+  • ms (milliseconds)
+  • s (seconds)
+  • m (minutes)
+  • h (hours)
+ If you omit the suffix, the system interprets the value as nanoseconds. + The default value is 10 seconds if this field is omitted. +
+ +### http + +http is a list of HTTP URIs to be periodically checked with HEAD requests. If +a HEAD request doesn't complete or returns an unexpected status code, the +health check will fail. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ uri + + yes + +The URI to check. +
+ headers + + no + + Static headers to add to each request. Each header's name should be a key + underneath headers, and each value is a list of payloads for that + header name. Note that values must always be lists. +
+ statuscode + + no + +Expected status code from the HTTP URI. Defaults to 200. +
+ timeout + + no + + The length of time to wait before timing out the HTTP request. This field + takes a positive integer and an optional suffix indicating the unit of + time. Possible units are: +
+  • ns (nanoseconds)
+  • us (microseconds)
+  • ms (milliseconds)
+  • s (seconds)
+  • m (minutes)
+  • h (hours)
+ If you omit the suffix, the system interprets the value as nanoseconds. +
+ interval + + no + + The length of time to wait between repetitions of the check. This field + takes a positive integer and an optional suffix indicating the unit of + time. Possible units are: +
+  • ns (nanoseconds)
+  • us (microseconds)
+  • ms (milliseconds)
+  • s (seconds)
+  • m (minutes)
+  • h (hours)
+ If you omit the suffix, the system interprets the value as nanoseconds. + The default value is 10 seconds if this field is omitted. +
+ threshold + + no + + An integer specifying the number of times the check must fail before the + check triggers an unhealthy state. If this field is not specified, a + single failure will trigger an unhealthy state. +
+ +### tcp + +tcp is a list of TCP addresses to be periodically checked with connection +attempts. The addresses must include port numbers. If a connection attempt +fails, the health check will fail. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ addr + + yes + +The TCP address to connect to, including a port number. +
+ timeout + + no + + The length of time to wait before timing out the TCP connection. This + field takes a positive integer and an optional suffix indicating the unit + of time. Possible units are: +
+  • ns (nanoseconds)
+  • us (microseconds)
+  • ms (milliseconds)
+  • s (seconds)
+  • m (minutes)
+  • h (hours)
+ If you omit the suffix, the system interprets the value as nanoseconds. +
+ interval + + no + + The length of time to wait between repetitions of the check. This field + takes a positive integer and an optional suffix indicating the unit of + time. Possible units are: +
+  • ns (nanoseconds)
+  • us (microseconds)
+  • ms (milliseconds)
+  • s (seconds)
+  • m (minutes)
+  • h (hours)
+ If you omit the suffix, the system interprets the value as nanoseconds. + The default value is 10 seconds if this field is omitted. +
+ threshold + + no + + An integer specifying the number of times the check must fail before the + check triggers an unhealthy state. If this field is not specified, a + single failure will trigger an unhealthy state. +
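+
+The results of these checks are surfaced at `/debug/health`, so the health
+section is most useful with the debug server enabled. A minimal fragment
+pairing the two (addresses are placeholders):
+
+    http:
+      debug:
+        addr: localhost:5001
+    health:
+      tcp:
+        - addr: redis-server.domain.com:6379
+          timeout: 3s
+          interval: 10s
+          threshold: 3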
+ +## Proxy + + proxy: + remoteurl: https://registry-1.docker.io + username: [username] + password: [password] + +Proxy enables a registry to be configured as a pull through cache to the official Docker Hub. See [mirror](recipes/mirror.md) for more information. Pushing to a registry configured as a pull through cache is currently unsupported. + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ remoteurl + + yes + + The URL of the official Docker Hub +
+ username + + no + + The username of the Docker Hub account +
+ password + + no + + The password for the official Docker Hub account +
+
+To enable pulling private repositories (e.g. `batman/robin`), a username and
+password for user `batman` must be specified. Note: These private repositories
+will be stored in the proxy cache's storage, and relevant measures should be
+taken to protect access to it.
+
+## Compatibility
+
+    compatibility:
+      schema1:
+        signingkeyfile: /etc/registry/key.json
+
+Configure handling of older and deprecated features. Each subsection
+defines such a feature with configurable behavior.
+
+### Schema1
+
ParameterRequiredDescription
+ signingkeyfile + + no + + The signing private key used for adding signatures to schema1 manifests. + If no signing key is provided, a new ECDSA key will be generated on + startup. +
+ +## Example: Development configuration + +The following is a simple example you can use for local development: + + version: 0.1 + log: + level: debug + storage: + filesystem: + rootdirectory: /var/lib/registry + http: + addr: localhost:5000 + secret: asecretforlocaldevelopment + debug: + addr: localhost:5001 + +The above configures the registry instance to run on port `5000`, binding to +`localhost`, with the `debug` server enabled. Registry data storage is in the +`/var/lib/registry` directory. Logging is in `debug` mode, which is the most +verbose. + +A similar simple configuration is available at +[config-example.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml). +Both are generally useful for local development. + + +## Example: Middleware configuration + +This example illustrates how to configure storage middleware in a registry. +Middleware allows the registry to serve layers via a content delivery network +(CDN). This is useful for reducing requests to the storage layer. + +The registry supports [Amazon +Cloudfront](http://aws.amazon.com/cloudfront/). You can only use Cloudfront in +conjunction with the S3 storage driver. + + + + + + + + + + + + + + + + + + +
ParameterDescription
nameThe storage middleware name. Currently cloudfront is an accepted value.
disabledSet to false to easily disable the middleware.
options: + A set of key/value options to configure the middleware. +
+  • baseurl: The Cloudfront base URL.
+  • privatekey: The location of your AWS private key on the filesystem.
+  • keypairid: The ID of your Cloudfront keypair.
+  • duration: The duration in minutes for which the URL is valid. Default is 20.
+
+ +The following example illustrates these values: + + middleware: + storage: + - name: cloudfront + disabled: false + options: + baseurl: http://d111111abcdef8.cloudfront.net + privatekey: /path/to/asecret.pem + keypairid: asecret + duration: 60 + + +>**Note**: Cloudfront keys exist separately to other AWS keys. See +>[the documentation on AWS credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) +>for more information. diff --git a/docs/deploying.md b/docs/deploying.md new file mode 100644 index 00000000..2e8ce69e --- /dev/null +++ b/docs/deploying.md @@ -0,0 +1,237 @@ + + +# Deploying a registry server + +You need to [install Docker version 1.6.0 or newer](/engine/installation/index.md). + +## Running on localhost + +Start your registry: + + docker run -d -p 5000:5000 --restart=always --name registry registry:2 + +You can now use it with docker. + +Get any image from the hub and tag it to point to your registry: + + docker pull ubuntu && docker tag ubuntu localhost:5000/ubuntu + +... then push it to your registry: + + docker push localhost:5000/ubuntu + +... then pull it back from your registry: + + docker pull localhost:5000/ubuntu + +To stop your registry, you would: + + docker stop registry && docker rm -v registry + +## Storage + +By default, your registry data is persisted as a [docker volume](/engine/tutorials/dockervolumes.md) on the host filesystem. Properly understanding volumes is essential if you want to stick with a local filesystem storage. + +Specifically, you might want to point your volume location to a specific place in order to more easily access your registry data. To do so you can: + + docker run -d -p 5000:5000 --restart=always --name registry \ + -v `pwd`/data:/var/lib/registry \ + registry:2 + +### Alternatives + +You should usually consider using [another storage backend](./storage-drivers/index.md) instead of the local filesystem. Use the [storage configuration options](./configuration.md#storage) to configure an alternate storage backend. + +Using one of these will allow you to more easily scale your registry, and leverage your storage redundancy and availability features. + +## Running a domain registry + +While running on `localhost` has its uses, most people want their registry to be more widely available. To do so, the Docker engine requires you to secure it using TLS, which is conceptually very similar to configuring your web server with SSL. + +### Get a certificate + +Assuming that you own the domain `myregistrydomain.com`, and that its DNS record points to the host where you are running your registry, you first need to get a certificate from a CA. + +Create a `certs` directory: + + mkdir -p certs + +Then move and/or rename your crt file to: `certs/domain.crt`, and your key file to: `certs/domain.key`. + +Make sure you stopped your registry from the previous steps, then start your registry again with TLS enabled: + + docker run -d -p 5000:5000 --restart=always --name registry \ + -v `pwd`/certs:/certs \ + -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \ + -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \ + registry:2 + +You should now be able to access your registry from another docker host: + + docker pull ubuntu + docker tag ubuntu myregistrydomain.com:5000/ubuntu + docker push myregistrydomain.com:5000/ubuntu + docker pull myregistrydomain.com:5000/ubuntu + +#### Gotcha + +A certificate issuer may supply you with an *intermediate* certificate. 
In this case, you must combine your certificate with the intermediate certificate to form a *certificate bundle*. You can do this using the `cat` command:
+
+    cat domain.crt intermediate-certificates.pem > certs/domain.crt
+
+### Let's Encrypt
+
+The registry supports using Let's Encrypt to automatically obtain a browser-trusted certificate. For more
+information on Let's Encrypt, see [https://letsencrypt.org/how-it-works/](https://letsencrypt.org/how-it-works/) and the relevant section of the [registry configuration](configuration.md#letsencrypt).
+
+### Alternatives
+
+While rarely advisable, you may want to use self-signed certificates instead, or use your registry in an insecure fashion. You will find instructions [here](insecure.md).
+
+## Load Balancing Considerations
+
+One may want to use a load balancer to distribute load, terminate TLS or
+provide high availability. While a full load balancing setup is outside the
+scope of this document, there are a few considerations that can make the process
+smoother.
+
+The most important aspect is that a load balanced cluster of registries must
+share the same resources. For the current version of the registry, this means
+the following must be the same (a configuration sketch appears at the end of
+this section):
+
+ - Storage Driver
+ - HTTP Secret
+ - Redis Cache (if configured)
+
+If any of these are different, the registry will have trouble serving requests.
+As an example, if you're using the filesystem driver, all registry instances
+must have access to the same filesystem root, which means they should be on
+the same machine. For other drivers, such as s3 or azure, they should be
+accessing the same resource, and will likely share an identical configuration.
+The _HTTP Secret_ coordinates uploads, so it also must be the same across
+instances. Configuring different redis instances will work (at the time
+of writing), but will not be optimal if the instances are not shared, causing
+more requests to be directed to the backend.
+
+#### Important/Required HTTP-Headers
+Getting the headers correct is very important. For all responses to any
+request under the "/v2/" url space, the `Docker-Distribution-API-Version`
+header should be set to the value "registry/2.0", even for a 4xx response.
+This header allows the docker engine to quickly resolve authentication realms
+and fall back to version 1 registries, if necessary. Confirming this is set up
+correctly can help avoid problems with fallback.
+
+In the same train of thought, you must make sure you are properly sending the
+`X-Forwarded-Proto`, `X-Forwarded-For` and `Host` headers to their "client-side"
+values. Failure to do so usually makes the registry issue redirects to internal
+hostnames or downgrade from https to http.
+
+A properly secured registry should return 401 when the "/v2/" endpoint is hit
+without credentials. The response should include a `WWW-Authenticate`
+challenge, providing guidance on how to authenticate, such as with basic auth
+or a token service. If the load balancer has health checks, it is recommended
+to configure it to consider a 401 response as healthy and any other as down.
+This will secure your registry by ensuring that configuration problems with
+authentication don't accidentally expose an unprotected registry. If you're
+using a less sophisticated load balancer, such as Amazon's Elastic Load
+Balancer, which doesn't allow you to change the healthy response code, health
+checks can be directed at "/", which will always return a `200 OK` response.
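+
+As referenced above, the following is a sketch of the settings that must be
+identical across all load-balanced instances. All values are placeholders; see
+the [configuration reference](configuration.md) for the full syntax:
+
+```
+storage:
+  s3:
+    bucket: shared-bucket          # same backend storage on every instance
+http:
+  secret: samesecretoneverynode    # same HTTP secret on every instance
+redis:
+  addr: shared-redis:6379          # same Redis cache, if one is configured
+```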
+ +## Restricting access + +Except for registries running on secure local networks, registries should always implement access restrictions. + +### Native basic auth + +The simplest way to achieve access restriction is through basic authentication (this is very similar to other web servers' basic authentication mechanism). + +> **Warning**: You **cannot** use authentication with an insecure registry. You have to [configure TLS first](#running-a-domain-registry) for this to work. + +First create a password file with one entry for the user "testuser", with password "testpassword": + + mkdir auth + docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > auth/htpasswd + +Make sure you stopped your registry from the previous step, then start it again: + + docker run -d -p 5000:5000 --restart=always --name registry \ + -v `pwd`/auth:/auth \ + -e "REGISTRY_AUTH=htpasswd" \ + -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \ + -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \ + -v `pwd`/certs:/certs \ + -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \ + -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \ + registry:2 + +You should now be able to: + + docker login myregistrydomain.com:5000 + +And then push and pull images as an authenticated user. + +#### Gotcha + +Seeing X509 errors is usually a sign you are trying to use self-signed certificates, and failed to [configure your docker daemon properly](insecure.md). + +### Alternatives + +1. You may want to leverage more advanced basic auth implementations through a proxy design, in front of the registry. You will find examples of such patterns in the [recipes list](recipes/index.md). + +2. Alternatively, the Registry also supports delegated authentication, redirecting users to a specific, trusted token server. That approach requires significantly more investment, and only makes sense if you want to fully configure ACLs and more control over the Registry integration into your global authorization and authentication systems. + +You will find [background information here](spec/auth/token.md), and [configuration information here](configuration.md#auth). + +Beware that you will have to implement your own authentication service for this to work, or leverage a third-party implementation. + +## Managing with Compose + +As your registry configuration grows more complex, dealing with it can quickly become tedious. + +It's highly recommended to use [Docker Compose](/compose/index.md) to facilitate operating your registry. + +Here is a simple `docker-compose.yml` example that condenses everything explained so far: + +``` +registry: + restart: always + image: registry:2 + ports: + - 5000:5000 + environment: + REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt + REGISTRY_HTTP_TLS_KEY: /certs/domain.key + REGISTRY_AUTH: htpasswd + REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd + REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm + volumes: + - /path/data:/var/lib/registry + - /path/certs:/certs + - /path/auth:/auth +``` + +> **Warning**: replace `/path` by whatever directory that holds your `certs` and `auth` folder from above. 
+
+You can then start your registry with a simple:
+
+    docker-compose up -d
+
+## Next
+
+You will find more specific and advanced information in the following sections:
+
+ - [Configuration reference](configuration.md)
+ - [Working with notifications](notifications.md)
+ - [Advanced "recipes"](recipes/index.md)
+ - [Registry API](spec/api.md)
+ - [Storage driver model](storage-drivers/index.md)
+ - [Token authentication](spec/auth/token.md)
diff --git a/docs/deprecated.md b/docs/deprecated.md
new file mode 100644
index 00000000..73bde497
--- /dev/null
+++ b/docs/deprecated.md
@@ -0,0 +1,27 @@
+
+
+# Docker Registry Deprecation
+
+This document details functionality or components which are deprecated within
+the registry.
+
+### v2.5.0
+
+The signature store has been removed from the registry. Since `v2.4.0` it has
+been possible to configure the registry to generate manifest signatures rather
+than load them from storage. In this version of the registry this becomes
+the default behavior. Signatures which are attached to manifests on put are
+not stored in the registry. This does not alter the functional behavior of
+the registry.
+
+Old signature blobs can be removed from the registry storage by running the
+garbage-collect subcommand.
diff --git a/docs/doc.go b/docs/doc.go
deleted file mode 100644
index a1ba7f3a..00000000
--- a/docs/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package registry provides the main entrypoints for running a registry.
-package registry
diff --git a/docs/garbage-collection.md b/docs/garbage-collection.md
new file mode 100644
index 00000000..2d03e787
--- /dev/null
+++ b/docs/garbage-collection.md
@@ -0,0 +1,137 @@
+
+
+# Garbage Collection
+
+As of v2.4.0, a garbage collector command is included within the registry binary.
+This document describes what this command does and how and why it should be used.
+
+## What is Garbage Collection?
+
+From [wikipedia](https://en.wikipedia.org/wiki/Garbage_collection_(computer_science)):
+
+"In computer science, garbage collection (GC) is a form of automatic memory management. The
+garbage collector, or just collector, attempts to reclaim garbage, or memory occupied by
+objects that are no longer in use by the program."
+
+In the context of the Docker registry, garbage collection is the process of
+removing blobs from the filesystem which are no longer referenced by a
+manifest. Blobs can include both layers and manifests.
+
+
+## Why Garbage Collection?
+
+Registry data can occupy considerable amounts of disk space, and freeing up
+this disk space is an oft-requested feature. Additionally, for reasons of security, it
+can be desirable to ensure that certain layers no longer exist on the filesystem.
+
+
+## Garbage Collection in the Registry
+
+Filesystem layers are stored by their content address in the Registry. This
+has many advantages, one of which is that data is stored once and referred to by manifests.
+See [here](compatibility.md#content-addressable-storage-cas) for more details.
+
+Layers are therefore shared amongst manifests; each manifest maintains a reference
+to the layer. As long as a layer is referenced by one manifest, it cannot be garbage
+collected.
+
+Manifests and layers can be deleted with the registry API (refer to the API
+documentation [here](spec/api.md#deleting-a-layer) and
+[here](spec/api.md#deleting-an-image) for details). This API removes references
+to the target and makes them eligible for garbage collection. It also makes them
+unable to be read via the API.
+
+If a layer is deleted, it will be removed from the filesystem when garbage collection
+is run. If a manifest is deleted, the layers to which it refers will be removed from
+the filesystem if no other manifest refers to them.
+
+### Example
+
+In this example manifest `A` references two layers: `a` and `b`. Manifest `B` references
+layers `a` and `c`. In this state, nothing is eligible for garbage collection:
+
+```
+A -----> a <----- B
+    \--> b     |
+         c <--/
+```
+
+Manifest `B` is deleted via the API:
+
+```
+A -----> a     B
+    \--> b
+         c
+```
+
+In this state layer `c` no longer has a reference and is eligible for garbage
+collection. Layer `a` had one reference removed but will not be garbage
+collected as it is still referenced by manifest `A`. The blob representing
+manifest `B` will also be eligible for garbage collection.
+
+After garbage collection has been run, manifest `A` and its blobs remain.
+
+```
+A -----> a
+    \--> b
+```
+
+## How Garbage Collection works
+
+Garbage collection runs in two phases. First, in the 'mark' phase, the process
+scans all the manifests in the registry. From these manifests, it constructs a
+set of content address digests. This set is the 'mark set' and denotes the set
+of blobs to *not* delete. Second, in the 'sweep' phase, the process scans all
+the blobs and, if a blob's content address digest is not in the mark set, the
+process deletes it.
+
+> **NOTE**: You should ensure that the registry is in read-only mode or not running at
+> all. If you were to upload an image while garbage collection is running, there is the
+> risk that the image's layers will be mistakenly deleted, leading to a corrupted image.
+
+This type of garbage collection is known as stop-the-world garbage collection. In future
+registry versions the intention is that garbage collection will be an automated background
+action and this manual process will no longer apply.
+
+## Running garbage collection
+
+Garbage collection can be run as follows:
+
+`bin/registry garbage-collect [--dry-run] /path/to/config.yml`
+
+The garbage-collect command accepts a `--dry-run` parameter, which will print the progress
+of the mark and sweep phases without removing any data. Running with a log level of `info`
+will give a clear indication of what will and will not be deleted.
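+
+Before running it against a live registry, put the registry into read-only mode, as noted above. A minimal sketch of the relevant configuration stanza, assuming the `maintenance.readonly` option described in the [configuration reference](configuration.md):
+
+```
+storage:
+  maintenance:
+    readonly:
+      enabled: true
+```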
+ +_Sample output from a dry run garbage collection with registry log level set to `info`_ + +``` +hello-world +hello-world: marking manifest sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf +hello-world: marking blob sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb +hello-world: marking blob sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4 +hello-world: marking configuration sha256:690ed74de00f99a7d00a98a5ad855ac4febd66412be132438f9b8dbd300a937d +ubuntu + +4 blobs marked, 5 blobs eligible for deletion +blob eligible for deletion: sha256:28e09fddaacbfc8a13f82871d9d66141a6ed9ca526cb9ed295ef545ab4559b81 +blob eligible for deletion: sha256:7e15ce58ccb2181a8fced7709e9893206f0937cc9543bc0c8178ea1cf4d7e7b5 +blob eligible for deletion: sha256:87192bdbe00f8f2a62527f36bb4c7c7f4eaf9307e4b87e8334fb6abec1765bcb +blob eligible for deletion: sha256:b549a9959a664038fc35c155a95742cf12297672ca0ae35735ec027d55bf4e97 +blob eligible for deletion: sha256:f251d679a7c61455f06d793e43c06786d7766c88b8c24edf242b2c08e3c3f599 +``` + diff --git a/docs/glossary.md b/docs/glossary.md new file mode 100644 index 00000000..8159b520 --- /dev/null +++ b/docs/glossary.md @@ -0,0 +1,70 @@ + + +# Glossary + +This page contains definitions for distribution related terms. + +
+<dl>
+
+	<dt>Blob</dt>
+	<dd>
+	A blob is any kind of content that is stored by a Registry under a content-addressable identifier (a "digest"). Layers are a good example of "blobs".
+	</dd>
+
+	<dt>Image</dt>
+	<dd>
+	An image is a named set of immutable data from which a Docker container can be created. An image is represented by a JSON file called a manifest, and is conceptually a set of layers. Image names indicate the location where they can be pulled from and pushed to, as they usually start with a registry domain name and port.
+	</dd>
+
+	<dt>Layer</dt>
+	<dd>
+	A layer is a tar archive bundling partial content from a filesystem. Layers from an image are usually extracted in order on top of each other to make up a root filesystem from which containers run.
+	</dd>
+
+	<dt>Manifest</dt>
+	<dd>
+	A manifest is the JSON representation of an image.
+	</dd>
+
+	<dt>Namespace</dt>
+	<dd>
+	A namespace is a collection of repositories with a common name prefix. The namespace with an empty prefix is considered the Global Namespace.
+	</dd>
+
+	<dt>Registry</dt>
+	<dd>
+	A registry is a service that lets you store and deliver images.
+	</dd>
+
+	<dt>Repository</dt>
+	<dd>
+	A repository is a set of data containing all versions of a given image.
+	</dd>
+
+	<dt>Scope</dt>
+	<dd>
+	A scope is the portion of a namespace onto which a given authorization token is granted.
+	</dd>
+
+	<dt>Tag</dt>
+	<dd>
+	A tag is conceptually a "version" of a named image. Example: <code>docker pull myimage:latest</code> instructs Docker to pull the image "myimage" in version "latest".
+	</dd>
+
+</dl>
+
diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go deleted file mode 100644 index 3bc18c76..00000000 --- a/docs/handlers/api_test.go +++ /dev/null @@ -1,2474 +0,0 @@ -package handlers - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/http/httputil" - "net/url" - "os" - "path" - "reflect" - "regexp" - "strconv" - "strings" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - _ "github.com/docker/distribution/registry/storage/driver/testdriver" - "github.com/docker/distribution/testutil" - "github.com/docker/libtrust" - "github.com/gorilla/handlers" -) - -var headerConfig = http.Header{ - "X-Content-Type-Options": []string{"nosniff"}, -} - -// TestCheckAPI hits the base endpoint (/v2/) ensures we return the specified -// 200 OK response. -func TestCheckAPI(t *testing.T) { - env := newTestEnv(t, false) - baseURL, err := env.builder.BuildBaseURL() - if err != nil { - t.Fatalf("unexpected error building base url: %v", err) - } - - resp, err := http.Get(baseURL) - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "issuing api base check", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Type": []string{"application/json; charset=utf-8"}, - "Content-Length": []string{"2"}, - }) - - p, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("unexpected error reading response body: %v", err) - } - - if string(p) != "{}" { - t.Fatalf("unexpected response body: %v", string(p)) - } -} - -// TestCatalogAPI tests the /v2/_catalog endpoint -func TestCatalogAPI(t *testing.T) { - chunkLen := 2 - env := newTestEnv(t, false) - - values := url.Values{ - "last": []string{""}, - "n": []string{strconv.Itoa(chunkLen)}} - - catalogURL, err := env.builder.BuildCatalogURL(values) - if err != nil { - t.Fatalf("unexpected error building catalog url: %v", err) - } - - // ----------------------------------- - // try to get an empty catalog - resp, err := http.Get(catalogURL) - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "issuing catalog api check", resp, http.StatusOK) - - var ctlg struct { - Repositories []string `json:"repositories"` - } - - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&ctlg); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - // we haven't pushed anything to the registry yet - if len(ctlg.Repositories) != 0 { - t.Fatalf("repositories has unexpected values") - } - - if resp.Header.Get("Link") != "" { - t.Fatalf("repositories has more data when none expected") - } - - // ----------------------------------- - // push something to the registry and try again - images := []string{"foo/aaaa", "foo/bbbb", "foo/cccc"} - - for _, image := range images { - createRepository(env, t, image, "sometag") - } - - resp, err = http.Get(catalogURL) - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - 
checkResponse(t, "issuing catalog api check", resp, http.StatusOK) - - dec = json.NewDecoder(resp.Body) - if err = dec.Decode(&ctlg); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - if len(ctlg.Repositories) != chunkLen { - t.Fatalf("repositories has unexpected values") - } - - for _, image := range images[:chunkLen] { - if !contains(ctlg.Repositories, image) { - t.Fatalf("didn't find our repository '%s' in the catalog", image) - } - } - - link := resp.Header.Get("Link") - if link == "" { - t.Fatalf("repositories has less data than expected") - } - - newValues := checkLink(t, link, chunkLen, ctlg.Repositories[len(ctlg.Repositories)-1]) - - // ----------------------------------- - // get the last chunk of data - - catalogURL, err = env.builder.BuildCatalogURL(newValues) - if err != nil { - t.Fatalf("unexpected error building catalog url: %v", err) - } - - resp, err = http.Get(catalogURL) - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "issuing catalog api check", resp, http.StatusOK) - - dec = json.NewDecoder(resp.Body) - if err = dec.Decode(&ctlg); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - if len(ctlg.Repositories) != 1 { - t.Fatalf("repositories has unexpected values") - } - - lastImage := images[len(images)-1] - if !contains(ctlg.Repositories, lastImage) { - t.Fatalf("didn't find our repository '%s' in the catalog", lastImage) - } - - link = resp.Header.Get("Link") - if link != "" { - t.Fatalf("catalog has unexpected data") - } -} - -func checkLink(t *testing.T, urlStr string, numEntries int, last string) url.Values { - re := regexp.MustCompile("<(/v2/_catalog.*)>; rel=\"next\"") - matches := re.FindStringSubmatch(urlStr) - - if len(matches) != 2 { - t.Fatalf("Catalog link address response was incorrect") - } - linkURL, _ := url.Parse(matches[1]) - urlValues := linkURL.Query() - - if urlValues.Get("n") != strconv.Itoa(numEntries) { - t.Fatalf("Catalog link entry size is incorrect") - } - - if urlValues.Get("last") != last { - t.Fatal("Catalog link last entry is incorrect") - } - - return urlValues -} - -func contains(elems []string, e string) bool { - for _, elem := range elems { - if elem == e { - return true - } - } - return false -} - -func TestURLPrefix(t *testing.T) { - config := configuration.Configuration{ - Storage: configuration.Storage{ - "testdriver": configuration.Parameters{}, - }, - } - config.HTTP.Prefix = "/test/" - config.HTTP.Headers = headerConfig - - env := newTestEnvWithConfig(t, &config) - - baseURL, err := env.builder.BuildBaseURL() - if err != nil { - t.Fatalf("unexpected error building base url: %v", err) - } - - parsed, _ := url.Parse(baseURL) - if !strings.HasPrefix(parsed.Path, config.HTTP.Prefix) { - t.Fatalf("Prefix %v not included in test url %v", config.HTTP.Prefix, baseURL) - } - - resp, err := http.Get(baseURL) - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "issuing api base check", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Type": []string{"application/json; charset=utf-8"}, - "Content-Length": []string{"2"}, - }) -} - -type blobArgs struct { - imageName reference.Named - layerFile io.ReadSeeker - layerDigest digest.Digest -} - -func makeBlobArgs(t *testing.T) blobArgs { - layerFile, layerDigest, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random layer file: %v", err) - } - - 
args := blobArgs{ - layerFile: layerFile, - layerDigest: layerDigest, - } - args.imageName, _ = reference.ParseNamed("foo/bar") - return args -} - -// TestBlobAPI conducts a full test of the of the blob api. -func TestBlobAPI(t *testing.T) { - deleteEnabled := false - env := newTestEnv(t, deleteEnabled) - args := makeBlobArgs(t) - testBlobAPI(t, env, args) - - deleteEnabled = true - env = newTestEnv(t, deleteEnabled) - args = makeBlobArgs(t) - testBlobAPI(t, env, args) - -} - -func TestBlobDelete(t *testing.T) { - deleteEnabled := true - env := newTestEnv(t, deleteEnabled) - - args := makeBlobArgs(t) - env = testBlobAPI(t, env, args) - testBlobDelete(t, env, args) -} - -func TestRelativeURL(t *testing.T) { - config := configuration.Configuration{ - Storage: configuration.Storage{ - "testdriver": configuration.Parameters{}, - }, - } - config.HTTP.Headers = headerConfig - config.HTTP.RelativeURLs = false - env := newTestEnvWithConfig(t, &config) - ref, _ := reference.WithName("foo/bar") - uploadURLBaseAbs, _ := startPushLayer(t, env, ref) - - u, err := url.Parse(uploadURLBaseAbs) - if err != nil { - t.Fatal(err) - } - if !u.IsAbs() { - t.Fatal("Relative URL returned from blob upload chunk with non-relative configuration") - } - - args := makeBlobArgs(t) - resp, err := doPushLayer(t, env.builder, ref, args.layerDigest, uploadURLBaseAbs, args.layerFile) - if err != nil { - t.Fatalf("unexpected error doing layer push relative url: %v", err) - } - checkResponse(t, "relativeurl blob upload", resp, http.StatusCreated) - u, err = url.Parse(resp.Header.Get("Location")) - if err != nil { - t.Fatal(err) - } - if !u.IsAbs() { - t.Fatal("Relative URL returned from blob upload with non-relative configuration") - } - - config.HTTP.RelativeURLs = true - args = makeBlobArgs(t) - uploadURLBaseRelative, _ := startPushLayer(t, env, ref) - u, err = url.Parse(uploadURLBaseRelative) - if err != nil { - t.Fatal(err) - } - if u.IsAbs() { - t.Fatal("Absolute URL returned from blob upload chunk with relative configuration") - } - - // Start a new upload in absolute mode to get a valid base URL - config.HTTP.RelativeURLs = false - uploadURLBaseAbs, _ = startPushLayer(t, env, ref) - u, err = url.Parse(uploadURLBaseAbs) - if err != nil { - t.Fatal(err) - } - if !u.IsAbs() { - t.Fatal("Relative URL returned from blob upload chunk with non-relative configuration") - } - - // Complete upload with relative URLs enabled to ensure the final location is relative - config.HTTP.RelativeURLs = true - resp, err = doPushLayer(t, env.builder, ref, args.layerDigest, uploadURLBaseAbs, args.layerFile) - if err != nil { - t.Fatalf("unexpected error doing layer push relative url: %v", err) - } - - checkResponse(t, "relativeurl blob upload", resp, http.StatusCreated) - u, err = url.Parse(resp.Header.Get("Location")) - if err != nil { - t.Fatal(err) - } - if u.IsAbs() { - t.Fatal("Relative URL returned from blob upload with non-relative configuration") - } -} - -func TestBlobDeleteDisabled(t *testing.T) { - deleteEnabled := false - env := newTestEnv(t, deleteEnabled) - args := makeBlobArgs(t) - - imageName := args.imageName - layerDigest := args.layerDigest - ref, _ := reference.WithDigest(imageName, layerDigest) - layerURL, err := env.builder.BuildBlobURL(ref) - if err != nil { - t.Fatalf("error building url: %v", err) - } - - resp, err := httpDelete(layerURL) - if err != nil { - t.Fatalf("unexpected error deleting when disabled: %v", err) - } - - checkResponse(t, "status of disabled delete", resp, http.StatusMethodNotAllowed) -} - -func 
testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { - // TODO(stevvooe): This test code is complete junk but it should cover the - // complete flow. This must be broken down and checked against the - // specification *before* we submit the final to docker core. - imageName := args.imageName - layerFile := args.layerFile - layerDigest := args.layerDigest - - // ----------------------------------- - // Test fetch for non-existent content - ref, _ := reference.WithDigest(imageName, layerDigest) - layerURL, err := env.builder.BuildBlobURL(ref) - if err != nil { - t.Fatalf("error building url: %v", err) - } - - resp, err := http.Get(layerURL) - if err != nil { - t.Fatalf("unexpected error fetching non-existent layer: %v", err) - } - - checkResponse(t, "fetching non-existent content", resp, http.StatusNotFound) - - // ------------------------------------------ - // Test head request for non-existent content - resp, err = http.Head(layerURL) - if err != nil { - t.Fatalf("unexpected error checking head on non-existent layer: %v", err) - } - - checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound) - - // ------------------------------------------ - // Start an upload, check the status then cancel - uploadURLBase, uploadUUID := startPushLayer(t, env, imageName) - - // A status check should work - resp, err = http.Get(uploadURLBase) - if err != nil { - t.Fatalf("unexpected error getting upload status: %v", err) - } - checkResponse(t, "status of deleted upload", resp, http.StatusNoContent) - checkHeaders(t, resp, http.Header{ - "Location": []string{"*"}, - "Range": []string{"0-0"}, - "Docker-Upload-UUID": []string{uploadUUID}, - }) - - req, err := http.NewRequest("DELETE", uploadURLBase, nil) - if err != nil { - t.Fatalf("unexpected error creating delete request: %v", err) - } - - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("unexpected error sending delete request: %v", err) - } - - checkResponse(t, "deleting upload", resp, http.StatusNoContent) - - // A status check should result in 404 - resp, err = http.Get(uploadURLBase) - if err != nil { - t.Fatalf("unexpected error getting upload status: %v", err) - } - checkResponse(t, "status of deleted upload", resp, http.StatusNotFound) - - // ----------------------------------------- - // Do layer push with an empty body and different digest - uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) - resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) - if err != nil { - t.Fatalf("unexpected error doing bad layer push: %v", err) - } - - checkResponse(t, "bad layer push", resp, http.StatusBadRequest) - checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeDigestInvalid) - - // ----------------------------------------- - // Do layer push with an empty body and correct digest - zeroDigest, err := digest.FromReader(bytes.NewReader([]byte{})) - if err != nil { - t.Fatalf("unexpected error digesting empty buffer: %v", err) - } - - uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) - pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{})) - - // ----------------------------------------- - // Do layer push with an empty body and correct digest - - // This is a valid but empty tarfile! 
- emptyTar := bytes.Repeat([]byte("\x00"), 1024) - emptyDigest, err := digest.FromReader(bytes.NewReader(emptyTar)) - if err != nil { - t.Fatalf("unexpected error digesting empty tar: %v", err) - } - - uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) - pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar)) - - // ------------------------------------------ - // Now, actually do successful upload. - layerLength, _ := layerFile.Seek(0, os.SEEK_END) - layerFile.Seek(0, os.SEEK_SET) - - uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) - pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) - - // ------------------------------------------ - // Now, push just a chunk - layerFile.Seek(0, 0) - - canonicalDigester := digest.Canonical.New() - if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { - t.Fatalf("error copying to digest: %v", err) - } - canonicalDigest := canonicalDigester.Digest() - - layerFile.Seek(0, 0) - uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) - uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) - finishUpload(t, env.builder, imageName, uploadURLBase, dgst) - - // ------------------------ - // Use a head request to see if the layer exists. - resp, err = http.Head(layerURL) - if err != nil { - t.Fatalf("unexpected error checking head on existing layer: %v", err) - } - - checkResponse(t, "checking head on existing layer", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{canonicalDigest.String()}, - }) - - // ---------------- - // Fetch the layer! - resp, err = http.Get(layerURL) - if err != nil { - t.Fatalf("unexpected error fetching layer: %v", err) - } - - checkResponse(t, "fetching layer", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{canonicalDigest.String()}, - }) - - // Verify the body - verifier, err := digest.NewDigestVerifier(layerDigest) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - io.Copy(verifier, resp.Body) - - if !verifier.Verified() { - t.Fatalf("response body did not pass verification") - } - - // ---------------- - // Fetch the layer with an invalid digest - badURL := strings.Replace(layerURL, "sha256", "sha257", 1) - resp, err = http.Get(badURL) - if err != nil { - t.Fatalf("unexpected error fetching layer: %v", err) - } - - checkResponse(t, "fetching layer bad digest", resp, http.StatusBadRequest) - - // Cache headers - resp, err = http.Get(layerURL) - if err != nil { - t.Fatalf("unexpected error fetching layer: %v", err) - } - - checkResponse(t, "fetching layer", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{canonicalDigest.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, canonicalDigest)}, - "Cache-Control": []string{"max-age=31536000"}, - }) - - // Matching etag, gives 304 - etag := resp.Header.Get("Etag") - req, err = http.NewRequest("GET", layerURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("If-None-Match", etag) - - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - - checkResponse(t, "fetching layer with etag", resp, 
http.StatusNotModified) - - // Non-matching etag, gives 200 - req, err = http.NewRequest("GET", layerURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("If-None-Match", "") - resp, err = http.DefaultClient.Do(req) - checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK) - - // Missing tests: - // - Upload the same tar file under and different repository and - // ensure the content remains uncorrupted. - return env -} - -func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { - // Upload a layer - imageName := args.imageName - layerFile := args.layerFile - layerDigest := args.layerDigest - - ref, _ := reference.WithDigest(imageName, layerDigest) - layerURL, err := env.builder.BuildBlobURL(ref) - if err != nil { - t.Fatalf(err.Error()) - } - // --------------- - // Delete a layer - resp, err := httpDelete(layerURL) - if err != nil { - t.Fatalf("unexpected error deleting layer: %v", err) - } - - checkResponse(t, "deleting layer", resp, http.StatusAccepted) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{"0"}, - }) - - // --------------- - // Try and get it back - // Use a head request to see if the layer exists. - resp, err = http.Head(layerURL) - if err != nil { - t.Fatalf("unexpected error checking head on existing layer: %v", err) - } - - checkResponse(t, "checking existence of deleted layer", resp, http.StatusNotFound) - - // Delete already deleted layer - resp, err = httpDelete(layerURL) - if err != nil { - t.Fatalf("unexpected error deleting layer: %v", err) - } - - checkResponse(t, "deleting layer", resp, http.StatusNotFound) - - // ---------------- - // Attempt to delete a layer with an invalid digest - badURL := strings.Replace(layerURL, "sha256", "sha257", 1) - resp, err = httpDelete(badURL) - if err != nil { - t.Fatalf("unexpected error fetching layer: %v", err) - } - - checkResponse(t, "deleting layer bad digest", resp, http.StatusBadRequest) - - // ---------------- - // Reupload previously deleted blob - layerFile.Seek(0, os.SEEK_SET) - - uploadURLBase, _ := startPushLayer(t, env, imageName) - pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) - - layerFile.Seek(0, os.SEEK_SET) - canonicalDigester := digest.Canonical.New() - if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { - t.Fatalf("error copying to digest: %v", err) - } - canonicalDigest := canonicalDigester.Digest() - - // ------------------------ - // Use a head request to see if it exists - resp, err = http.Head(layerURL) - if err != nil { - t.Fatalf("unexpected error checking head on existing layer: %v", err) - } - - layerLength, _ := layerFile.Seek(0, os.SEEK_END) - checkResponse(t, "checking head on reuploaded layer", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{canonicalDigest.String()}, - }) -} - -func TestDeleteDisabled(t *testing.T) { - env := newTestEnv(t, false) - - imageName, _ := reference.ParseNamed("foo/bar") - // "build" our layer file - layerFile, layerDigest, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random layer file: %v", err) - } - - ref, _ := reference.WithDigest(imageName, layerDigest) - layerURL, err := env.builder.BuildBlobURL(ref) - if err != nil { - t.Fatalf("Error building blob URL") - } - uploadURLBase, _ := startPushLayer(t, env, imageName) - pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, 
layerFile) - - resp, err := httpDelete(layerURL) - if err != nil { - t.Fatalf("unexpected error deleting layer: %v", err) - } - - checkResponse(t, "deleting layer with delete disabled", resp, http.StatusMethodNotAllowed) -} - -func TestDeleteReadOnly(t *testing.T) { - env := newTestEnv(t, true) - - imageName, _ := reference.ParseNamed("foo/bar") - // "build" our layer file - layerFile, layerDigest, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random layer file: %v", err) - } - - ref, _ := reference.WithDigest(imageName, layerDigest) - layerURL, err := env.builder.BuildBlobURL(ref) - if err != nil { - t.Fatalf("Error building blob URL") - } - uploadURLBase, _ := startPushLayer(t, env, imageName) - pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) - - env.app.readOnly = true - - resp, err := httpDelete(layerURL) - if err != nil { - t.Fatalf("unexpected error deleting layer: %v", err) - } - - checkResponse(t, "deleting layer in read-only mode", resp, http.StatusMethodNotAllowed) -} - -func TestStartPushReadOnly(t *testing.T) { - env := newTestEnv(t, true) - env.app.readOnly = true - - imageName, _ := reference.ParseNamed("foo/bar") - - layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) - if err != nil { - t.Fatalf("unexpected error building layer upload url: %v", err) - } - - resp, err := http.Post(layerUploadURL, "", nil) - if err != nil { - t.Fatalf("unexpected error starting layer push: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "starting push in read-only mode", resp, http.StatusMethodNotAllowed) -} - -func httpDelete(url string) (*http.Response, error) { - req, err := http.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - // defer resp.Body.Close() - return resp, err -} - -type manifestArgs struct { - imageName reference.Named - mediaType string - manifest distribution.Manifest - dgst digest.Digest -} - -func TestManifestAPI(t *testing.T) { - schema1Repo, _ := reference.ParseNamed("foo/schema1") - schema2Repo, _ := reference.ParseNamed("foo/schema2") - - deleteEnabled := false - env := newTestEnv(t, deleteEnabled) - testManifestAPISchema1(t, env, schema1Repo) - schema2Args := testManifestAPISchema2(t, env, schema2Repo) - testManifestAPIManifestList(t, env, schema2Args) - - deleteEnabled = true - env = newTestEnv(t, deleteEnabled) - testManifestAPISchema1(t, env, schema1Repo) - schema2Args = testManifestAPISchema2(t, env, schema2Repo) - testManifestAPIManifestList(t, env, schema2Args) -} - -func TestManifestDelete(t *testing.T) { - schema1Repo, _ := reference.ParseNamed("foo/schema1") - schema2Repo, _ := reference.ParseNamed("foo/schema2") - - deleteEnabled := true - env := newTestEnv(t, deleteEnabled) - schema1Args := testManifestAPISchema1(t, env, schema1Repo) - testManifestDelete(t, env, schema1Args) - schema2Args := testManifestAPISchema2(t, env, schema2Repo) - testManifestDelete(t, env, schema2Args) -} - -func TestManifestDeleteDisabled(t *testing.T) { - schema1Repo, _ := reference.ParseNamed("foo/schema1") - deleteEnabled := false - env := newTestEnv(t, deleteEnabled) - testManifestDeleteDisabled(t, env, schema1Repo) -} - -func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName reference.Named) { - ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) - manifestURL, err := env.builder.BuildManifestURL(ref) - if err != nil { - t.Fatalf("unexpected error 
getting manifest url: %v", err) - } - - resp, err := httpDelete(manifestURL) - if err != nil { - t.Fatalf("unexpected error deleting manifest %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "status of disabled delete of manifest", resp, http.StatusMethodNotAllowed) -} - -func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { - tag := "thetag" - args := manifestArgs{imageName: imageName} - - tagRef, _ := reference.WithTag(imageName, tag) - manifestURL, err := env.builder.BuildManifestURL(tagRef) - if err != nil { - t.Fatalf("unexpected error getting manifest url: %v", err) - } - - // ----------------------------- - // Attempt to fetch the manifest - resp, err := http.Get(manifestURL) - if err != nil { - t.Fatalf("unexpected error getting manifest: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) - checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown) - - tagsURL, err := env.builder.BuildTagsURL(imageName) - if err != nil { - t.Fatalf("unexpected error building tags url: %v", err) - } - - resp, err = http.Get(tagsURL) - if err != nil { - t.Fatalf("unexpected error getting unknown tags: %v", err) - } - defer resp.Body.Close() - - // Check that we get an unknown repository error when asking for tags - checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) - checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) - - // -------------------------------- - // Attempt to push unsigned manifest with missing layers - unsignedManifest := &schema1.Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: imageName.Name(), - Tag: tag, - FSLayers: []schema1.FSLayer{ - { - BlobSum: "asdf", - }, - { - BlobSum: "qwer", - }, - }, - History: []schema1.History{ - { - V1Compatibility: "", - }, - { - V1Compatibility: "", - }, - }, - } - - resp = putManifest(t, "putting unsigned manifest", manifestURL, "", unsignedManifest) - defer resp.Body.Close() - checkResponse(t, "putting unsigned manifest", resp, http.StatusBadRequest) - _, p, counts := checkBodyHasErrorCodes(t, "putting unsigned manifest", resp, v2.ErrorCodeManifestInvalid) - - expectedCounts := map[errcode.ErrorCode]int{ - v2.ErrorCodeManifestInvalid: 1, - } - - if !reflect.DeepEqual(counts, expectedCounts) { - t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) - } - - // sign the manifest and still get some interesting errors. - sm, err := schema1.Sign(unsignedManifest, env.pk) - if err != nil { - t.Fatalf("error signing manifest: %v", err) - } - - resp = putManifest(t, "putting signed manifest with errors", manifestURL, "", sm) - defer resp.Body.Close() - checkResponse(t, "putting signed manifest with errors", resp, http.StatusBadRequest) - _, p, counts = checkBodyHasErrorCodes(t, "putting signed manifest with errors", resp, - v2.ErrorCodeManifestBlobUnknown, v2.ErrorCodeDigestInvalid) - - expectedCounts = map[errcode.ErrorCode]int{ - v2.ErrorCodeManifestBlobUnknown: 2, - v2.ErrorCodeDigestInvalid: 2, - } - - if !reflect.DeepEqual(counts, expectedCounts) { - t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) - } - - // TODO(stevvooe): Add a test case where we take a mostly valid registry, - // tamper with the content and ensure that we get an unverified manifest - // error. 
- - // Push 2 random layers - expectedLayers := make(map[digest.Digest]io.ReadSeeker) - - for i := range unsignedManifest.FSLayers { - rs, dgstStr, err := testutil.CreateRandomTarFile() - - if err != nil { - t.Fatalf("error creating random layer %d: %v", i, err) - } - dgst := digest.Digest(dgstStr) - - expectedLayers[dgst] = rs - unsignedManifest.FSLayers[i].BlobSum = dgst - - uploadURLBase, _ := startPushLayer(t, env, imageName) - pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) - } - - // ------------------- - // Push the signed manifest with all layers pushed. - signedManifest, err := schema1.Sign(unsignedManifest, env.pk) - if err != nil { - t.Fatalf("unexpected error signing manifest: %v", err) - } - - dgst := digest.FromBytes(signedManifest.Canonical) - args.manifest = signedManifest - args.dgst = dgst - - digestRef, _ := reference.WithDigest(imageName, dgst) - manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) - checkErr(t, err, "building manifest url") - - resp = putManifest(t, "putting signed manifest no error", manifestURL, "", signedManifest) - checkResponse(t, "putting signed manifest no error", resp, http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // -------------------- - // Push by digest -- should get same result - resp = putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // ------------------ - // Fetch by tag name - resp, err = http.Get(manifestURL) - if err != nil { - t.Fatalf("unexpected error fetching manifest: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, - }) - - var fetchedManifest schema1.SignedManifest - dec := json.NewDecoder(resp.Body) - - if err := dec.Decode(&fetchedManifest); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - if !bytes.Equal(fetchedManifest.Canonical, signedManifest.Canonical) { - t.Fatalf("manifests do not match") - } - - // --------------- - // Fetch by digest - resp, err = http.Get(manifestDigestURL) - checkErr(t, err, "fetching manifest by digest") - defer resp.Body.Close() - - checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, - }) - - var fetchedManifestByDigest schema1.SignedManifest - dec = json.NewDecoder(resp.Body) - if err := dec.Decode(&fetchedManifestByDigest); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - if !bytes.Equal(fetchedManifestByDigest.Canonical, signedManifest.Canonical) { - t.Fatalf("manifests do not match") - } - - // check signature was roundtripped - signatures, err := fetchedManifestByDigest.Signatures() - if err != nil { - t.Fatal(err) - } - - if len(signatures) != 1 { - t.Fatalf("expected 1 signature from manifest, got: %d", len(signatures)) - } - - // Re-sign, push and pull the same digest - sm2, err := schema1.Sign(&fetchedManifestByDigest.Manifest, env.pk) - if err != nil { - t.Fatal(err) - - } - - // Re-push with a few different 
Content-Types. The official schema1 - // content type should work, as should application/json with/without a - // charset. - resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, schema1.MediaTypeSignedManifest, sm2) - checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) - resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json; charset=utf-8", sm2) - checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) - resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json", sm2) - checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) - - resp, err = http.Get(manifestDigestURL) - checkErr(t, err, "re-fetching manifest by digest") - defer resp.Body.Close() - - checkResponse(t, "re-fetching uploaded manifest", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, - }) - - dec = json.NewDecoder(resp.Body) - if err := dec.Decode(&fetchedManifestByDigest); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - // check only 1 signature is returned - signatures, err = fetchedManifestByDigest.Signatures() - if err != nil { - t.Fatal(err) - } - - if len(signatures) != 1 { - t.Fatalf("expected 2 signature from manifest, got: %d", len(signatures)) - } - - // Get by name with etag, gives 304 - etag := resp.Header.Get("Etag") - req, err := http.NewRequest("GET", manifestURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("If-None-Match", etag) - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - - checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) - - // Get by digest with etag, gives 304 - req, err = http.NewRequest("GET", manifestDigestURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("If-None-Match", etag) - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - - checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) - - // Ensure that the tag is listed. 
- resp, err = http.Get(tagsURL) - if err != nil { - t.Fatalf("unexpected error getting unknown tags: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "getting tags", resp, http.StatusOK) - dec = json.NewDecoder(resp.Body) - - var tagsResponse tagsAPIResponse - - if err := dec.Decode(&tagsResponse); err != nil { - t.Fatalf("unexpected error decoding error response: %v", err) - } - - if tagsResponse.Name != imageName.Name() { - t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName.Name()) - } - - if len(tagsResponse.Tags) != 1 { - t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) - } - - if tagsResponse.Tags[0] != tag { - t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) - } - - // Attempt to put a manifest with mismatching FSLayer and History array cardinalities - - unsignedManifest.History = append(unsignedManifest.History, schema1.History{ - V1Compatibility: "", - }) - invalidSigned, err := schema1.Sign(unsignedManifest, env.pk) - if err != nil { - t.Fatalf("error signing manifest") - } - - resp = putManifest(t, "putting invalid signed manifest", manifestDigestURL, "", invalidSigned) - checkResponse(t, "putting invalid signed manifest", resp, http.StatusBadRequest) - - return args -} - -func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { - tag := "schema2tag" - args := manifestArgs{ - imageName: imageName, - mediaType: schema2.MediaTypeManifest, - } - - tagRef, _ := reference.WithTag(imageName, tag) - manifestURL, err := env.builder.BuildManifestURL(tagRef) - if err != nil { - t.Fatalf("unexpected error getting manifest url: %v", err) - } - - // ----------------------------- - // Attempt to fetch the manifest - resp, err := http.Get(manifestURL) - if err != nil { - t.Fatalf("unexpected error getting manifest: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) - checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown) - - tagsURL, err := env.builder.BuildTagsURL(imageName) - if err != nil { - t.Fatalf("unexpected error building tags url: %v", err) - } - - resp, err = http.Get(tagsURL) - if err != nil { - t.Fatalf("unexpected error getting unknown tags: %v", err) - } - defer resp.Body.Close() - - // Check that we get an unknown repository error when asking for tags - checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) - checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) - - // -------------------------------- - // Attempt to push manifest with missing config and missing layers - manifest := &schema2.Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 2, - MediaType: schema2.MediaTypeManifest, - }, - Config: distribution.Descriptor{ - Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", - Size: 3253, - MediaType: schema2.MediaTypeConfig, - }, - Layers: []distribution.Descriptor{ - { - Digest: "sha256:463434349086340864309863409683460843608348608934092322395278926a", - Size: 6323, - MediaType: schema2.MediaTypeLayer, - }, - { - Digest: "sha256:630923423623623423352523525237238023652897356239852383652aaaaaaa", - Size: 6863, - MediaType: schema2.MediaTypeLayer, - }, - }, - } - - resp = putManifest(t, "putting missing config manifest", manifestURL, schema2.MediaTypeManifest, manifest) - defer resp.Body.Close() - checkResponse(t, "putting missing config manifest", resp, 
http.StatusBadRequest) - _, p, counts := checkBodyHasErrorCodes(t, "putting missing config manifest", resp, v2.ErrorCodeManifestBlobUnknown) - - expectedCounts := map[errcode.ErrorCode]int{ - v2.ErrorCodeManifestBlobUnknown: 3, - } - - if !reflect.DeepEqual(counts, expectedCounts) { - t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) - } - - // Push a config, and reference it in the manifest - sampleConfig := []byte(`{ - "architecture": "amd64", - "history": [ - { - "created": "2015-10-31T22:22:54.690851953Z", - "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" - }, - { - "created": "2015-10-31T22:22:55.613815829Z", - "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" - } - ], - "rootfs": { - "diff_ids": [ - "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - ], - "type": "layers" - } - }`) - sampleConfigDigest := digest.FromBytes(sampleConfig) - - uploadURLBase, _ := startPushLayer(t, env, imageName) - pushLayer(t, env.builder, imageName, sampleConfigDigest, uploadURLBase, bytes.NewReader(sampleConfig)) - manifest.Config.Digest = sampleConfigDigest - manifest.Config.Size = int64(len(sampleConfig)) - - // The manifest should still be invalid, because its layer doesn't exist - resp = putManifest(t, "putting missing layer manifest", manifestURL, schema2.MediaTypeManifest, manifest) - defer resp.Body.Close() - checkResponse(t, "putting missing layer manifest", resp, http.StatusBadRequest) - _, p, counts = checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeManifestBlobUnknown) - - expectedCounts = map[errcode.ErrorCode]int{ - v2.ErrorCodeManifestBlobUnknown: 2, - } - - if !reflect.DeepEqual(counts, expectedCounts) { - t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) - } - - // Push 2 random layers - expectedLayers := make(map[digest.Digest]io.ReadSeeker) - - for i := range manifest.Layers { - rs, dgstStr, err := testutil.CreateRandomTarFile() - - if err != nil { - t.Fatalf("error creating random layer %d: %v", i, err) - } - dgst := digest.Digest(dgstStr) - - expectedLayers[dgst] = rs - manifest.Layers[i].Digest = dgst - - uploadURLBase, _ := startPushLayer(t, env, imageName) - pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) - } - - // ------------------- - // Push the manifest with all layers pushed. 
- deserializedManifest, err := schema2.FromStruct(*manifest) - if err != nil { - t.Fatalf("could not create DeserializedManifest: %v", err) - } - _, canonical, err := deserializedManifest.Payload() - if err != nil { - t.Fatalf("could not get manifest payload: %v", err) - } - dgst := digest.FromBytes(canonical) - args.dgst = dgst - args.manifest = deserializedManifest - - digestRef, _ := reference.WithDigest(imageName, dgst) - manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) - checkErr(t, err, "building manifest url") - - resp = putManifest(t, "putting manifest no error", manifestURL, schema2.MediaTypeManifest, manifest) - checkResponse(t, "putting manifest no error", resp, http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // -------------------- - // Push by digest -- should get same result - resp = putManifest(t, "putting manifest by digest", manifestDigestURL, schema2.MediaTypeManifest, manifest) - checkResponse(t, "putting manifest by digest", resp, http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // ------------------ - // Fetch by tag name - req, err := http.NewRequest("GET", manifestURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("Accept", schema2.MediaTypeManifest) - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("unexpected error fetching manifest: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, - }) - - var fetchedManifest schema2.DeserializedManifest - dec := json.NewDecoder(resp.Body) - - if err := dec.Decode(&fetchedManifest); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - _, fetchedCanonical, err := fetchedManifest.Payload() - if err != nil { - t.Fatalf("error getting manifest payload: %v", err) - } - - if !bytes.Equal(fetchedCanonical, canonical) { - t.Fatalf("manifests do not match") - } - - // --------------- - // Fetch by digest - req, err = http.NewRequest("GET", manifestDigestURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("Accept", schema2.MediaTypeManifest) - resp, err = http.DefaultClient.Do(req) - checkErr(t, err, "fetching manifest by digest") - defer resp.Body.Close() - - checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, - }) - - var fetchedManifestByDigest schema2.DeserializedManifest - dec = json.NewDecoder(resp.Body) - if err := dec.Decode(&fetchedManifestByDigest); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - _, fetchedCanonical, err = fetchedManifest.Payload() - if err != nil { - t.Fatalf("error getting manifest payload: %v", err) - } - - if !bytes.Equal(fetchedCanonical, canonical) { - t.Fatalf("manifests do not match") - } - - // Get by name with etag, gives 304 - etag := resp.Header.Get("Etag") - req, err = http.NewRequest("GET", manifestURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("If-None-Match", etag) - resp, err = 
http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - - checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) - - // Get by digest with etag, gives 304 - req, err = http.NewRequest("GET", manifestDigestURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("If-None-Match", etag) - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - - checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) - - // Ensure that the tag is listed. - resp, err = http.Get(tagsURL) - if err != nil { - t.Fatalf("unexpected error getting unknown tags: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "getting unknown manifest tags", resp, http.StatusOK) - dec = json.NewDecoder(resp.Body) - - var tagsResponse tagsAPIResponse - - if err := dec.Decode(&tagsResponse); err != nil { - t.Fatalf("unexpected error decoding error response: %v", err) - } - - if tagsResponse.Name != imageName.Name() { - t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) - } - - if len(tagsResponse.Tags) != 1 { - t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) - } - - if tagsResponse.Tags[0] != tag { - t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) - } - - // ------------------ - // Fetch as a schema1 manifest - resp, err = http.Get(manifestURL) - if err != nil { - t.Fatalf("unexpected error fetching manifest as schema1: %v", err) - } - defer resp.Body.Close() - - manifestBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("error reading response body: %v", err) - } - - checkResponse(t, "fetching uploaded manifest as schema1", resp, http.StatusOK) - - m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeManifest, manifestBytes) - if err != nil { - t.Fatalf("unexpected error unmarshalling manifest: %v", err) - } - - fetchedSchema1Manifest, ok := m.(*schema1.SignedManifest) - if !ok { - t.Fatalf("expecting schema1 manifest") - } - - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{desc.Digest.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, desc.Digest)}, - }) - - if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { - t.Fatal("wrong schema version") - } - if fetchedSchema1Manifest.Architecture != "amd64" { - t.Fatal("wrong architecture") - } - if fetchedSchema1Manifest.Name != imageName.Name() { - t.Fatal("wrong image name") - } - if fetchedSchema1Manifest.Tag != tag { - t.Fatal("wrong tag") - } - if len(fetchedSchema1Manifest.FSLayers) != 2 { - t.Fatal("wrong number of FSLayers") - } - for i := range manifest.Layers { - if fetchedSchema1Manifest.FSLayers[i].BlobSum != manifest.Layers[len(manifest.Layers)-i-1].Digest { - t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) - } - } - if len(fetchedSchema1Manifest.History) != 2 { - t.Fatal("wrong number of History entries") - } - - // Don't check V1Compatibility fields because we're using randomly-generated - // layers. 
- - return args -} - -func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) { - imageName := args.imageName - tag := "manifestlisttag" - - tagRef, _ := reference.WithTag(imageName, tag) - manifestURL, err := env.builder.BuildManifestURL(tagRef) - if err != nil { - t.Fatalf("unexpected error getting manifest url: %v", err) - } - - // -------------------------------- - // Attempt to push manifest list that refers to an unknown manifest - manifestList := &manifestlist.ManifestList{ - Versioned: manifest.Versioned{ - SchemaVersion: 2, - MediaType: manifestlist.MediaTypeManifestList, - }, - Manifests: []manifestlist.ManifestDescriptor{ - { - Descriptor: distribution.Descriptor{ - Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", - Size: 3253, - MediaType: schema2.MediaTypeManifest, - }, - Platform: manifestlist.PlatformSpec{ - Architecture: "amd64", - OS: "linux", - }, - }, - }, - } - - resp := putManifest(t, "putting missing manifest manifestlist", manifestURL, manifestlist.MediaTypeManifestList, manifestList) - defer resp.Body.Close() - checkResponse(t, "putting missing manifest manifestlist", resp, http.StatusBadRequest) - _, p, counts := checkBodyHasErrorCodes(t, "putting missing manifest manifestlist", resp, v2.ErrorCodeManifestBlobUnknown) - - expectedCounts := map[errcode.ErrorCode]int{ - v2.ErrorCodeManifestBlobUnknown: 1, - } - - if !reflect.DeepEqual(counts, expectedCounts) { - t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) - } - - // ------------------- - // Push a manifest list that references an actual manifest - manifestList.Manifests[0].Digest = args.dgst - deserializedManifestList, err := manifestlist.FromDescriptors(manifestList.Manifests) - if err != nil { - t.Fatalf("could not create DeserializedManifestList: %v", err) - } - _, canonical, err := deserializedManifestList.Payload() - if err != nil { - t.Fatalf("could not get manifest list payload: %v", err) - } - dgst := digest.FromBytes(canonical) - - digestRef, _ := reference.WithDigest(imageName, dgst) - manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) - checkErr(t, err, "building manifest url") - - resp = putManifest(t, "putting manifest list no error", manifestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) - checkResponse(t, "putting manifest list no error", resp, http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // -------------------- - // Push by digest -- should get same result - resp = putManifest(t, "putting manifest list by digest", manifestDigestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) - checkResponse(t, "putting manifest list by digest", resp, http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // ------------------ - // Fetch by tag name - req, err := http.NewRequest("GET", manifestURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - // multiple headers in mixed list format to ensure we parse correctly server-side - req.Header.Set("Accept", fmt.Sprintf(` %s ; q=0.8 , %s ; q=0.5 `, manifestlist.MediaTypeManifestList, schema1.MediaTypeSignedManifest)) - req.Header.Add("Accept", schema2.MediaTypeManifest) - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("unexpected 
error fetching manifest list: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "fetching uploaded manifest list", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, - }) - - var fetchedManifestList manifestlist.DeserializedManifestList - dec := json.NewDecoder(resp.Body) - - if err := dec.Decode(&fetchedManifestList); err != nil { - t.Fatalf("error decoding fetched manifest list: %v", err) - } - - _, fetchedCanonical, err := fetchedManifestList.Payload() - if err != nil { - t.Fatalf("error getting manifest list payload: %v", err) - } - - if !bytes.Equal(fetchedCanonical, canonical) { - t.Fatalf("manifest lists do not match") - } - - // --------------- - // Fetch by digest - req, err = http.NewRequest("GET", manifestDigestURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("Accept", manifestlist.MediaTypeManifestList) - resp, err = http.DefaultClient.Do(req) - checkErr(t, err, "fetching manifest list by digest") - defer resp.Body.Close() - - checkResponse(t, "fetching uploaded manifest list", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, - }) - - var fetchedManifestListByDigest manifestlist.DeserializedManifestList - dec = json.NewDecoder(resp.Body) - if err := dec.Decode(&fetchedManifestListByDigest); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - _, fetchedCanonical, err = fetchedManifestListByDigest.Payload() - if err != nil { - t.Fatalf("error getting manifest list payload: %v", err) - } - - if !bytes.Equal(fetchedCanonical, canonical) { - t.Fatalf("manifests do not match") - } - - // Get by name with etag, gives 304 - etag := resp.Header.Get("Etag") - req, err = http.NewRequest("GET", manifestURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("If-None-Match", etag) - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - - checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) - - // Get by digest with etag, gives 304 - req, err = http.NewRequest("GET", manifestDigestURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("If-None-Match", etag) - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - - checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) - - // ------------------ - // Fetch as a schema1 manifest - resp, err = http.Get(manifestURL) - if err != nil { - t.Fatalf("unexpected error fetching manifest list as schema1: %v", err) - } - defer resp.Body.Close() - - manifestBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("error reading response body: %v", err) - } - - checkResponse(t, "fetching uploaded manifest list as schema1", resp, http.StatusOK) - - m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeManifest, manifestBytes) - if err != nil { - t.Fatalf("unexpected error unmarshalling manifest: %v", err) - } - - fetchedSchema1Manifest, ok := m.(*schema1.SignedManifest) - if !ok { - t.Fatalf("expecting schema1 manifest") - } - - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{desc.Digest.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, desc.Digest)}, - }) 
- - if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { - t.Fatal("wrong schema version") - } - if fetchedSchema1Manifest.Architecture != "amd64" { - t.Fatal("wrong architecture") - } - if fetchedSchema1Manifest.Name != imageName.Name() { - t.Fatal("wrong image name") - } - if fetchedSchema1Manifest.Tag != tag { - t.Fatal("wrong tag") - } - if len(fetchedSchema1Manifest.FSLayers) != 2 { - t.Fatal("wrong number of FSLayers") - } - layers := args.manifest.(*schema2.DeserializedManifest).Layers - for i := range layers { - if fetchedSchema1Manifest.FSLayers[i].BlobSum != layers[len(layers)-i-1].Digest { - t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) - } - } - if len(fetchedSchema1Manifest.History) != 2 { - t.Fatal("wrong number of History entries") - } - - // Don't check V1Compatibility fields because we're using randomly-generated - // layers. -} - -func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { - imageName := args.imageName - dgst := args.dgst - manifest := args.manifest - - ref, _ := reference.WithDigest(imageName, dgst) - manifestDigestURL, err := env.builder.BuildManifestURL(ref) - // --------------- - // Delete by digest - resp, err := httpDelete(manifestDigestURL) - checkErr(t, err, "deleting manifest by digest") - - checkResponse(t, "deleting manifest", resp, http.StatusAccepted) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{"0"}, - }) - - // --------------- - // Attempt to fetch deleted manifest - resp, err = http.Get(manifestDigestURL) - checkErr(t, err, "fetching deleted manifest by digest") - defer resp.Body.Close() - - checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) - - // --------------- - // Delete already deleted manifest by digest - resp, err = httpDelete(manifestDigestURL) - checkErr(t, err, "re-deleting manifest by digest") - - checkResponse(t, "re-deleting manifest", resp, http.StatusNotFound) - - // -------------------- - // Re-upload manifest by digest - resp = putManifest(t, "putting manifest", manifestDigestURL, args.mediaType, manifest) - checkResponse(t, "putting manifest", resp, http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // --------------- - // Attempt to fetch re-uploaded deleted digest - resp, err = http.Get(manifestDigestURL) - checkErr(t, err, "fetching re-uploaded manifest by digest") - defer resp.Body.Close() - - checkResponse(t, "fetching re-uploaded manifest", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // --------------- - // Attempt to delete an unknown manifest - unknownDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - unknownRef, _ := reference.WithDigest(imageName, unknownDigest) - unknownManifestDigestURL, err := env.builder.BuildManifestURL(unknownRef) - checkErr(t, err, "building unknown manifest url") - - resp, err = httpDelete(unknownManifestDigestURL) - checkErr(t, err, "deleting unknown manifest by digest") - checkResponse(t, "deleting unknown manifest", resp, http.StatusNotFound) - - // -------------------- - // Upload manifest by tag - tag := "atag" - tagRef, _ := reference.WithTag(imageName, tag) - manifestTagURL, err := env.builder.BuildManifestURL(tagRef) - resp = putManifest(t, "putting manifest by tag", manifestTagURL, args.mediaType, manifest) - checkResponse(t, "putting manifest by tag", resp, 
http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - tagsURL, err := env.builder.BuildTagsURL(imageName) - if err != nil { - t.Fatalf("unexpected error building tags url: %v", err) - } - - // Ensure that the tag is listed. - resp, err = http.Get(tagsURL) - if err != nil { - t.Fatalf("unexpected error getting tags: %v", err) - } - defer resp.Body.Close() - - dec := json.NewDecoder(resp.Body) - var tagsResponse tagsAPIResponse - if err := dec.Decode(&tagsResponse); err != nil { - t.Fatalf("unexpected error decoding tags response: %v", err) - } - - if tagsResponse.Name != imageName.Name() { - t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) - } - - if len(tagsResponse.Tags) != 1 { - t.Fatalf("expected 1 tag in response: %v", tagsResponse.Tags) - } - - if tagsResponse.Tags[0] != tag { - t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) - } - - // --------------- - // Delete by digest - resp, err = httpDelete(manifestDigestURL) - checkErr(t, err, "deleting manifest by digest") - - checkResponse(t, "deleting manifest with tag", resp, http.StatusAccepted) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{"0"}, - }) - - // Ensure that the tag is not listed. - resp, err = http.Get(tagsURL) - if err != nil { - t.Fatalf("unexpected error getting tags: %v", err) - } - defer resp.Body.Close() - - dec = json.NewDecoder(resp.Body) - if err := dec.Decode(&tagsResponse); err != nil { - t.Fatalf("unexpected error decoding tags response: %v", err) - } - - if tagsResponse.Name != imageName.Name() { - t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) - } - - if len(tagsResponse.Tags) != 0 { - t.Fatalf("expected 0 tags in response: %v", tagsResponse.Tags) - } - -} - -type testEnv struct { - pk libtrust.PrivateKey - ctx context.Context - config configuration.Configuration - app *App - server *httptest.Server - builder *v2.URLBuilder -} - -func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv { - config := configuration.Configuration{ - Storage: configuration.Storage{ - "testdriver": configuration.Parameters{}, - "delete": configuration.Parameters{"enabled": deleteEnabled}, - }, - Proxy: configuration.Proxy{ - RemoteURL: "http://example.com", - }, - } - - return newTestEnvWithConfig(t, &config) - -} - -func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { - config := configuration.Configuration{ - Storage: configuration.Storage{ - "testdriver": configuration.Parameters{}, - "delete": configuration.Parameters{"enabled": deleteEnabled}, - }, - } - - config.HTTP.Headers = headerConfig - - return newTestEnvWithConfig(t, &config) -} - -func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *testEnv { - ctx := context.Background() - - app := NewApp(ctx, config) - server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) - builder, err := v2.NewURLBuilderFromString(server.URL+config.HTTP.Prefix, false) - - if err != nil { - t.Fatalf("error creating url builder: %v", err) - } - - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key: %v", err) - } - - return &testEnv{ - pk: pk, - ctx: ctx, - config: *config, - app: app, - server: server, - builder: builder, - } -} - -func putManifest(t *testing.T, msg, url, contentType string, v interface{}) *http.Response { - var 
body []byte - - switch m := v.(type) { - case *schema1.SignedManifest: - _, pl, err := m.Payload() - if err != nil { - t.Fatalf("error getting payload: %v", err) - } - body = pl - case *manifestlist.DeserializedManifestList: - _, pl, err := m.Payload() - if err != nil { - t.Fatalf("error getting payload: %v", err) - } - body = pl - default: - var err error - body, err = json.MarshalIndent(v, "", " ") - if err != nil { - t.Fatalf("unexpected error marshaling %v: %v", v, err) - } - } - - req, err := http.NewRequest("PUT", url, bytes.NewReader(body)) - if err != nil { - t.Fatalf("error creating request for %s: %v", msg, err) - } - - if contentType != "" { - req.Header.Set("Content-Type", contentType) - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("error doing put request while %s: %v", msg, err) - } - - return resp -} - -func startPushLayer(t *testing.T, env *testEnv, name reference.Named) (location string, uuid string) { - layerUploadURL, err := env.builder.BuildBlobUploadURL(name) - if err != nil { - t.Fatalf("unexpected error building layer upload url: %v", err) - } - - u, err := url.Parse(layerUploadURL) - if err != nil { - t.Fatalf("error parsing layer upload URL: %v", err) - } - - base, err := url.Parse(env.server.URL) - if err != nil { - t.Fatalf("error parsing server URL: %v", err) - } - - layerUploadURL = base.ResolveReference(u).String() - resp, err := http.Post(layerUploadURL, "", nil) - if err != nil { - t.Fatalf("unexpected error starting layer push: %v", err) - } - - defer resp.Body.Close() - - checkResponse(t, fmt.Sprintf("starting layer push %v", name.String()), resp, http.StatusAccepted) - - u, err = url.Parse(resp.Header.Get("Location")) - if err != nil { - t.Fatalf("error parsing location header: %v", err) - } - - uuid = path.Base(u.Path) - checkHeaders(t, resp, http.Header{ - "Location": []string{"*"}, - "Content-Length": []string{"0"}, - "Docker-Upload-UUID": []string{uuid}, - }) - - return resp.Header.Get("Location"), uuid -} - -// doPushLayer pushes the layer content, returning the response. If you're - // only expecting a successful response, use pushLayer. -func doPushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) (*http.Response, error) { - u, err := url.Parse(uploadURLBase) - if err != nil { - t.Fatalf("unexpected error parsing pushLayer url: %v", err) - } - - u.RawQuery = url.Values{ - "_state": u.Query()["_state"], - "digest": []string{dgst.String()}, - }.Encode() - - uploadURL := u.String() - - // Just do a monolithic upload - req, err := http.NewRequest("PUT", uploadURL, body) - if err != nil { - t.Fatalf("unexpected error creating new request: %v", err) - } - - return http.DefaultClient.Do(req) -} -
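
startPushLayer and doPushLayer above drive the V2 monolithic blob push: a POST to the upload endpoint opens a session whose URL comes back in the Location header, and a single PUT carrying the bytes plus a digest query parameter completes it. A self-contained sketch of that handshake, assuming an absolute Location header and placeholder server and repository values:

package blobpush

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
	"net/url"
)

// PushBlobMonolithic uploads a blob in a single PUT, mirroring the
// POST-then-PUT flow the test helpers above perform.
func PushBlobMonolithic(server, repo string, blob []byte) error {
	// Step 1: open an upload session. The registry answers 202 Accepted
	// and puts the session URL in the Location header. (A registry may
	// return a relative Location; startPushLayer above resolves it
	// against the server URL first, which is skipped here for brevity.)
	resp, err := http.Post(fmt.Sprintf("%s/v2/%s/blobs/uploads/", server, repo), "", nil)
	if err != nil {
		return err
	}
	resp.Body.Close()

	u, err := url.Parse(resp.Header.Get("Location"))
	if err != nil {
		return err
	}

	// Step 2: complete the upload with the content digest as a query
	// parameter, preserving any _state token already embedded in the URL.
	q := u.Query()
	q.Set("digest", fmt.Sprintf("sha256:%x", sha256.Sum256(blob)))
	u.RawQuery = q.Encode()

	req, err := http.NewRequest("PUT", u.String(), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("unexpected status %d completing blob upload", resp.StatusCode)
	}
	return nil
}

-// pushLayer pushes the layer content returning the url on success.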
-func pushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) string { - digester := digest.Canonical.New() - - resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) - if err != nil { - t.Fatalf("unexpected error doing push layer request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) - - if err != nil { - t.Fatalf("error generating sha256 digest of body") - } - - sha256Dgst := digester.Digest() - - ref, _ := reference.WithDigest(name, sha256Dgst) - expectedLayerURL, err := ub.BuildBlobURL(ref) - if err != nil { - t.Fatalf("error building expected layer url: %v", err) - } - - checkHeaders(t, resp, http.Header{ - "Location": []string{expectedLayerURL}, - "Content-Length": []string{"0"}, - "Docker-Content-Digest": []string{sha256Dgst.String()}, - }) - - return resp.Header.Get("Location") -} - -func finishUpload(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, dgst digest.Digest) string { - resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, nil) - if err != nil { - t.Fatalf("unexpected error doing push layer request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) - - ref, _ := reference.WithDigest(name, dgst) - expectedLayerURL, err := ub.BuildBlobURL(ref) - if err != nil { - t.Fatalf("error building expected layer url: %v", err) - } - - checkHeaders(t, resp, http.Header{ - "Location": []string{expectedLayerURL}, - "Content-Length": []string{"0"}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - return resp.Header.Get("Location") -} - -func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Response, digest.Digest, error) { - u, err := url.Parse(uploadURLBase) - if err != nil { - t.Fatalf("unexpected error parsing pushLayer url: %v", err) - } - - u.RawQuery = url.Values{ - "_state": u.Query()["_state"], - }.Encode() - - uploadURL := u.String() - - digester := digest.Canonical.New() - - req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash())) - if err != nil { - t.Fatalf("unexpected error creating new request: %v", err) - } - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := http.DefaultClient.Do(req) - - return resp, digester.Digest(), err -} - -func pushChunk(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, body io.Reader, length int64) (string, digest.Digest) { - resp, dgst, err := doPushChunk(t, uploadURLBase, body) - if err != nil { - t.Fatalf("unexpected error doing push layer request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "putting chunk", resp, http.StatusAccepted) - - if err != nil { - t.Fatalf("error generating sha256 digest of body") - } - - checkHeaders(t, resp, http.Header{ - "Range": []string{fmt.Sprintf("0-%d", length-1)}, - "Content-Length": []string{"0"}, - }) - - return resp.Header.Get("Location"), dgst -} - -func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) { - if resp.StatusCode != expectedStatus { - t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus) - maybeDumpResponse(t, resp) - - t.FailNow() - } - - // We expect the headers included in the configuration, unless the - // status code is 405 (Method Not Allowed), which means the handler - // doesn't even get called. 
- if resp.StatusCode != 405 && !reflect.DeepEqual(resp.Header["X-Content-Type-Options"], []string{"nosniff"}) { - t.Logf("missing or incorrect header X-Content-Type-Options %s", msg) - maybeDumpResponse(t, resp) - - t.FailNow() - } -} - -// checkBodyHasErrorCodes ensures the body is an error body and has the - // expected error codes, returning the error structure, the json slice and a - // count of the errors by code. -func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...errcode.ErrorCode) (errcode.Errors, []byte, map[errcode.ErrorCode]int) { - p, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("unexpected error reading body %s: %v", msg, err) - } - - var errs errcode.Errors - if err := json.Unmarshal(p, &errs); err != nil { - t.Fatalf("unexpected error decoding error response: %v", err) - } - - if len(errs) == 0 { - t.Fatalf("expected errors in response") - } - - // TODO(stevvooe): Shoot. The error setup is not working out. The content- - // type headers are being set after writing the status code. - // if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" { - // t.Fatalf("unexpected content type: %v != 'application/json'", - // resp.Header.Get("Content-Type")) - // } - - expected := map[errcode.ErrorCode]struct{}{} - counts := map[errcode.ErrorCode]int{} - - // Initialize map with zeros for expected - for _, code := range errorCodes { - expected[code] = struct{}{} - counts[code] = 0 - } - - for _, e := range errs { - err, ok := e.(errcode.ErrorCoder) - if !ok { - t.Fatalf("not an ErrorCoder: %#v", e) - } - if _, ok := expected[err.ErrorCode()]; !ok { - t.Fatalf("unexpected error code %v encountered during %s: %s ", err.ErrorCode(), msg, string(p)) - } - counts[err.ErrorCode()]++ - } - - // Ensure that counts of expected errors were all non-zero - for code := range expected { - if counts[code] == 0 { - t.Fatalf("expected error code %v not encountered during %s: %s", code, msg, string(p)) - } - } - - return errs, p, counts -} - -func maybeDumpResponse(t *testing.T, resp *http.Response) { - if d, err := httputil.DumpResponse(resp, true); err != nil { - t.Logf("error dumping response: %v", err) - } else { - t.Logf("response:\n%s", string(d)) - } -} - -// checkHeaders checks that the response has at least the headers. If not, the - // test will fail. If a passed in header value is "*", any non-zero value will - // suffice as a match. -func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) { - for k, vs := range headers { - if resp.Header.Get(k) == "" { - t.Fatalf("response missing header %q", k) - } - - for _, v := range vs { - if v == "*" { - // Just ensure there is some value. 
- if len(resp.Header[http.CanonicalHeaderKey(k)]) > 0 { - continue - } - } - - for _, hv := range resp.Header[http.CanonicalHeaderKey(k)] { - if hv != v { - t.Fatalf("%+v %v header value not matched in response: %q != %q", resp.Header, k, hv, v) - } - } - } - } -} - -func checkErr(t *testing.T, err error, msg string) { - if err != nil { - t.Fatalf("unexpected error %s: %v", msg, err) - } -} - -func createRepository(env *testEnv, t *testing.T, imageName string, tag string) digest.Digest { - imageNameRef, err := reference.ParseNamed(imageName) - if err != nil { - t.Fatalf("unable to parse reference: %v", err) - } - - unsignedManifest := &schema1.Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: imageName, - Tag: tag, - FSLayers: []schema1.FSLayer{ - { - BlobSum: "asdf", - }, - }, - History: []schema1.History{ - { - V1Compatibility: "", - }, - }, - } - - // Push 2 random layers - expectedLayers := make(map[digest.Digest]io.ReadSeeker) - - for i := range unsignedManifest.FSLayers { - rs, dgstStr, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random layer %d: %v", i, err) - } - dgst := digest.Digest(dgstStr) - - expectedLayers[dgst] = rs - unsignedManifest.FSLayers[i].BlobSum = dgst - uploadURLBase, _ := startPushLayer(t, env, imageNameRef) - pushLayer(t, env.builder, imageNameRef, dgst, uploadURLBase, rs) - } - - signedManifest, err := schema1.Sign(unsignedManifest, env.pk) - if err != nil { - t.Fatalf("unexpected error signing manifest: %v", err) - } - - dgst := digest.FromBytes(signedManifest.Canonical) - - // Create this repository by tag to ensure the tag mapping is made in the registry - tagRef, _ := reference.WithTag(imageNameRef, tag) - manifestDigestURL, err := env.builder.BuildManifestURL(tagRef) - checkErr(t, err, "building manifest url") - - digestRef, _ := reference.WithDigest(imageNameRef, dgst) - location, err := env.builder.BuildManifestURL(digestRef) - checkErr(t, err, "building location URL") - - resp := putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{location}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - return dgst -} - -// Test mutation operations on a registry configured as a cache. Ensure that they return -// appropriate errors. 
-func TestRegistryAsCacheMutationAPIs(t *testing.T) { - deleteEnabled := true - env := newTestEnvMirror(t, deleteEnabled) - - imageName, _ := reference.ParseNamed("foo/bar") - tag := "latest" - tagRef, _ := reference.WithTag(imageName, tag) - manifestURL, err := env.builder.BuildManifestURL(tagRef) - if err != nil { - t.Fatalf("unexpected error building base url: %v", err) - } - - // Manifest upload - m := &schema1.Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: imageName.Name(), - Tag: tag, - FSLayers: []schema1.FSLayer{}, - History: []schema1.History{}, - } - - sm, err := schema1.Sign(m, env.pk) - if err != nil { - t.Fatalf("error signing manifest: %v", err) - } - - resp := putManifest(t, "putting signed manifest", manifestURL, "", sm) - checkResponse(t, "putting signed manifest to cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) - - // Manifest Delete - resp, err = httpDelete(manifestURL) - checkResponse(t, "deleting signed manifest from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) - - // Blob upload initialization - layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) - if err != nil { - t.Fatalf("unexpected error building layer upload url: %v", err) - } - - resp, err = http.Post(layerUploadURL, "", nil) - if err != nil { - t.Fatalf("unexpected error starting layer push: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, fmt.Sprintf("starting layer push to cache %v", imageName), resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) - - // Blob Delete - ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) - blobURL, err := env.builder.BuildBlobURL(ref) - resp, err = httpDelete(blobURL) - checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) - -} -
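
The rejections asserted above follow from NewApp treating any configuration with Proxy.RemoteURL set as a pull-through cache: isCache becomes true and every mutating route answers with ErrorCodeUnsupported. A hedged sketch of wiring such a mirror programmatically, in the spirit of newTestEnvMirror; the upstream URL is a placeholder, and this assumes it lives inside this handlers package so the imports already present in app.go apply:

func newMirrorApp(ctx context.Context) *App {
	config := &configuration.Configuration{
		Storage: configuration.Storage{
			"inmemory": configuration.Parameters{},
			"delete":   configuration.Parameters{"enabled": true},
		},
		// A non-empty RemoteURL is what flips the app into cache mode;
		// pushes and deletes then return ErrorCodeUnsupported regardless
		// of the storage settings above.
		Proxy: configuration.Proxy{
			RemoteURL: "https://registry-1.docker.io", // placeholder upstream
		},
	}
	return NewApp(ctx, config)
}

-// TestCheckContextNotifier makes sure the API endpoints get a ResponseWriter - // that implements http.CloseNotifier.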
-func TestCheckContextNotifier(t *testing.T) { - env := newTestEnv(t, false) - - // Register a new endpoint for testing - env.app.router.Handle("/unittest/{name}/", env.app.dispatcher(func(ctx *Context, r *http.Request) http.Handler { - return handlers.MethodHandler{ - "GET": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if _, ok := w.(http.CloseNotifier); !ok { - t.Fatal("could not cast ResponseWriter to CloseNotifier") - } - w.WriteHeader(200) - }), - } - })) - - resp, err := http.Get(env.server.URL + "/unittest/reponame/") - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Fatalf("wrong status code - expected 200, got %d", resp.StatusCode) - } -} - -func TestProxyManifestGetByTag(t *testing.T) { - truthConfig := configuration.Configuration{ - Storage: configuration.Storage{ - "testdriver": configuration.Parameters{}, - }, - } - truthConfig.HTTP.Headers = headerConfig - - imageName, _ := reference.ParseNamed("foo/bar") - tag := "latest" - - truthEnv := newTestEnvWithConfig(t, &truthConfig) - // create a repository in the truth registry - dgst := createRepository(truthEnv, t, imageName.Name(), tag) - - proxyConfig := configuration.Configuration{ - Storage: configuration.Storage{ - "testdriver": configuration.Parameters{}, - }, - Proxy: configuration.Proxy{ - RemoteURL: truthEnv.server.URL, - }, - } - proxyConfig.HTTP.Headers = headerConfig - - proxyEnv := newTestEnvWithConfig(t, &proxyConfig) - - digestRef, _ := reference.WithDigest(imageName, dgst) - manifestDigestURL, err := proxyEnv.builder.BuildManifestURL(digestRef) - checkErr(t, err, "building manifest url") - - resp, err := http.Get(manifestDigestURL) - checkErr(t, err, "fetching manifest from proxy by digest") - defer resp.Body.Close() - - tagRef, _ := reference.WithTag(imageName, tag) - manifestTagURL, err := proxyEnv.builder.BuildManifestURL(tagRef) - checkErr(t, err, "building manifest url") - - resp, err = http.Get(manifestTagURL) - checkErr(t, err, "fetching manifest from proxy by tag") - defer resp.Body.Close() - checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // Create another manifest in the remote with the same image/tag pair - newDigest := createRepository(truthEnv, t, imageName.Name(), tag) - if dgst == newDigest { - t.Fatalf("non-random test data") - } - - // fetch it with the same proxy URL as before. 
Ensure the updated content is at the same tag - resp, err = http.Get(manifestTagURL) - checkErr(t, err, "fetching manifest from proxy by tag") - defer resp.Body.Close() - checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{newDigest.String()}, - }) -} diff --git a/docs/handlers/app.go b/docs/handlers/app.go deleted file mode 100644 index 33f49670..00000000 --- a/docs/handlers/app.go +++ /dev/null @@ -1,996 +0,0 @@ -package handlers - -import ( - cryptorand "crypto/rand" - "expvar" - "fmt" - "math/rand" - "net" - "net/http" - "net/url" - "os" - "runtime" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/configuration" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/health" - "github.com/docker/distribution/health/checks" - "github.com/docker/distribution/notifications" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/auth" - registrymiddleware "github.com/docker/distribution/registry/middleware/registry" - repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" - "github.com/docker/distribution/registry/proxy" - "github.com/docker/distribution/registry/storage" - memorycache "github.com/docker/distribution/registry/storage/cache/memory" - rediscache "github.com/docker/distribution/registry/storage/cache/redis" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/factory" - storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" - "github.com/docker/distribution/version" - "github.com/docker/libtrust" - "github.com/garyburd/redigo/redis" - "github.com/gorilla/mux" - "golang.org/x/net/context" -) - -// randomSecretSize is the number of random bytes to generate if no secret -// was specified. -const randomSecretSize = 32 - -// defaultCheckInterval is the default time in between health checks -const defaultCheckInterval = 10 * time.Second - -// App is a global registry application object. Shared resources can be placed -// on this object that will be accessible from all requests. Any writable -// fields should be protected. -type App struct { - context.Context - - Config *configuration.Configuration - - router *mux.Router // main application router, configured with dispatchers - driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. - registry distribution.Namespace // registry is the primary registry backend for the app instance. - accessController auth.AccessController // main access controller for application - - // httpHost is a parsed representation of the http.host parameter from - // the configuration. Only the Scheme and Host fields are used. - httpHost url.URL - - // events contains notification related configuration. - events struct { - sink notifications.Sink - source notifications.SourceRecord - } - - redis *redis.Pool - - // trustKey is a deprecated key used to sign manifests converted to - // schema1 for backward compatibility. It should not be used for any - // other purposes. 
- trustKey libtrust.PrivateKey - - // isCache is true if this registry is configured as a pull through cache - isCache bool - - // readOnly is true if the registry is in a read-only maintenance mode - readOnly bool -} - -// NewApp takes a configuration and returns a configured app, ready to serve -// requests. The app only implements ServeHTTP and can be wrapped in other -// handlers accordingly. -func NewApp(ctx context.Context, config *configuration.Configuration) *App { - app := &App{ - Config: config, - Context: ctx, - router: v2.RouterWithPrefix(config.HTTP.Prefix), - isCache: config.Proxy.RemoteURL != "", - } - - // Register the handler dispatchers. - app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { - return http.HandlerFunc(apiBase) - }) - app.register(v2.RouteNameManifest, imageManifestDispatcher) - app.register(v2.RouteNameCatalog, catalogDispatcher) - app.register(v2.RouteNameTags, tagsDispatcher) - app.register(v2.RouteNameBlob, blobDispatcher) - app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) - app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher) - - // override the storage driver's UA string for registry outbound HTTP requests - storageParams := config.Storage.Parameters() - if storageParams == nil { - storageParams = make(configuration.Parameters) - } - storageParams["useragent"] = fmt.Sprintf("docker-distribution/%s %s", version.Version, runtime.Version()) - - var err error - app.driver, err = factory.Create(config.Storage.Type(), storageParams) - if err != nil { - // TODO(stevvooe): Move the creation of a service into a protected - // method, where this is created lazily. Its status can be queried via - // a health check. - panic(err) - } - - purgeConfig := uploadPurgeDefaultConfig() - if mc, ok := config.Storage["maintenance"]; ok { - if v, ok := mc["uploadpurging"]; ok { - purgeConfig, ok = v.(map[interface{}]interface{}) - if !ok { - panic("uploadpurging config key must contain additional keys") - } - } - if v, ok := mc["readonly"]; ok { - readOnly, ok := v.(map[interface{}]interface{}) - if !ok { - panic("readonly config key must contain additional keys") - } - if readOnlyEnabled, ok := readOnly["enabled"]; ok { - app.readOnly, ok = readOnlyEnabled.(bool) - if !ok { - panic("readonly's enabled config key must have a boolean value") - } - } - } - } - - startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) - - app.driver, err = applyStorageMiddleware(app.driver, config.Middleware["storage"]) - if err != nil { - panic(err) - } - - app.configureSecret(config) - app.configureEvents(config) - app.configureRedis(config) - app.configureLogHook(config) - - options := registrymiddleware.GetRegistryOptions() - if config.Compatibility.Schema1.TrustKey != "" { - app.trustKey, err = libtrust.LoadKeyFile(config.Compatibility.Schema1.TrustKey) - if err != nil { - panic(fmt.Sprintf(`could not load schema1 "signingkey" parameter: %v`, err)) - } - } else { - // Generate an ephemeral key to be used for signing converted manifests - // for clients that don't support schema2. 
- app.trustKey, err = libtrust.GenerateECP256PrivateKey() - if err != nil { - panic(err) - } - } - - options = append(options, storage.Schema1SigningKey(app.trustKey)) - - if config.HTTP.Host != "" { - u, err := url.Parse(config.HTTP.Host) - if err != nil { - panic(fmt.Sprintf(`could not parse http "host" parameter: %v`, err)) - } - app.httpHost = *u - } - - if app.isCache { - options = append(options, storage.DisableDigestResumption) - } - - // configure deletion - if d, ok := config.Storage["delete"]; ok { - e, ok := d["enabled"] - if ok { - if deleteEnabled, ok := e.(bool); ok && deleteEnabled { - options = append(options, storage.EnableDelete) - } - } - } - - // configure redirects - var redirectDisabled bool - if redirectConfig, ok := config.Storage["redirect"]; ok { - v := redirectConfig["disable"] - switch v := v.(type) { - case bool: - redirectDisabled = v - default: - panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig)) - } - } - if redirectDisabled { - ctxu.GetLogger(app).Infof("backend redirection disabled") - } else { - options = append(options, storage.EnableRedirect) - } - - // configure storage caches - if cc, ok := config.Storage["cache"]; ok { - v, ok := cc["blobdescriptor"] - if !ok { - // Backwards compatible: "layerinfo" == "blobdescriptor" - v = cc["layerinfo"] - } - - switch v { - case "redis": - if app.redis == nil { - panic("redis configuration required to use for layerinfo cache") - } - cacheProvider := rediscache.NewRedisBlobDescriptorCacheProvider(app.redis) - localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) - app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) - if err != nil { - panic("could not create registry: " + err.Error()) - } - ctxu.GetLogger(app).Infof("using redis blob descriptor cache") - case "inmemory": - cacheProvider := memorycache.NewInMemoryBlobDescriptorCacheProvider() - localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) - app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) - if err != nil { - panic("could not create registry: " + err.Error()) - } - ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") - default: - if v != "" { - ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", config.Storage["cache"]) - } - } - } - - if app.registry == nil { - // configure the registry if no cache section is available. - app.registry, err = storage.NewRegistry(app.Context, app.driver, options...) 
- if err != nil { - panic("could not create registry: " + err.Error()) - } - } - - app.registry, err = applyRegistryMiddleware(app, app.registry, config.Middleware["registry"]) - if err != nil { - panic(err) - } - - authType := config.Auth.Type() - - if authType != "" { - accessController, err := auth.GetAccessController(config.Auth.Type(), config.Auth.Parameters()) - if err != nil { - panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) - } - app.accessController = accessController - ctxu.GetLogger(app).Debugf("configured %q access controller", authType) - } - - // configure as a pull through cache - if config.Proxy.RemoteURL != "" { - app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, config.Proxy) - if err != nil { - panic(err.Error()) - } - app.isCache = true - ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", config.Proxy.RemoteURL) - } - - return app -} - -// RegisterHealthChecks is an awful hack to defer health check registration -// control to callers. This should only ever be called once per registry -// process, typically in a main function. The correct way would be register -// health checks outside of app, since multiple apps may exist in the same -// process. Because the configuration and app are tightly coupled, -// implementing this properly will require a refactor. This method may panic -// if called twice in the same process. -func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { - if len(healthRegistries) > 1 { - panic("RegisterHealthChecks called with more than one registry") - } - healthRegistry := health.DefaultRegistry - if len(healthRegistries) == 1 { - healthRegistry = healthRegistries[0] - } - - if app.Config.Health.StorageDriver.Enabled { - interval := app.Config.Health.StorageDriver.Interval - if interval == 0 { - interval = defaultCheckInterval - } - - storageDriverCheck := func() error { - _, err := app.driver.List(app, "/") // "/" should always exist - return err // any error will be treated as failure - } - - if app.Config.Health.StorageDriver.Threshold != 0 { - healthRegistry.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), interval, app.Config.Health.StorageDriver.Threshold, storageDriverCheck) - } else { - healthRegistry.RegisterPeriodicFunc("storagedriver_"+app.Config.Storage.Type(), interval, storageDriverCheck) - } - } - - for _, fileChecker := range app.Config.Health.FileCheckers { - interval := fileChecker.Interval - if interval == 0 { - interval = defaultCheckInterval - } - ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second) - healthRegistry.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval)) - } - - for _, httpChecker := range app.Config.Health.HTTPCheckers { - interval := httpChecker.Interval - if interval == 0 { - interval = defaultCheckInterval - } - - statusCode := httpChecker.StatusCode - if statusCode == 0 { - statusCode = 200 - } - - checker := checks.HTTPChecker(httpChecker.URI, statusCode, httpChecker.Timeout, httpChecker.Headers) - - if httpChecker.Threshold != 0 { - ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold) - healthRegistry.Register(httpChecker.URI, health.PeriodicThresholdChecker(checker, interval, httpChecker.Threshold)) - } else { - ctxu.GetLogger(app).Infof("configuring HTTP health 
check uri=%s, interval=%d", httpChecker.URI, interval/time.Second) - healthRegistry.Register(httpChecker.URI, health.PeriodicChecker(checker, interval)) - } - } - - for _, tcpChecker := range app.Config.Health.TCPCheckers { - interval := tcpChecker.Interval - if interval == 0 { - interval = defaultCheckInterval - } - - checker := checks.TCPChecker(tcpChecker.Addr, tcpChecker.Timeout) - - if tcpChecker.Threshold != 0 { - ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d, threshold=%d", tcpChecker.Addr, interval/time.Second, tcpChecker.Threshold) - healthRegistry.Register(tcpChecker.Addr, health.PeriodicThresholdChecker(checker, interval, tcpChecker.Threshold)) - } else { - ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d", tcpChecker.Addr, interval/time.Second) - healthRegistry.Register(tcpChecker.Addr, health.PeriodicChecker(checker, interval)) - } - } -} - -// register a handler with the application, by route name. The handler will be -// passed through the application filters and context will be constructed at -// request time. -func (app *App) register(routeName string, dispatch dispatchFunc) { - - // TODO(stevvooe): This odd dispatcher/route registration is by-product of - // some limitations in the gorilla/mux router. We are using it to keep - // routing consistent between the client and server, but we may want to - // replace it with manual routing and structure-based dispatch for better - // control over the request execution. - - app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch)) -} - -// configureEvents prepares the event sink for action. -func (app *App) configureEvents(configuration *configuration.Configuration) { - // Configure all of the endpoint sinks. - var sinks []notifications.Sink - for _, endpoint := range configuration.Notifications.Endpoints { - if endpoint.Disabled { - ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name) - continue - } - - ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers) - endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{ - Timeout: endpoint.Timeout, - Threshold: endpoint.Threshold, - Backoff: endpoint.Backoff, - Headers: endpoint.Headers, - }) - - sinks = append(sinks, endpoint) - } - - // NOTE(stevvooe): Moving to a new queuing implementation is as easy as - // replacing broadcaster with a rabbitmq implementation. It's recommended - // that the registry instances also act as the workers to keep deployment - // simple. - app.events.sink = notifications.NewBroadcaster(sinks...) - - // Populate registry event source - hostname, err := os.Hostname() - if err != nil { - hostname = configuration.HTTP.Addr - } else { - // try to pick the port off the config - _, port, err := net.SplitHostPort(configuration.HTTP.Addr) - if err == nil { - hostname = net.JoinHostPort(hostname, port) - } - } - - app.events.source = notifications.SourceRecord{ - Addr: hostname, - InstanceID: ctxu.GetStringValue(app, "instance.id"), - } -} - -func (app *App) configureRedis(configuration *configuration.Configuration) { - if configuration.Redis.Addr == "" { - ctxu.GetLogger(app).Infof("redis not configured") - return - } - - pool := &redis.Pool{ - Dial: func() (redis.Conn, error) { - // TODO(stevvooe): Yet another use case for contextual timing. 
- ctx := context.WithValue(app, "redis.connect.startedat", time.Now()) - - done := func(err error) { - logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", - ctxu.Since(ctx, "redis.connect.startedat")) - if err != nil { - logger.Errorf("redis: error connecting: %v", err) - } else { - logger.Infof("redis: connect %v", configuration.Redis.Addr) - } - } - - conn, err := redis.DialTimeout("tcp", - configuration.Redis.Addr, - configuration.Redis.DialTimeout, - configuration.Redis.ReadTimeout, - configuration.Redis.WriteTimeout) - if err != nil { - ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v", - configuration.Redis.Addr, err) - done(err) - return nil, err - } - - // authorize the connection - if configuration.Redis.Password != "" { - if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - // select the database to use - if configuration.Redis.DB != 0 { - if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - done(nil) - return conn, nil - }, - MaxIdle: configuration.Redis.Pool.MaxIdle, - MaxActive: configuration.Redis.Pool.MaxActive, - IdleTimeout: configuration.Redis.Pool.IdleTimeout, - TestOnBorrow: func(c redis.Conn, t time.Time) error { - // TODO(stevvooe): We can probably do something more interesting - // here with the health package. - _, err := c.Do("PING") - return err - }, - Wait: false, // if a connection is not available, proceed without cache. - } - - app.redis = pool - - // setup expvar - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { - return map[string]interface{}{ - "Config": configuration.Redis, - "Active": app.redis.ActiveCount(), - } - })) -} - -// configureLogHook prepares logging hook parameters. -func (app *App) configureLogHook(configuration *configuration.Configuration) { - entry, ok := ctxu.GetLogger(app).(*log.Entry) - if !ok { - // somehow, we are not using logrus - return - } - - logger := entry.Logger - - for _, configHook := range configuration.Log.Hooks { - if !configHook.Disabled { - switch configHook.Type { - case "mail": - hook := &logHook{} - hook.LevelsParam = configHook.Levels - hook.Mail = &mailer{ - Addr: configHook.MailOptions.SMTP.Addr, - Username: configHook.MailOptions.SMTP.Username, - Password: configHook.MailOptions.SMTP.Password, - Insecure: configHook.MailOptions.SMTP.Insecure, - From: configHook.MailOptions.From, - To: configHook.MailOptions.To, - } - logger.Hooks.Add(hook) - default: - } - } - } -} - -// configureSecret creates a random secret if a secret wasn't included in the -// configuration. -func (app *App) configureSecret(configuration *configuration.Configuration) { - if configuration.HTTP.Secret == "" { - var secretBytes [randomSecretSize]byte - if _, err := cryptorand.Read(secretBytes[:]); err != nil { - panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err)) - } - configuration.HTTP.Secret = string(secretBytes[:]) - ctxu.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. 
To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.") - } -} - -func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() // ensure that request body is always closed. - - // Instantiate an http context here so we can track the error codes - // returned by the request router. - ctx := defaultContextManager.context(app, w, r) - - defer func() { - status, ok := ctx.Value("http.response.status").(int) - if ok && status >= 200 && status <= 399 { - ctxu.GetResponseLogger(ctx).Infof("response completed") - } - }() - defer defaultContextManager.release(ctx) - - // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. - var err error - w, err = ctxu.GetResponseWriter(ctx) - if err != nil { - ctxu.GetLogger(ctx).Warnf("response writer not found in context") - } - - // Set a header with the Docker Distribution API Version for all responses. - w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") - app.router.ServeHTTP(w, r) -} - -// dispatchFunc takes a context and request and returns a constructed handler -// for the route. The dispatcher will use this to dynamically create request -// specific handlers for each endpoint without creating a new router for each -// request. -type dispatchFunc func(ctx *Context, r *http.Request) http.Handler - -// TODO(stevvooe): dispatchers should probably have some validation error -// chain with proper error reporting. - -// dispatcher returns a handler that constructs a request specific context and -// handler, using the dispatch factory function. -func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - for headerName, headerValues := range app.Config.HTTP.Headers { - for _, value := range headerValues { - w.Header().Add(headerName, value) - } - } - - context := app.context(w, r) - - if err := app.authorized(w, r, context); err != nil { - ctxu.GetLogger(context).Warnf("error authorizing context: %v", err) - return - } - - // Add username to request logging - context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, auth.UserNameKey)) - - if app.nameRequired(r) { - nameRef, err := reference.ParseNamed(getName(context)) - if err != nil { - ctxu.GetLogger(context).Errorf("error parsing reference from context: %v", err) - context.Errors = append(context.Errors, distribution.ErrRepositoryNameInvalid{ - Name: getName(context), - Reason: err, - }) - if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - return - } - repository, err := app.registry.Repository(context, nameRef) - - if err != nil { - ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) - - switch err := err.(type) { - case distribution.ErrRepositoryUnknown: - context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err)) - case distribution.ErrRepositoryNameInvalid: - context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) - case errcode.Error: - context.Errors = append(context.Errors, err) - } - - if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - return - } - - // assign and decorate the authorized repository with an event bridge. 
- context.Repository = notifications.Listen( - repository, - app.eventBridge(context, r)) - - context.Repository, err = applyRepoMiddleware(app, context.Repository, app.Config.Middleware["repository"]) - if err != nil { - ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) - context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - - if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - return - } - } - - dispatch(context, r).ServeHTTP(w, r) - // Automated error response handling here. Handlers may return their - // own errors if they need different behavior (such as range errors - // for layer upload). - if context.Errors.Len() > 0 { - if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - - app.logError(context, context.Errors) - } - }) -} - -func (app *App) logError(context context.Context, errors errcode.Errors) { - for _, e1 := range errors { - var c ctxu.Context - - switch e1.(type) { - case errcode.Error: - e, _ := e1.(errcode.Error) - c = ctxu.WithValue(context, "err.code", e.Code) - c = ctxu.WithValue(c, "err.message", e.Code.Message()) - c = ctxu.WithValue(c, "err.detail", e.Detail) - case errcode.ErrorCode: - e, _ := e1.(errcode.ErrorCode) - c = ctxu.WithValue(context, "err.code", e) - c = ctxu.WithValue(c, "err.message", e.Message()) - default: - // just normal go 'error' - c = ctxu.WithValue(context, "err.code", errcode.ErrorCodeUnknown) - c = ctxu.WithValue(c, "err.message", e1.Error()) - } - - c = ctxu.WithLogger(c, ctxu.GetLogger(c, - "err.code", - "err.message", - "err.detail")) - ctxu.GetResponseLogger(c).Errorf("response completed with error") - } -} - -// context constructs the context object for the application. This should - // only be called once per request. -func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { - ctx := defaultContextManager.context(app, w, r) - ctx = ctxu.WithVars(ctx, r) - ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, - "vars.name", - "vars.reference", - "vars.digest", - "vars.uuid")) - - context := &Context{ - App: app, - Context: ctx, - } - - if app.httpHost.Scheme != "" && app.httpHost.Host != "" { - // A "host" item in the configuration takes precedence over - // X-Forwarded-Proto and X-Forwarded-Host headers, and the - // hostname in the request. - context.urlBuilder = v2.NewURLBuilder(&app.httpHost, false) - } else { - context.urlBuilder = v2.NewURLBuilderFromRequest(r, app.Config.HTTP.RelativeURLs) - } - - return context -} - -// authorized checks if the request can proceed with access to the requested -// repository. If it succeeds, the context may access the requested -// repository. An error will be returned if access is not available. -func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error { - ctxu.GetLogger(context).Debug("authorizing request") - repo := getName(context) - - if app.accessController == nil { - return nil // access controller is not enabled. - } - - var accessRecords []auth.Access - - if repo != "" { - accessRecords = appendAccessRecords(accessRecords, r.Method, repo) - if fromRepo := r.FormValue("from"); fromRepo != "" { - // mounting a blob from one repository to another requires pull (GET) - // access to the source repository. 
- accessRecords = appendAccessRecords(accessRecords, "GET", fromRepo) - } - } else { - // Only allow the name not to be set on the base route. - if app.nameRequired(r) { - // For this to be properly secured, repo must always be set for a - // resource that may make a modification. The only condition under - // which name is not set and we still allow access is when the - // base route is accessed. This section prevents us from making - // that mistake elsewhere in the code, allowing any operation to - // proceed. - if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - return fmt.Errorf("forbidden: no repository name") - } - accessRecords = appendCatalogAccessRecord(accessRecords, r) - } - - ctx, err := app.accessController.Authorized(context.Context, accessRecords...) - if err != nil { - switch err := err.(type) { - case auth.Challenge: - // Add the appropriate WWW-Auth header - err.SetHeaders(w) - - if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - default: - // This condition is a potential security problem either in - // the configuration or whatever is backing the access - // controller. Just return a bad request with no information - // to avoid exposure. The request should not proceed. - ctxu.GetLogger(context).Errorf("error checking authorization: %v", err) - w.WriteHeader(http.StatusBadRequest) - } - - return err - } - - // TODO(stevvooe): This pattern needs to be cleaned up a bit. One context - // should be replaced by another, rather than replacing the context on a - // mutable object. - context.Context = ctx - return nil -} - -// eventBridge returns a bridge for the current request, configured with the -// correct actor and source. -func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener { - actor := notifications.ActorRecord{ - Name: getUserName(ctx, r), - } - request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r) - - return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) -} - -// nameRequired returns true if the route requires a name. -func (app *App) nameRequired(r *http.Request) bool { - route := mux.CurrentRoute(r) - routeName := route.GetName() - return route == nil || (routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog) -} - -// apiBase implements a simple yes-man for doing overall checks against the -// api. This can support auth roundtrips to support docker login. -func apiBase(w http.ResponseWriter, r *http.Request) { - const emptyJSON = "{}" - // Provide a simple /v2/ 200 OK response with empty json response. - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) - - fmt.Fprint(w, emptyJSON) -} - -// appendAccessRecords checks the method and adds the appropriate Access records to the records list. 
-func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access { - resource := auth.Resource{ - Type: "repository", - Name: repo, - } - - switch method { - case "GET", "HEAD": - records = append(records, - auth.Access{ - Resource: resource, - Action: "pull", - }) - case "POST", "PUT", "PATCH": - records = append(records, - auth.Access{ - Resource: resource, - Action: "pull", - }, - auth.Access{ - Resource: resource, - Action: "push", - }) - case "DELETE": - // DELETE access requires full admin rights, which is represented - // as "*". This may not be ideal. - records = append(records, - auth.Access{ - Resource: resource, - Action: "*", - }) - } - return records -} - -// Add the access record for the catalog if it's our current route -func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access { - route := mux.CurrentRoute(r) - routeName := route.GetName() - - if routeName == v2.RouteNameCatalog { - resource := auth.Resource{ - Type: "registry", - Name: "catalog", - } - - accessRecords = append(accessRecords, - auth.Access{ - Resource: resource, - Action: "*", - }) - } - return accessRecords -} - -// applyRegistryMiddleware wraps a registry instance with the configured middlewares -func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { - for _, mw := range middlewares { - rmw, err := registrymiddleware.Get(ctx, mw.Name, mw.Options, registry) - if err != nil { - return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err) - } - registry = rmw - } - return registry, nil - -} - -// applyRepoMiddleware wraps a repository with the configured middlewares -func applyRepoMiddleware(ctx context.Context, repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { - for _, mw := range middlewares { - rmw, err := repositorymiddleware.Get(ctx, mw.Name, mw.Options, repository) - if err != nil { - return nil, err - } - repository = rmw - } - return repository, nil -} - -// applyStorageMiddleware wraps a storage driver with the configured middlewares -func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) { - for _, mw := range middlewares { - smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver) - if err != nil { - return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err) - } - driver = smw - } - return driver, nil -} - -// uploadPurgeDefaultConfig provides a default configuration for upload -// purging to be used in the absence of configuration in the -// configuration file -func uploadPurgeDefaultConfig() map[interface{}]interface{} { - config := map[interface{}]interface{}{} - config["enabled"] = true - config["age"] = "168h" - config["interval"] = "24h" - config["dryrun"] = false - return config -} - -func badPurgeUploadConfig(reason string) { - panic(fmt.Sprintf("Unable to parse upload purge configuration: %s", reason)) -} - -// startUploadPurger schedules a goroutine which will periodically -// check upload directories for old files and delete them -func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) { - if config["enabled"] == false { - return - } - - var purgeAgeDuration time.Duration - var err error - purgeAge, ok := config["age"] - if ok { - ageStr, 
ok := purgeAge.(string) - if !ok { - badPurgeUploadConfig("age is not a string") - } - purgeAgeDuration, err = time.ParseDuration(ageStr) - if err != nil { - badPurgeUploadConfig(fmt.Sprintf("Cannot parse duration: %s", err.Error())) - } - } else { - badPurgeUploadConfig("age missing") - } - - var intervalDuration time.Duration - interval, ok := config["interval"] - if ok { - intervalStr, ok := interval.(string) - if !ok { - badPurgeUploadConfig("interval is not a string") - } - - intervalDuration, err = time.ParseDuration(intervalStr) - if err != nil { - badPurgeUploadConfig(fmt.Sprintf("Cannot parse interval: %s", err.Error())) - } - } else { - badPurgeUploadConfig("interval missing") - } - - var dryRunBool bool - dryRun, ok := config["dryrun"] - if ok { - dryRunBool, ok = dryRun.(bool) - if !ok { - badPurgeUploadConfig("cannot parse dryrun") - } - } else { - badPurgeUploadConfig("dryrun missing") - } - - go func() { - rand.Seed(time.Now().Unix()) - jitter := time.Duration(rand.Int()%60) * time.Minute - log.Infof("Starting upload purge in %s", jitter) - time.Sleep(jitter) - - for { - storage.PurgeUploads(ctx, storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool) - log.Infof("Starting upload purge in %s", intervalDuration) - time.Sleep(intervalDuration) - } - }() -} diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go deleted file mode 100644 index 3a8e4e1e..00000000 --- a/docs/handlers/app_test.go +++ /dev/null @@ -1,274 +0,0 @@ -package handlers - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "testing" - - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/auth" - _ "github.com/docker/distribution/registry/auth/silly" - "github.com/docker/distribution/registry/storage" - memorycache "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver/testdriver" -) - -// TestAppDispatcher builds an application with a test dispatcher and ensures -// that requests are properly dispatched and the handlers are constructed. -// This only tests the dispatch mechanism. The underlying dispatchers must be -// tested individually. 
-func TestAppDispatcher(t *testing.T) { - driver := testdriver.New() - ctx := context.Background() - registry, err := storage.NewRegistry(ctx, driver, storage.BlobDescriptorCacheProvider(memorycache.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - app := &App{ - Config: &configuration.Configuration{}, - Context: ctx, - router: v2.Router(), - driver: driver, - registry: registry, - } - server := httptest.NewServer(app) - router := v2.Router() - - serverURL, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("error parsing server url: %v", err) - } - - varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { - return func(ctx *Context, r *http.Request) http.Handler { - // Always checks the same name context - if ctx.Repository.Named().Name() != getName(ctx) { - t.Fatalf("unexpected name: %q != %q", ctx.Repository.Named().Name(), "foo/bar") - } - - // Check that we have all that is expected - for expectedK, expectedV := range expectedVars { - if ctx.Value(expectedK) != expectedV { - t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.Value(expectedK), expectedV) - } - } - - // Check that we only have variables that are expected - for k, v := range ctx.Value("vars").(map[string]string) { - _, ok := expectedVars[k] - - if !ok { // name is checked on context - // We have an unexpected key, fail - t.Fatalf("unexpected key %q in vars with value %q", k, v) - } - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - }) - } - } - - // unflatten a list of variables, suitable for gorilla/mux, to a map[string]string - unflatten := func(vars []string) map[string]string { - m := make(map[string]string) - for i := 0; i < len(vars)-1; i = i + 2 { - m[vars[i]] = vars[i+1] - } - - return m - } - - for _, testcase := range []struct { - endpoint string - vars []string - }{ - { - endpoint: v2.RouteNameManifest, - vars: []string{ - "name", "foo/bar", - "reference", "sometag", - }, - }, - { - endpoint: v2.RouteNameTags, - vars: []string{ - "name", "foo/bar", - }, - }, - { - endpoint: v2.RouteNameBlobUpload, - vars: []string{ - "name", "foo/bar", - }, - }, - { - endpoint: v2.RouteNameBlobUploadChunk, - vars: []string{ - "name", "foo/bar", - "uuid", "theuuid", - }, - }, - } { - app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars))) - route := router.GetRoute(testcase.endpoint).Host(serverURL.Host) - u, err := route.URL(testcase.vars...) - - if err != nil { - t.Fatal(err) - } - - resp, err := http.Get(u.String()) - - if err != nil { - t.Fatal(err) - } - - if resp.StatusCode != http.StatusOK { - t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK) - } - } -} - -// TestNewApp covers the creation of an application via NewApp with a -// configuration. -func TestNewApp(t *testing.T) { - ctx := context.Background() - config := configuration.Configuration{ - Storage: configuration.Storage{ - "testdriver": nil, - }, - Auth: configuration.Auth{ - // For now, we simply test that new auth results in a viable - // application. - "silly": { - "realm": "realm-test", - "service": "service-test", - }, - }, - } - - // Mostly, with this test, given a sane configuration, we are simply - // ensuring that NewApp doesn't panic. We might want to tweak this - // behavior. 
- app := NewApp(ctx, &config) - - server := httptest.NewServer(app) - builder, err := v2.NewURLBuilderFromString(server.URL, false) - if err != nil { - t.Fatalf("error creating urlbuilder: %v", err) - } - - baseURL, err := builder.BuildBaseURL() - if err != nil { - t.Fatalf("error creating baseURL: %v", err) - } - - // TODO(stevvooe): The rest of this test might belong in the API tests. - - // Just hit the app and make sure we get a 401 Unauthorized error. - req, err := http.Get(baseURL) - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer req.Body.Close() - - if req.StatusCode != http.StatusUnauthorized { - t.Fatalf("unexpected status code during request: %v", err) - } - - if req.Header.Get("Content-Type") != "application/json; charset=utf-8" { - t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8") - } - - expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\"" - if e, a := expectedAuthHeader, req.Header.Get("WWW-Authenticate"); e != a { - t.Fatalf("unexpected WWW-Authenticate header: %q != %q", e, a) - } - - var errs errcode.Errors - dec := json.NewDecoder(req.Body) - if err := dec.Decode(&errs); err != nil { - t.Fatalf("error decoding error response: %v", err) - } - - err2, ok := errs[0].(errcode.ErrorCoder) - if !ok { - t.Fatalf("not an ErrorCoder: %#v", errs[0]) - } - if err2.ErrorCode() != errcode.ErrorCodeUnauthorized { - t.Fatalf("unexpected error code: %v != %v", err2.ErrorCode(), errcode.ErrorCodeUnauthorized) - } -} - -// Test the access record accumulator -func TestAppendAccessRecords(t *testing.T) { - repo := "testRepo" - - expectedResource := auth.Resource{ - Type: "repository", - Name: repo, - } - - expectedPullRecord := auth.Access{ - Resource: expectedResource, - Action: "pull", - } - expectedPushRecord := auth.Access{ - Resource: expectedResource, - Action: "push", - } - expectedAllRecord := auth.Access{ - Resource: expectedResource, - Action: "*", - } - - records := []auth.Access{} - result := appendAccessRecords(records, "GET", repo) - expectedResult := []auth.Access{expectedPullRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "HEAD", repo) - expectedResult = []auth.Access{expectedPullRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "POST", repo) - expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "PUT", repo) - expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "PATCH", repo) - expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "DELETE", repo) - expectedResult = []auth.Access{expectedAllRecord} - if ok := reflect.DeepEqual(result, 
expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - -} diff --git a/docs/handlers/basicauth.go b/docs/handlers/basicauth.go deleted file mode 100644 index 8727a3cd..00000000 --- a/docs/handlers/basicauth.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.4 - -package handlers - -import ( - "net/http" -) - -func basicAuth(r *http.Request) (username, password string, ok bool) { - return r.BasicAuth() -} diff --git a/docs/handlers/basicauth_prego14.go b/docs/handlers/basicauth_prego14.go deleted file mode 100644 index 6cf10a25..00000000 --- a/docs/handlers/basicauth_prego14.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !go1.4 - -package handlers - -import ( - "encoding/base64" - "net/http" - "strings" -) - -// NOTE(stevvooe): This is basic auth support from go1.4 present to ensure we -// can compile on go1.3 and earlier. - -// BasicAuth returns the username and password provided in the request's -// Authorization header, if the request uses HTTP Basic Authentication. -// See RFC 2617, Section 2. -func basicAuth(r *http.Request) (username, password string, ok bool) { - auth := r.Header.Get("Authorization") - if auth == "" { - return - } - return parseBasicAuth(auth) -} - -// parseBasicAuth parses an HTTP Basic Authentication string. -// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true). -func parseBasicAuth(auth string) (username, password string, ok bool) { - if !strings.HasPrefix(auth, "Basic ") { - return - } - c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) - if err != nil { - return - } - cs := string(c) - s := strings.IndexByte(cs, ':') - if s < 0 { - return - } - return cs[:s], cs[s+1:], true -} diff --git a/docs/handlers/blob.go b/docs/handlers/blob.go deleted file mode 100644 index fb250acd..00000000 --- a/docs/handlers/blob.go +++ /dev/null @@ -1,99 +0,0 @@ -package handlers - -import ( - "net/http" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// blobDispatcher uses the request context to build a blobHandler. -func blobDispatcher(ctx *Context, r *http.Request) http.Handler { - dgst, err := getDigest(ctx) - if err != nil { - - if err == errDigestNotAvailable { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) - }) - } - - blobHandler := &blobHandler{ - Context: ctx, - Digest: dgst, - } - - mhandler := handlers.MethodHandler{ - "GET": http.HandlerFunc(blobHandler.GetBlob), - "HEAD": http.HandlerFunc(blobHandler.GetBlob), - } - - if !ctx.readOnly { - mhandler["DELETE"] = http.HandlerFunc(blobHandler.DeleteBlob) - } - - return mhandler -} - -// blobHandler serves http blob requests. -type blobHandler struct { - *Context - - Digest digest.Digest -} - -// GetBlob fetches the binary data from backend storage returns it in the -// response. 
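The two basicauth files above only backfill (*http.Request).BasicAuth for pre-1.4 toolchains; the round trip they implement can be exercised with the standard library alone (hypothetical credentials):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest("GET", "http://registry.example.com/v2/", nil)

	// SetBasicAuth writes "Authorization: Basic base64(user:pass)",
	// the exact form parseBasicAuth above decodes.
	req.SetBasicAuth("Aladdin", "open sesame")
	fmt.Println(req.Header.Get("Authorization"))
	// Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==

	if user, pass, ok := req.BasicAuth(); ok {
		fmt.Printf("user=%q pass=%q\n", user, pass)
	}
}
```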
-func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { - context.GetLogger(bh).Debug("GetBlob") - blobs := bh.Repository.Blobs(bh) - desc, err := blobs.Stat(bh, bh.Digest) - if err != nil { - if err == distribution.ErrBlobUnknown { - bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest)) - } else { - bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - return - } - - if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil { - context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err) - bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } -} - -// DeleteBlob deletes a layer blob -func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) { - context.GetLogger(bh).Debug("DeleteBlob") - - blobs := bh.Repository.Blobs(bh) - err := blobs.Delete(bh, bh.Digest) - if err != nil { - switch err { - case distribution.ErrUnsupported: - bh.Errors = append(bh.Errors, errcode.ErrorCodeUnsupported) - return - case distribution.ErrBlobUnknown: - bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) - return - default: - bh.Errors = append(bh.Errors, err) - context.GetLogger(bh).Errorf("Unknown error deleting blob: %s", err.Error()) - return - } - } - - w.Header().Set("Content-Length", "0") - w.WriteHeader(http.StatusAccepted) -} diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go deleted file mode 100644 index 3afb4739..00000000 --- a/docs/handlers/blobupload.go +++ /dev/null @@ -1,368 +0,0 @@ -package handlers - -import ( - "fmt" - "net/http" - "net/url" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/storage" - "github.com/gorilla/handlers" -) - -// blobUploadDispatcher constructs and returns the blob upload handler for the -// given request context. 
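GetBlob below stats the blob before serving so an unknown digest surfaces as BLOB_UNKNOWN rather than a failure mid-response. A condensed sketch of that shape, with blobStore as a hypothetical stand-in for the Stat and ServeBlob methods used from distribution.BlobStore:

```go
package handlers

import "net/http"

// blobStore is a hypothetical stand-in for the two methods this
// pattern needs from distribution.BlobStore.
type blobStore interface {
	Stat(dgst string) error
	ServeBlob(w http.ResponseWriter, r *http.Request, dgst string) error
}

// serveBlob condenses the stat-then-serve shape of GetBlob: a missing
// blob becomes a clean 404 before any bytes are written.
func serveBlob(bs blobStore, w http.ResponseWriter, r *http.Request, dgst string) {
	if err := bs.Stat(dgst); err != nil {
		http.Error(w, "blob unknown to registry", http.StatusNotFound)
		return
	}
	if err := bs.ServeBlob(w, r, dgst); err != nil {
		http.Error(w, "internal error", http.StatusInternalServerError)
	}
}
```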
-func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { - buh := &blobUploadHandler{ - Context: ctx, - UUID: getUploadUUID(ctx), - } - - handler := handlers.MethodHandler{ - "GET": http.HandlerFunc(buh.GetUploadStatus), - "HEAD": http.HandlerFunc(buh.GetUploadStatus), - } - - if !ctx.readOnly { - handler["POST"] = http.HandlerFunc(buh.StartBlobUpload) - handler["PATCH"] = http.HandlerFunc(buh.PatchBlobData) - handler["PUT"] = http.HandlerFunc(buh.PutBlobUploadComplete) - handler["DELETE"] = http.HandlerFunc(buh.CancelBlobUpload) - } - - if buh.UUID != "" { - state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state")) - if err != nil { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err) - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - }) - } - buh.State = state - - if state.Name != ctx.Repository.Named().Name() { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Named().Name()) - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - }) - } - - if state.UUID != buh.UUID { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID) - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - }) - } - - blobs := ctx.Repository.Blobs(buh) - upload, err := blobs.Resume(buh, buh.UUID) - if err != nil { - ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) - if err == distribution.ErrBlobUploadUnknown { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err)) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - }) - } - buh.Upload = upload - - if size := upload.Size(); size != buh.State.Offset { - defer upload.Close() - ctxu.GetLogger(ctx).Errorf("upload resumed at wrong offest: %d != %d", size, buh.State.Offset) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - upload.Cancel(buh) - }) - } - return closeResources(handler, buh.Upload) - } - - return handler -} - -// blobUploadHandler handles the http blob upload process. -type blobUploadHandler struct { - *Context - - // UUID identifies the upload instance for the current request. Using UUID - // to key blob writers since this implementation uses UUIDs. - UUID string - - Upload distribution.BlobWriter - - State blobUploadState -} - -// StartBlobUpload begins the blob upload process and allocates a server-side -// blob writer session, optionally mounting the blob from a separate repository. -func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { - var options []distribution.BlobCreateOption - - fromRepo := r.FormValue("from") - mountDigest := r.FormValue("mount") - - if mountDigest != "" && fromRepo != "" { - opt, err := buh.createBlobMountOption(fromRepo, mountDigest) - if opt != nil && err == nil { - options = append(options, opt) - } - } - - blobs := buh.Repository.Blobs(buh) - upload, err := blobs.Create(buh, options...) 
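The dispatcher above refuses to resume an upload unless the HMAC-signed `_state` token verifies and its name, UUID, and offset agree with the request. The signing construction itself lives in hmac.go further down in this patch; here is a self-contained round trip of the same scheme (MAC prepended to the JSON message, URL-safe base64), with hypothetical field values:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"time"
)

// uploadState mirrors blobUploadState as carried in the _state parameter.
type uploadState struct {
	Name      string
	UUID      string
	Offset    int64
	StartedAt time.Time
}

// pack signs the JSON-encoded state and prepends the MAC, matching
// hmacKey.packUploadState defined later in this patch.
func pack(secret []byte, s uploadState) (string, error) {
	p, err := json.Marshal(s)
	if err != nil {
		return "", err
	}
	mac := hmac.New(sha256.New, secret)
	mac.Write(p)
	return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil
}

// unpack verifies the MAC before trusting any field of the state.
func unpack(secret []byte, token string) (uploadState, error) {
	var s uploadState
	raw, err := base64.URLEncoding.DecodeString(token)
	if err != nil {
		return s, err
	}
	mac := hmac.New(sha256.New, secret)
	if len(raw) < mac.Size() {
		return s, errors.New("invalid token")
	}
	sig, msg := raw[:mac.Size()], raw[mac.Size():]
	mac.Write(msg)
	if !hmac.Equal(mac.Sum(nil), sig) {
		return s, errors.New("invalid token")
	}
	if err := json.Unmarshal(msg, &s); err != nil {
		return s, err
	}
	return s, nil
}

func main() {
	secret := []byte("supersecret") // hypothetical HTTP secret
	token, _ := pack(secret, uploadState{Name: "foo/bar", UUID: "abcd-1234", Offset: 1337})

	s, err := unpack(secret, token)
	fmt.Println(s.Name, s.Offset, err) // foo/bar 1337 <nil>

	// A different secret must reject the token.
	_, err = unpack([]byte("other"), token)
	fmt.Println(err) // invalid token
}
```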
- - if err != nil { - if ebm, ok := err.(distribution.ErrBlobMounted); ok { - if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - } else if err == distribution.ErrUnsupported { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) - } else { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - return - } - - buh.Upload = upload - - if err := buh.blobUploadResponse(w, r, true); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - w.Header().Set("Docker-Upload-UUID", buh.Upload.ID()) - w.WriteHeader(http.StatusAccepted) -} - -// GetUploadStatus returns the status of a given upload, identified by id. -func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { - if buh.Upload == nil { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) - return - } - - // TODO(dmcgowan): Set last argument to false in blobUploadResponse when - // resumable upload is supported. This will enable returning a non-zero - // range for clients to begin uploading at an offset. - if err := buh.blobUploadResponse(w, r, true); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - w.Header().Set("Docker-Upload-UUID", buh.UUID) - w.WriteHeader(http.StatusNoContent) -} - -// PatchBlobData writes data to an upload. -func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { - if buh.Upload == nil { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) - return - } - - ct := r.Header.Get("Content-Type") - if ct != "" && ct != "application/octet-stream" { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type"))) - // TODO(dmcgowan): encode error - return - } - - // TODO(dmcgowan): support Content-Range header to seek and write range - - if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil { - // copyFullPayload reports the error if necessary - return - } - - if err := buh.blobUploadResponse(w, r, false); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - w.WriteHeader(http.StatusAccepted) -} - -// PutBlobUploadComplete takes the final request of a blob upload. The -// request may include all the blob data or no blob data. Any data -// provided is received and verified. If successful, the blob is linked -// into the blob store and 201 Created is returned with the canonical -// url of the blob. -func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { - if buh.Upload == nil { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) - return - } - - dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! - - if dgstStr == "" { - // no digest? return error, but allow retry. - buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing")) - return - } - - dgst, err := digest.ParseDigest(dgstStr) - if err != nil { - // no digest? return error, but allow retry. 
- buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed")) - return - } - - if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil { - // copyFullPayload reports the error if necessary - return - } - - desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ - Digest: dgst, - - // TODO(stevvooe): This isn't wildly important yet, but we should - // really set the mediatype. For now, we can let the backend take care - // of this. - }) - - if err != nil { - switch err := err.(type) { - case distribution.ErrBlobInvalidDigest: - buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) - case errcode.Error: - buh.Errors = append(buh.Errors, err) - default: - switch err { - case distribution.ErrAccessDenied: - buh.Errors = append(buh.Errors, errcode.ErrorCodeDenied) - case distribution.ErrUnsupported: - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) - case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - default: - ctxu.GetLogger(buh).Errorf("unknown error completing upload: %v", err) - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - - } - - // Clean up the backend blob data if there was an error. - if err := buh.Upload.Cancel(buh); err != nil { - // If the cleanup fails, all we can do is observe and report. - ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err) - } - - return - } - if err := buh.writeBlobCreatedHeaders(w, desc); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } -} - -// CancelBlobUpload cancels an in-progress upload of a blob. -func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) { - if buh.Upload == nil { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) - return - } - - w.Header().Set("Docker-Upload-UUID", buh.UUID) - if err := buh.Upload.Cancel(buh); err != nil { - ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - - w.WriteHeader(http.StatusNoContent) -} - -// blobUploadResponse provides a standard request for uploading blobs and -// chunk responses. This sets the correct headers but the response status is -// left to the caller. The fresh argument is used to ensure that new blob -// uploads always start at a 0 offset. This allows disabling resumable push by -// always returning a 0 offset on check status. -func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error { - // TODO(stevvooe): Need a better way to manage the upload state automatically. 
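Commit refuses to link a blob whose content does not hash to the digest the client supplied. A toy version of that check for the sha256 case (an illustrative sketch, not the digest package's API):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// verify checks content against a digest of the form "sha256:<hex>",
// the same property Commit enforces before linking a blob.
func verify(dgst string, content []byte) bool {
	parts := strings.SplitN(dgst, ":", 2)
	if len(parts) != 2 || parts[0] != "sha256" {
		return false // only sha256 handled in this sketch
	}
	sum := sha256.Sum256(content)
	return fmt.Sprintf("%x", sum) == parts[1]
}

func main() {
	content := []byte("layer data")
	dgst := fmt.Sprintf("sha256:%x", sha256.Sum256(content))

	fmt.Println(verify(dgst, content))                 // true
	fmt.Println(verify(dgst, []byte("tampered data"))) // false
}
```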
- buh.State.Name = buh.Repository.Named().Name() - buh.State.UUID = buh.Upload.ID() - buh.Upload.Close() - buh.State.Offset = buh.Upload.Size() - buh.State.StartedAt = buh.Upload.StartedAt() - - token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State) - if err != nil { - ctxu.GetLogger(buh).Infof("error building upload state token: %s", err) - return err - } - - uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL( - buh.Repository.Named(), buh.Upload.ID(), - url.Values{ - "_state": []string{token}, - }) - if err != nil { - ctxu.GetLogger(buh).Infof("error building upload url: %s", err) - return err - } - - endRange := buh.Upload.Size() - if endRange > 0 { - endRange = endRange - 1 - } - - w.Header().Set("Docker-Upload-UUID", buh.UUID) - w.Header().Set("Location", uploadURL) - - w.Header().Set("Content-Length", "0") - w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) - - return nil -} - -// mountBlob attempts to mount a blob from another repository by its digest. If -// successful, the blob is linked into the blob store and 201 Created is -// returned with the canonical url of the blob. -func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string) (distribution.BlobCreateOption, error) { - dgst, err := digest.ParseDigest(mountDigest) - if err != nil { - return nil, err - } - - ref, err := reference.ParseNamed(fromRepo) - if err != nil { - return nil, err - } - - canonical, err := reference.WithDigest(ref, dgst) - if err != nil { - return nil, err - } - - return storage.WithMountFrom(canonical), nil -} - -// writeBlobCreatedHeaders writes the standard headers describing a newly -// created blob. A 201 Created is written as well as the canonical URL and -// blob digest. -func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error { - ref, err := reference.WithDigest(buh.Repository.Named(), desc.Digest) - if err != nil { - return err - } - blobURL, err := buh.urlBuilder.BuildBlobURL(ref) - if err != nil { - return err - } - - w.Header().Set("Location", blobURL) - w.Header().Set("Content-Length", "0") - w.Header().Set("Docker-Content-Digest", desc.Digest.String()) - w.WriteHeader(http.StatusCreated) - return nil -} diff --git a/docs/handlers/catalog.go b/docs/handlers/catalog.go deleted file mode 100644 index 6ec1fe55..00000000 --- a/docs/handlers/catalog.go +++ /dev/null @@ -1,95 +0,0 @@ -package handlers - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/gorilla/handlers" -) - -const maximumReturnedEntries = 100 - -func catalogDispatcher(ctx *Context, r *http.Request) http.Handler { - catalogHandler := &catalogHandler{ - Context: ctx, - } - - return handlers.MethodHandler{ - "GET": http.HandlerFunc(catalogHandler.GetCatalog), - } -} - -type catalogHandler struct { - *Context -} - -type catalogAPIResponse struct { - Repositories []string `json:"repositories"` -} - -func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { - var moreEntries = true - - q := r.URL.Query() - lastEntry := q.Get("last") - maxEntries, err := strconv.Atoi(q.Get("n")) - if err != nil || maxEntries < 0 { - maxEntries = maximumReturnedEntries - } - - repos := make([]string, maxEntries) - - filled, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry) - if err == io.EOF { - moreEntries = false - } else if err != nil { - ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) 
- return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - - // Add a link header if there are more entries to retrieve - if moreEntries { - lastEntry = repos[len(repos)-1] - urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry) - if err != nil { - ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - w.Header().Set("Link", urlStr) - } - - enc := json.NewEncoder(w) - if err := enc.Encode(catalogAPIResponse{ - Repositories: repos[0:filled], - }); err != nil { - ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } -} - -// Use the original URL from the request to create a new URL for -// the link header -func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) { - calledURL, err := url.Parse(origURL) - if err != nil { - return "", err - } - - v := url.Values{} - v.Add("n", strconv.Itoa(maxEntries)) - v.Add("last", lastEntry) - - calledURL.RawQuery = v.Encode() - - calledURL.Fragment = "" - urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String()) - - return urlStr, nil -} diff --git a/docs/handlers/context.go b/docs/handlers/context.go deleted file mode 100644 index 552db2df..00000000 --- a/docs/handlers/context.go +++ /dev/null @@ -1,152 +0,0 @@ -package handlers - -import ( - "fmt" - "net/http" - "sync" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/auth" - "golang.org/x/net/context" -) - -// Context should contain the request specific context for use in across -// handlers. Resources that don't need to be shared across handlers should not -// be on this object. -type Context struct { - // App points to the application structure that created this context. - *App - context.Context - - // Repository is the repository for the current request. All requests - // should be scoped to a single repository. This field may be nil. - Repository distribution.Repository - - // Errors is a collection of errors encountered during the request to be - // returned to the client API. If errors are added to the collection, the - // handler *must not* start the response via http.ResponseWriter. - Errors errcode.Errors - - urlBuilder *v2.URLBuilder - - // TODO(stevvooe): The goal is too completely factor this context and - // dispatching out of the web application. Ideally, we should lean on - // context.Context for injection of these resources. -} - -// Value overrides context.Context.Value to ensure that calls are routed to -// correct context. 
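createLinkEntry above produces the RFC 5988 Link header that drives catalog pagination. A usage sketch showing the header value for a hypothetical catalog page:

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// nextLink reproduces createLinkEntry: re-encode the catalog URL with
// n and last, and wrap it in an RFC 5988 Link value.
func nextLink(origURL string, maxEntries int, lastEntry string) (string, error) {
	u, err := url.Parse(origURL)
	if err != nil {
		return "", err
	}
	v := url.Values{}
	v.Add("n", strconv.Itoa(maxEntries))
	v.Add("last", lastEntry)
	u.RawQuery = v.Encode()
	u.Fragment = ""
	return fmt.Sprintf(`<%s>; rel="next"`, u), nil
}

func main() {
	link, _ := nextLink("/v2/_catalog", 100, "my/repo")
	fmt.Println(link)
	// </v2/_catalog?last=my%2Frepo&n=100>; rel="next"
}
```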
-func (ctx *Context) Value(key interface{}) interface{} { - return ctx.Context.Value(key) -} - -func getName(ctx context.Context) (name string) { - return ctxu.GetStringValue(ctx, "vars.name") -} - -func getReference(ctx context.Context) (reference string) { - return ctxu.GetStringValue(ctx, "vars.reference") -} - -var errDigestNotAvailable = fmt.Errorf("digest not available in context") - -func getDigest(ctx context.Context) (dgst digest.Digest, err error) { - dgstStr := ctxu.GetStringValue(ctx, "vars.digest") - - if dgstStr == "" { - ctxu.GetLogger(ctx).Errorf("digest not available") - return "", errDigestNotAvailable - } - - d, err := digest.ParseDigest(dgstStr) - if err != nil { - ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err) - return "", err - } - - return d, nil -} - -func getUploadUUID(ctx context.Context) (uuid string) { - return ctxu.GetStringValue(ctx, "vars.uuid") -} - -// getUserName attempts to resolve a username from the context and request. If -// a username cannot be resolved, the empty string is returned. -func getUserName(ctx context.Context, r *http.Request) string { - username := ctxu.GetStringValue(ctx, auth.UserNameKey) - - // Fallback to request user with basic auth - if username == "" { - var ok bool - uname, _, ok := basicAuth(r) - if ok { - username = uname - } - } - - return username -} - -// contextManager allows us to associate net/context.Context instances with a -// request, based on the memory identity of http.Request. This prepares http- -// level context, which is not application specific. If this is called, -// (*contextManager).release must be called on the context when the request is -// completed. -// -// Providing this circumvents a lot of necessity for dispatchers with the -// benefit of instantiating the request context much earlier. -// -// TODO(stevvooe): Consider making this facility a part of the context package. -type contextManager struct { - contexts map[*http.Request]context.Context - mu sync.Mutex -} - -// defaultContextManager is just a global instance to register request contexts. -var defaultContextManager = newContextManager() - -func newContextManager() *contextManager { - return &contextManager{ - contexts: make(map[*http.Request]context.Context), - } -} - -// context either returns a new context or looks it up in the manager. -func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context { - cm.mu.Lock() - defer cm.mu.Unlock() - - ctx, ok := cm.contexts[r] - if ok { - return ctx - } - - if parent == nil { - parent = ctxu.Background() - } - - ctx = ctxu.WithRequest(parent, r) - ctx, w = ctxu.WithResponseWriter(ctx, w) - ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) - cm.contexts[r] = ctx - - return ctx -} - -// release frees any resources associated with the request.
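contextManager exists because, at the time this code was written, net/http offered no per-request context, so contexts are keyed on the memory identity of *http.Request. Go 1.7 (released shortly before this patch) carries this natively via Request.Context and WithContext; a minimal sketch of the equivalent:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// ctxKey avoids collisions with other packages' context keys.
type ctxKey string

func main() {
	// Since Go 1.7 the context rides on the request itself, which is
	// what contextManager's request-keyed map emulates for older Go.
	r := httptest.NewRequest(http.MethodGet, "/v2/", nil)
	r = r.WithContext(context.WithValue(r.Context(), ctxKey("vars.name"), "foo/bar"))

	fmt.Println(r.Context().Value(ctxKey("vars.name"))) // foo/bar
}
```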
-func (cm *contextManager) release(ctx context.Context) { - cm.mu.Lock() - defer cm.mu.Unlock() - - r, err := ctxu.GetRequest(ctx) - if err != nil { - ctxu.GetLogger(ctx).Errorf("no request found in context during release") - return - } - delete(cm.contexts, r) -} diff --git a/docs/handlers/health_test.go b/docs/handlers/health_test.go deleted file mode 100644 index 5fe65ede..00000000 --- a/docs/handlers/health_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package handlers - -import ( - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "os" - "testing" - "time" - - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" - "github.com/docker/distribution/health" -) - -func TestFileHealthCheck(t *testing.T) { - interval := time.Second - - tmpfile, err := ioutil.TempFile(os.TempDir(), "healthcheck") - if err != nil { - t.Fatalf("could not create temporary file: %v", err) - } - defer tmpfile.Close() - - config := &configuration.Configuration{ - Storage: configuration.Storage{ - "inmemory": configuration.Parameters{}, - }, - Health: configuration.Health{ - FileCheckers: []configuration.FileChecker{ - { - Interval: interval, - File: tmpfile.Name(), - }, - }, - }, - } - - ctx := context.Background() - - app := NewApp(ctx, config) - healthRegistry := health.NewRegistry() - app.RegisterHealthChecks(healthRegistry) - - // Wait for health check to happen - <-time.After(2 * interval) - - status := healthRegistry.CheckStatus() - if len(status) != 1 { - t.Fatal("expected 1 item in health check results") - } - if status[tmpfile.Name()] != "file exists" { - t.Fatal(`did not get "file exists" result for health check`) - } - - os.Remove(tmpfile.Name()) - - <-time.After(2 * interval) - if len(healthRegistry.CheckStatus()) != 0 { - t.Fatal("expected 0 items in health check results") - } -} - -func TestTCPHealthCheck(t *testing.T) { - interval := time.Second - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("could not create listener: %v", err) - } - addrStr := ln.Addr().String() - - // Start accepting - go func() { - for { - conn, err := ln.Accept() - if err != nil { - // listener was closed - return - } - defer conn.Close() - } - }() - - config := &configuration.Configuration{ - Storage: configuration.Storage{ - "inmemory": configuration.Parameters{}, - }, - Health: configuration.Health{ - TCPCheckers: []configuration.TCPChecker{ - { - Interval: interval, - Addr: addrStr, - Timeout: 500 * time.Millisecond, - }, - }, - }, - } - - ctx := context.Background() - - app := NewApp(ctx, config) - healthRegistry := health.NewRegistry() - app.RegisterHealthChecks(healthRegistry) - - // Wait for health check to happen - <-time.After(2 * interval) - - if len(healthRegistry.CheckStatus()) != 0 { - t.Fatal("expected 0 items in health check results") - } - - ln.Close() - <-time.After(2 * interval) - - // Health check should now fail - status := healthRegistry.CheckStatus() - if len(status) != 1 { - t.Fatal("expected 1 item in health check results") - } - if status[addrStr] != "connection to "+addrStr+" failed" { - t.Fatal(`did not get "connection failed" result for health check`) - } -} - -func TestHTTPHealthCheck(t *testing.T) { - interval := time.Second - threshold := 3 - - stopFailing := make(chan struct{}) - - checkedServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != "HEAD" { - t.Fatalf("expected HEAD request, got %s", r.Method) - } - select { - case <-stopFailing: - w.WriteHeader(http.StatusOK) - 
default: - w.WriteHeader(http.StatusInternalServerError) - } - })) - - config := &configuration.Configuration{ - Storage: configuration.Storage{ - "inmemory": configuration.Parameters{}, - }, - Health: configuration.Health{ - HTTPCheckers: []configuration.HTTPChecker{ - { - Interval: interval, - URI: checkedServer.URL, - Threshold: threshold, - }, - }, - }, - } - - ctx := context.Background() - - app := NewApp(ctx, config) - healthRegistry := health.NewRegistry() - app.RegisterHealthChecks(healthRegistry) - - for i := 0; ; i++ { - <-time.After(interval) - - status := healthRegistry.CheckStatus() - - if i < threshold-1 { - // definitely shouldn't have hit the threshold yet - if len(status) != 0 { - t.Fatal("expected 1 item in health check results") - } - continue - } - if i < threshold+1 { - // right on the threshold - don't expect a failure yet - continue - } - - if len(status) != 1 { - t.Fatal("expected 1 item in health check results") - } - if status[checkedServer.URL] != "downstream service returned unexpected status: 500" { - t.Fatal("did not get expected result for health check") - } - - break - } - - // Signal HTTP handler to start returning 200 - close(stopFailing) - - <-time.After(2 * interval) - - if len(healthRegistry.CheckStatus()) != 0 { - t.Fatal("expected 0 items in health check results") - } -} diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go deleted file mode 100644 index dac4f7a8..00000000 --- a/docs/handlers/helpers.go +++ /dev/null @@ -1,66 +0,0 @@ -package handlers - -import ( - "errors" - "io" - "net/http" - - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/api/errcode" -) - -// closeResources closes all the provided resources after running the target -// handler. -func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - for _, closer := range closers { - defer closer.Close() - } - handler.ServeHTTP(w, r) - }) -} - -// copyFullPayload copies the payload of an HTTP request to destWriter. If it -// receives less content than expected, and the client disconnected during the -// upload, it avoids sending a 400 error to keep the logs cleaner. -func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error { - // Get a channel that tells us if the client disconnects - var clientClosed <-chan bool - if notifier, ok := responseWriter.(http.CloseNotifier); ok { - clientClosed = notifier.CloseNotify() - } else { - ctxu.GetLogger(context).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter) - } - - // Read in the data, if any. - copied, err := io.Copy(destWriter, r.Body) - if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { - // Didn't receive as much content as expected. Did the client - // disconnect during the request? If so, avoid returning a 400 - // error to keep the logs cleaner. - select { - case <-clientClosed: - // Set the response code to "499 Client Closed Request" - // Even though the connection has already been closed, - // this causes the logger to pick up a 499 error - // instead of showing 0 for the HTTP status. 
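copyFullPayload above distinguishes a client that hung up mid-upload from a genuinely bad payload, recording nginx's non-standard 499 instead of a 400. A trimmed sketch of that pattern; CloseNotifier was the pre-context way to observe disconnects:

```go
package handlers

import (
	"io"
	"net/http"
)

// statusClientClosedRequest is nginx's non-standard code for a client
// that disconnected before the response was written.
const statusClientClosedRequest = 499

// drainBody copies the request body and, on a short read, checks
// whether the client disconnected, following copyFullPayload above.
func drainBody(w http.ResponseWriter, r *http.Request, dst io.Writer) error {
	var closed <-chan bool
	if cn, ok := w.(http.CloseNotifier); ok {
		closed = cn.CloseNotify()
	}

	copied, err := io.Copy(dst, r.Body)
	if closed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) {
		select {
		case <-closed:
			// Record 499 rather than blaming the payload with a 400.
			w.WriteHeader(statusClientClosedRequest)
			return io.ErrUnexpectedEOF
		default:
		}
	}
	return err
}
```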
- responseWriter.WriteHeader(499) - - ctxu.GetLoggerWithFields(context, map[interface{}]interface{}{ - "error": err, - "copied": copied, - "contentLength": r.ContentLength, - }, "error", "copied", "contentLength").Error("client disconnected during " + action) - return errors.New("client disconnected") - default: - } - } - - if err != nil { - ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err) - *errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err)) - return err - } - - return nil -} diff --git a/docs/handlers/hmac.go b/docs/handlers/hmac.go deleted file mode 100644 index 1725d240..00000000 --- a/docs/handlers/hmac.go +++ /dev/null @@ -1,72 +0,0 @@ -package handlers - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "fmt" - "time" -) - -// blobUploadState captures the state serializable state of the blob upload. -type blobUploadState struct { - // name is the primary repository under which the blob will be linked. - Name string - - // UUID identifies the upload. - UUID string - - // offset contains the current progress of the upload. - Offset int64 - - // StartedAt is the original start time of the upload. - StartedAt time.Time -} - -type hmacKey string - -// unpackUploadState unpacks and validates the blob upload state from the -// token, using the hmacKey secret. -func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) { - var state blobUploadState - - tokenBytes, err := base64.URLEncoding.DecodeString(token) - if err != nil { - return state, err - } - mac := hmac.New(sha256.New, []byte(secret)) - - if len(tokenBytes) < mac.Size() { - return state, fmt.Errorf("Invalid token") - } - - macBytes := tokenBytes[:mac.Size()] - messageBytes := tokenBytes[mac.Size():] - - mac.Write(messageBytes) - if !hmac.Equal(mac.Sum(nil), macBytes) { - return state, fmt.Errorf("Invalid token") - } - - if err := json.Unmarshal(messageBytes, &state); err != nil { - return state, err - } - - return state, nil -} - -// packUploadState packs the upload state signed with and hmac digest using -// the hmacKey secret, encoding to url safe base64. The resulting token can be -// used to share data with minimized risk of external tampering. -func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) { - mac := hmac.New(sha256.New, []byte(secret)) - p, err := json.Marshal(lus) - if err != nil { - return "", err - } - - mac.Write(p) - - return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil -} diff --git a/docs/handlers/hmac_test.go b/docs/handlers/hmac_test.go deleted file mode 100644 index 366c7279..00000000 --- a/docs/handlers/hmac_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package handlers - -import "testing" - -var blobUploadStates = []blobUploadState{ - { - Name: "hello", - UUID: "abcd-1234-qwer-0987", - Offset: 0, - }, - { - Name: "hello-world", - UUID: "abcd-1234-qwer-0987", - Offset: 0, - }, - { - Name: "h3ll0_w0rld", - UUID: "abcd-1234-qwer-0987", - Offset: 1337, - }, - { - Name: "ABCDEFG", - UUID: "ABCD-1234-QWER-0987", - Offset: 1234567890, - }, - { - Name: "this-is-A-sort-of-Long-name-for-Testing", - UUID: "dead-1234-beef-0987", - Offset: 8675309, - }, -} - -var secrets = []string{ - "supersecret", - "12345", - "a", - "SuperSecret", - "Sup3r... 
S3cr3t!", - "This is a reasonably long secret key that is used for the purpose of testing.", - "\u2603+\u2744", // snowman+snowflake -} - -// TestLayerUploadTokens constructs stateTokens from LayerUploadStates and -// validates that the tokens can be used to reconstruct the proper upload state. -func TestLayerUploadTokens(t *testing.T) { - secret := hmacKey("supersecret") - - for _, testcase := range blobUploadStates { - token, err := secret.packUploadState(testcase) - if err != nil { - t.Fatal(err) - } - - lus, err := secret.unpackUploadState(token) - if err != nil { - t.Fatal(err) - } - - assertBlobUploadStateEquals(t, testcase, lus) - } -} - -// TestHMACValidate ensures that any HMAC token providers are compatible if and -// only if they share the same secret. -func TestHMACValidation(t *testing.T) { - for _, secret := range secrets { - secret1 := hmacKey(secret) - secret2 := hmacKey(secret) - badSecret := hmacKey("DifferentSecret") - - for _, testcase := range blobUploadStates { - token, err := secret1.packUploadState(testcase) - if err != nil { - t.Fatal(err) - } - - lus, err := secret2.unpackUploadState(token) - if err != nil { - t.Fatal(err) - } - - assertBlobUploadStateEquals(t, testcase, lus) - - _, err = badSecret.unpackUploadState(token) - if err == nil { - t.Fatalf("Expected token provider to fail at retrieving state from token: %s", token) - } - - badToken, err := badSecret.packUploadState(lus) - if err != nil { - t.Fatal(err) - } - - _, err = secret1.unpackUploadState(badToken) - if err == nil { - t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) - } - - _, err = secret2.unpackUploadState(badToken) - if err == nil { - t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) - } - } - } -} - -func assertBlobUploadStateEquals(t *testing.T, expected blobUploadState, received blobUploadState) { - if expected.Name != received.Name { - t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name) - } - if expected.UUID != received.UUID { - t.Fatalf("Expected UUID=%q, Received UUID=%q", expected.UUID, received.UUID) - } - if expected.Offset != received.Offset { - t.Fatalf("Expected Offset=%d, Received Offset=%d", expected.Offset, received.Offset) - } -} diff --git a/docs/handlers/hooks.go b/docs/handlers/hooks.go deleted file mode 100644 index 7bbab4f8..00000000 --- a/docs/handlers/hooks.go +++ /dev/null @@ -1,53 +0,0 @@ -package handlers - -import ( - "bytes" - "errors" - "fmt" - "strings" - "text/template" - - "github.com/Sirupsen/logrus" -) - -// logHook is for hooking Panic in web application -type logHook struct { - LevelsParam []string - Mail *mailer -} - -// Fire forwards an error to LogHook -func (hook *logHook) Fire(entry *logrus.Entry) error { - addr := strings.Split(hook.Mail.Addr, ":") - if len(addr) != 2 { - return errors.New("Invalid Mail Address") - } - host := addr[0] - subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message) - - html := ` - {{.Message}} - - {{range $key, $value := .Data}} - {{$key}}: {{$value}} - {{end}} - ` - b := bytes.NewBuffer(make([]byte, 0)) - t := template.Must(template.New("mail body").Parse(html)) - if err := t.Execute(b, entry); err != nil { - return err - } - body := fmt.Sprintf("%s", b) - - return hook.Mail.sendMail(subject, body) -} - -// Levels contains hook levels to be catched -func (hook *logHook) Levels() []logrus.Level { - levels := []logrus.Level{} - for _, v := range hook.LevelsParam { - lv, _ := logrus.ParseLevel(v) - levels = 
append(levels, lv) - } - return levels -} diff --git a/docs/handlers/images.go b/docs/handlers/images.go deleted file mode 100644 index df7f869b..00000000 --- a/docs/handlers/images.go +++ /dev/null @@ -1,386 +0,0 @@ -package handlers - -import ( - "bytes" - "fmt" - "net/http" - "strings" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// These constants determine which architecture and OS to choose from a -// manifest list when downconverting it to a schema1 manifest. -const ( - defaultArch = "amd64" - defaultOS = "linux" -) - -// imageManifestDispatcher takes the request context and builds the -// appropriate handler for handling image manifest requests. -func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { - imageManifestHandler := &imageManifestHandler{ - Context: ctx, - } - reference := getReference(ctx) - dgst, err := digest.ParseDigest(reference) - if err != nil { - // We just have a tag - imageManifestHandler.Tag = reference - } else { - imageManifestHandler.Digest = dgst - } - - mhandler := handlers.MethodHandler{ - "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), - "HEAD": http.HandlerFunc(imageManifestHandler.GetImageManifest), - } - - if !ctx.readOnly { - mhandler["PUT"] = http.HandlerFunc(imageManifestHandler.PutImageManifest) - mhandler["DELETE"] = http.HandlerFunc(imageManifestHandler.DeleteImageManifest) - } - - return mhandler -} - -// imageManifestHandler handles http operations on image manifests. -type imageManifestHandler struct { - *Context - - // One of tag or digest gets set, depending on what is present in context. - Tag string - Digest digest.Digest -} - -// GetImageManifest fetches the image manifest from the storage backend, if it exists. -func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(imh).Debug("GetImageManifest") - manifests, err := imh.Repository.Manifests(imh) - if err != nil { - imh.Errors = append(imh.Errors, err) - return - } - - var manifest distribution.Manifest - if imh.Tag != "" { - tags := imh.Repository.Tags(imh) - desc, err := tags.Get(imh, imh.Tag) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - return - } - imh.Digest = desc.Digest - } - - if etagMatch(r, imh.Digest.String()) { - w.WriteHeader(http.StatusNotModified) - return - } - - var options []distribution.ManifestServiceOption - if imh.Tag != "" { - options = append(options, distribution.WithTag(imh.Tag)) - } - manifest, err = manifests.Get(imh, imh.Digest, options...) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - return - } - - supportsSchema2 := false - supportsManifestList := false - // this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values - // https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202 - for _, acceptHeader := range r.Header["Accept"] { - // r.Header[...] 
is a slice in case the request contains the same header more than once - // if the header isn't set, we'll get the zero value, which "range" will handle gracefully - - // we need to split each header value on "," to get the full list of "Accept" values (per RFC 2616) - // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1 - for _, mediaType := range strings.Split(acceptHeader, ",") { - // remove "; q=..." if present - if i := strings.Index(mediaType, ";"); i >= 0 { - mediaType = mediaType[:i] - } - - // it's common (but not required) for Accept values to be space separated ("a/b, c/d, e/f") - mediaType = strings.TrimSpace(mediaType) - - if mediaType == schema2.MediaTypeManifest { - supportsSchema2 = true - } - if mediaType == manifestlist.MediaTypeManifestList { - supportsManifestList = true - } - } - } - - schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) - manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) - - // Only rewrite schema2 manifests when they are being fetched by tag. - // If they are being fetched by digest, we can't return something not - // matching the digest. - if imh.Tag != "" && isSchema2 && !supportsSchema2 { - // Rewrite manifest in schema1 format - ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) - - manifest, err = imh.convertSchema2Manifest(schema2Manifest) - if err != nil { - return - } - } else if imh.Tag != "" && isManifestList && !supportsManifestList { - // Rewrite manifest in schema1 format - ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) - - // Find the image manifest corresponding to the default - // platform - var manifestDigest digest.Digest - for _, manifestDescriptor := range manifestList.Manifests { - if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS { - manifestDigest = manifestDescriptor.Digest - break - } - } - - if manifestDigest == "" { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) - return - } - - manifest, err = manifests.Get(imh, manifestDigest) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - return - } - - // If necessary, convert the image manifest - if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supportsSchema2 { - manifest, err = imh.convertSchema2Manifest(schema2Manifest) - if err != nil { - return - } - } - } - - ct, p, err := manifest.Payload() - if err != nil { - return - } - - w.Header().Set("Content-Type", ct) - w.Header().Set("Content-Length", fmt.Sprint(len(p))) - w.Header().Set("Docker-Content-Digest", imh.Digest.String()) - w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) - w.Write(p) -} - -func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) { - targetDescriptor := schema2Manifest.Target() - blobs := imh.Repository.Blobs(imh) - configJSON, err := blobs.Get(imh, targetDescriptor.Digest) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) - return nil, err - } - - ref := imh.Repository.Named() - - if imh.Tag != "" { - ref, err = reference.WithTag(ref, imh.Tag) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(err)) - return nil, err - } - } - - builder := 
schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON) - for _, d := range schema2Manifest.References() { - if err := builder.AppendReference(d); err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) - return nil, err - } - } - manifest, err := builder.Build(imh) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) - return nil, err - } - imh.Digest = digest.FromBytes(manifest.(*schema1.SignedManifest).Canonical) - - return manifest, nil -} - -func etagMatch(r *http.Request, etag string) bool { - for _, headerVal := range r.Header["If-None-Match"] { - if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted - return true - } - } - return false -} - -// PutImageManifest validates and stores an image in the registry. -func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(imh).Debug("PutImageManifest") - manifests, err := imh.Repository.Manifests(imh) - if err != nil { - imh.Errors = append(imh.Errors, err) - return - } - - var jsonBuf bytes.Buffer - if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil { - // copyFullPayload reports the error if necessary - return - } - - mediaType := r.Header.Get("Content-Type") - manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes()) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) - return - } - - if imh.Digest != "" { - if desc.Digest != imh.Digest { - ctxu.GetLogger(imh).Errorf("payload digest does not match: %q != %q", desc.Digest, imh.Digest) - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) - return - } - } else if imh.Tag != "" { - imh.Digest = desc.Digest - } else { - imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified")) - return - } - - var options []distribution.ManifestServiceOption - if imh.Tag != "" { - options = append(options, distribution.WithTag(imh.Tag)) - } - _, err = manifests.Put(imh, manifest, options...) - if err != nil { - // TODO(stevvooe): These error handling switches really need to be - // handled by an app global mapper.
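etagMatch above accepts the digest either bare or wrapped in quotes, since clients echo ETags in both forms. A small demonstration, with a hypothetical digest value:

```go
package main

import (
	"fmt"
	"net/http"
)

// etagMatches mirrors etagMatch above: accept the value bare or quoted.
func etagMatches(r *http.Request, etag string) bool {
	for _, v := range r.Header["If-None-Match"] {
		if v == etag || v == fmt.Sprintf(`"%s"`, etag) {
			return true
		}
	}
	return false
}

func main() {
	r, _ := http.NewRequest("GET", "http://registry.example.com/v2/foo/manifests/latest", nil)
	r.Header.Add("If-None-Match", `"sha256:abc123"`) // hypothetical digest

	fmt.Println(etagMatches(r, "sha256:abc123")) // true: quoted form still matches
}
```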
- if err == distribution.ErrUnsupported { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) - return - } - if err == distribution.ErrAccessDenied { - imh.Errors = append(imh.Errors, errcode.ErrorCodeDenied) - return - } - switch err := err.(type) { - case distribution.ErrManifestVerification: - for _, verificationError := range err { - switch verificationError := verificationError.(type) { - case distribution.ErrManifestBlobUnknown: - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest)) - case distribution.ErrManifestNameInvalid: - imh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) - case distribution.ErrManifestUnverified: - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified) - default: - if verificationError == digest.ErrDigestInvalidFormat { - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) - } else { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError) - } - } - } - case errcode.Error: - imh.Errors = append(imh.Errors, err) - default: - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - - return - } - - // Tag this manifest - if imh.Tag != "" { - tags := imh.Repository.Tags(imh) - err = tags.Tag(imh, imh.Tag, desc) - if err != nil { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - } - - // Construct a canonical url for the uploaded manifest. - ref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest) - if err != nil { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - location, err := imh.urlBuilder.BuildManifestURL(ref) - if err != nil { - // NOTE(stevvooe): Given the behavior above, this absurdly unlikely to - // happen. We'll log the error here but proceed as if it worked. Worst - // case, we set an empty location header. - ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err) - } - - w.Header().Set("Location", location) - w.Header().Set("Docker-Content-Digest", imh.Digest.String()) - w.WriteHeader(http.StatusCreated) -} - -// DeleteImageManifest removes the manifest with the given digest from the registry. 
-func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(imh).Debug("DeleteImageManifest") - - manifests, err := imh.Repository.Manifests(imh) - if err != nil { - imh.Errors = append(imh.Errors, err) - return - } - - err = manifests.Delete(imh, imh.Digest) - if err != nil { - switch err { - case digest.ErrDigestUnsupported: - case digest.ErrDigestInvalidFormat: - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) - return - case distribution.ErrBlobUnknown: - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) - return - case distribution.ErrUnsupported: - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) - return - default: - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown) - return - } - } - - tagService := imh.Repository.Tags(imh) - referencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: imh.Digest}) - if err != nil { - imh.Errors = append(imh.Errors, err) - return - } - - for _, tag := range referencedTags { - if err := tagService.Untag(imh, tag); err != nil { - imh.Errors = append(imh.Errors, err) - return - } - } - - w.WriteHeader(http.StatusAccepted) -} diff --git a/docs/handlers/mail.go b/docs/handlers/mail.go deleted file mode 100644 index 39244909..00000000 --- a/docs/handlers/mail.go +++ /dev/null @@ -1,45 +0,0 @@ -package handlers - -import ( - "errors" - "net/smtp" - "strings" -) - -// mailer provides fields of email configuration for sending. -type mailer struct { - Addr, Username, Password, From string - Insecure bool - To []string -} - -// sendMail allows users to send email, only if mail parameters is configured correctly. -func (mail *mailer) sendMail(subject, message string) error { - addr := strings.Split(mail.Addr, ":") - if len(addr) != 2 { - return errors.New("Invalid Mail Address") - } - host := addr[0] - msg := []byte("To:" + strings.Join(mail.To, ";") + - "\r\nFrom: " + mail.From + - "\r\nSubject: " + subject + - "\r\nContent-Type: text/plain\r\n\r\n" + - message) - auth := smtp.PlainAuth( - "", - mail.Username, - mail.Password, - host, - ) - err := smtp.SendMail( - mail.Addr, - auth, - mail.From, - mail.To, - []byte(msg), - ) - if err != nil { - return err - } - return nil -} diff --git a/docs/handlers/tags.go b/docs/handlers/tags.go deleted file mode 100644 index 91f1031e..00000000 --- a/docs/handlers/tags.go +++ /dev/null @@ -1,62 +0,0 @@ -package handlers - -import ( - "encoding/json" - "net/http" - - "github.com/docker/distribution" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// tagsDispatcher constructs the tags handler api endpoint. -func tagsDispatcher(ctx *Context, r *http.Request) http.Handler { - tagsHandler := &tagsHandler{ - Context: ctx, - } - - return handlers.MethodHandler{ - "GET": http.HandlerFunc(tagsHandler.GetTags), - } -} - -// tagsHandler handles requests for lists of tags under a repository name. -type tagsHandler struct { - *Context -} - -type tagsAPIResponse struct { - Name string `json:"name"` - Tags []string `json:"tags"` -} - -// GetTags returns a json list of tags for a specific image name. 
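sendMail above splits Addr into a bare host for smtp.PlainAuth but hands the full host:port to smtp.SendMail. A usage sketch with hypothetical server and credentials:

```go
package main

import (
	"log"
	"net/smtp"
	"strings"
)

func main() {
	// Hypothetical values; mirrors how sendMail derives the auth host
	// from the configured Addr.
	addr := "smtp.example.com:587"
	host := strings.Split(addr, ":")[0]
	from := "registry@example.com"
	to := []string{"ops@example.com"}

	// Same wire format sendMail builds: headers, blank line, body.
	msg := []byte("To: " + strings.Join(to, ";") +
		"\r\nFrom: " + from +
		"\r\nSubject: [panic] registry alert" +
		"\r\nContent-Type: text/plain\r\n\r\n" +
		"something went wrong")

	auth := smtp.PlainAuth("", from, "app-password", host)
	if err := smtp.SendMail(addr, auth, from, to, msg); err != nil {
		log.Fatal(err)
	}
}
```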
-func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
-	defer r.Body.Close()
-
-	tagService := th.Repository.Tags(th)
-	tags, err := tagService.All(th)
-	if err != nil {
-		switch err := err.(type) {
-		case distribution.ErrRepositoryUnknown:
-			th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Named().Name()}))
-		case errcode.Error:
-			th.Errors = append(th.Errors, err)
-		default:
-			th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
-		}
-		return
-	}
-
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-
-	enc := json.NewEncoder(w)
-	if err := enc.Encode(tagsAPIResponse{
-		Name: th.Repository.Named().Name(),
-		Tags: tags,
-	}); err != nil {
-		th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
-		return
-	}
-}
diff --git a/docs/help.md b/docs/help.md
new file mode 100644
index 00000000..77ec378f
--- /dev/null
+++ b/docs/help.md
@@ -0,0 +1,24 @@
+
+
+# Getting help
+
+If you need help, or just want to chat, you can reach us:
+
+- on irc: `#docker-distribution` on freenode
+- on the [mailing list](https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution) (mail at )
+
+If you want to report a bug:
+
+- be sure to first read about [how to contribute](https://github.com/docker/distribution/blob/master/CONTRIBUTING.md)
+- you can then do so on the [GitHub project bugtracker](https://github.com/docker/distribution/issues)
+
+You can also find out more about the Docker project's [Getting Help resources](/opensource/get-help.md).
diff --git a/docs/images/notifications.gliffy b/docs/images/notifications.gliffy
new file mode 100644
index 00000000..5ecf4c3a
--- /dev/null
+++ b/docs/images/notifications.gliffy
@@ -0,0 +1 @@
+{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":737,"height":630,"nodeIndex":171,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":290,"y":83},"max":{"x":736.5,"y":630}},"objects":[{"x":699.0,"y":246.0,"rotation":0.0,"id":166,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-30.0,-12.0],[-30.0,59.5],[33.0,59.5],[33.0,131.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":632.0,"y":243.0,"rotation":0.0,"id":165,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-25.0,-11.0],[-25.0,64.5],[-88.0,64.5],[-88.0,140.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[]},{"x":512.0,"y":203.0,"rotation":0.0,"id":161,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-19.0,-3.0],[79.12746812182615,-3.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":589.9999999999999,"y":167.5,"rotation":0.0,"id":143,"width":101.11111111111111,"height":65.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":0.722222222222222,"y":0.0,"rotation":0.0,"id":144,"width":99.66666666666663,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Broadcaster

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":290.0,"y":105.0,"rotation":0.0,"id":160,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":26,"lockAspectRatio":false,"lockShape":false,"children":[{"x":12.92581625076238,"y":17.018834253729665,"rotation":0.0,"id":155,"width":189.57418374923762,"height":151.48116574627034,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":25,"lockAspectRatio":false,"lockShape":false,"children":[{"x":97.57418374923762,"y":58.481165746270335,"rotation":90.0,"id":151,"width":149.0,"height":37.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":21,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":153,"magnitude":1},{"id":154,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":152,"width":149.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":151,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":151,"magnitude":1},{"id":154,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":153,"width":149.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Listener

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":67.5,"y":1.0,"rotation":0.0,"id":154,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":152,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":10.074195639419855,"y":17.481165746270335,"rotation":0.0,"id":150,"width":120.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":20,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":80.5,"rotation":0.0,"id":133,"width":117.0,"height":38.5,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":16,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":135,"magnitude":1},{"id":136,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":134,"width":117.0,"height":30.5,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":133,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":133,"magnitude":1},{"id":136,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":135,"width":117.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

handler

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":51.5,"y":1.0,"rotation":0.0,"id":136,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":134,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":129,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":12,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":131,"magnitude":1},{"id":132,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":130,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":129,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":129,"magnitude":1},{"id":132,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":131,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":132,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":130,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":125,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":127,"magnitude":1},{"id":128,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":126,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":125,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":125,"magnitude":1},{"id":128,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":127,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

request

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":128,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":126,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.5154455517800614,"y":0.5154455517799761,"rotation":90.39513704250749,"id":145,"width":150.0,"height":150.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":147,"magnitude":1},{"id":148,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":146,"width":150.0,"height":142.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":145,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":145,"magnitude":1},{"id":148,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":147,"width":150.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":68.0,"y":0.9999999999999432,"rotation":0.0,"id":148,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":146,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":156,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":159,"width":206.0,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry instance

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":473.0,"y":525.0,"rotation":0.0,"id":115,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":69,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":68,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":109,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.0,4.5],[2.0,11.533649282003012],[2.0,18.567298564006137],[2.0,25.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":665.0,"y":530.0,"rotation":0.0,"id":114,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":68,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":100,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":112,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-2.0,-0.5],[-2.0,6.533649282003012],[-2.0,13.567298564006137],[-2.0,20.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":598.0,"y":550.0,"rotation":0.0,"id":112,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":113,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_N

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":420.0,"y":550.0,"rotation":0.0,"id":109,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":111,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":540.0,"y":438.5,"rotation":0.0,"id":104,"width":50.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":63,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

. . .

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[]},{"x":410.0,"y":379.5,"rotation":0.0,"id":103,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":84,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":45,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":80,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":41,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":82,"magnitude":1},{"id":83,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":81,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":80,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":80,"magnitude":1},{"id":83,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":82,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":83,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":81,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":76,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":37,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":78,"magnitude":1},{"id":79,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":77,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":76,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":76,"magnitude":1},{"id":79,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":78,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":79,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":77,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":72,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":33,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":74,"magnitude":1},{"id":75,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":73,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":72,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":72,"magnitude":1},{"id":75,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":74,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":75,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":73,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":68,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":71,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_1

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":598.0,"y":379.5,"rotation":0.0,"id":102,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":87,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":60,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":88,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":56,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":90,"magnitude":1},{"id":91,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":89,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":88,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":88,"magnitude":1},{"id":91,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":90,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":91,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":89,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":92,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":94,"magnitude":1},{"id":95,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":93,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":92,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":92,"magnitude":1},{"id":95,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":94,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":95,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":93,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":48,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":98,"magnitude":1},{"id":99,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":97,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":96,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":96,"magnitude":1},{"id":99,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":98,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":99,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":97,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":100,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":101,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_N

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]}],"shapeStyles":{"com.gliffy.shape.sitemap.sitemap_v1.default":{"fill":"#ffffff","stroke":"#666666","strokeWidth":2},"com.gliffy.shape.network.network_v3.home":{"fill":"#000000"},"com.gliffy.shape.network.network_v3.business":{"fill":"#003366"},"com.gliffy.shape.basic.basic_v1.default":{"fill":"#FFFFFF","stroke":"#434343","strokeWidth":2}},"lineStyles":{"global":{"endArrow":1}},"textStyles":{"global":{"size":"14px"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.uml.uml_v2.state_machine","com.gliffy.libraries.uml.uml_v2.deployment","com.gliffy.libraries.uml.uml_v2.use_case","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.component","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/images/notifications.png b/docs/images/notifications.png new file mode 100644 index 0000000000000000000000000000000000000000..09de8d2376d6f986374fceeb1e26389d3ab604df GIT binary patch literal 37836 zcmeEuWmHvN6fJQn>F!3lk#1=P6p@k;xP%DO4boh?q$LFv6_D>&l_)yJI41Ny!V`a_St)_x#pbfggsSP!oEjw4*>xITSZy^83F?0DgpwMGCC^w z%gYy6gWx}iPS2DcB77g9+C)H*K~RyGeeRC9lYx<~HFDGGo46)xRj+0{oGm+?tLmE? 
zf#*Rld7p;Hll&n~W)9vWeH-JEKoAx=gM484n#RxU1l0GIE;pB#;+7e8gPSv6gT;SD zy|RuBU4H9%6`hxjPt7;L8eGHt%!6PMWxv}G-7@5ng#=n30!HlJ$B!VJwT%A1110pQ z36jt7`%wS=^V5VW)PKKhVuY~v(0oO=2>f?0)*c&VCK)97X8|9H$UNpbq)hhqPzD@T zA`;Nue)iE3!%LOe1E%f{=@7&Dr~kXUdk})c6eDr7&A;m-D5So*8>b&c;-O#8PF%RL z(7)^Z<@5>NT}=jwhF(VFR9S(S^56AkG%P6pJADjZHmlc1h7|07*9W(U{eQYWsuf~L znMMD***Z5Rvpna`pU;YnB)=Ngxwf5~;c9<3U`rEv^KQIQpM_GPW%1_f=*jub<=!W) zeC=m7j;lefN@z0PXw)mIuOkL%VpMJi{)}!REc5VmclwilmF>?Dav`Bl>Rfj}s%MBD zUL6hQJ#7of&hkDCGjJYOD7Bk!42b8mp%k`FfW}B_H(vg(Bl{kf(Gt~h&`HbVwAQD> zX`HYpLq}IN>zXEU&_<}iEx~j02|2Tf1si+aUky^H+Z<2mz9J@>;1Id=i+$taK>ryE49hccH5B1Qq9)4{O1yG zyT9&BdL9pL{HlDzk*gGg)9G-1GLg@ULoN3FsN&7EP<#QWUa7pg=ql0qZq4Ax2*PTDZ+fN z-lJJn*Ox^-os~+GF5S# zy0rA)E2nIy^#T7E6|Ye$ZKiT;S<>wvHWBPKff10p?^aA8kwnSNH@-!_{IiG4X;A$% z=I_PC57fr1!^hZ|f1Cy-g^k1P7r_1Eq&xpqd99?|7U-0Ohe{!AOZDnkxs`EeG_`-- zUZXfw(f#NC+u!TdV`8K(He|q`k;i$fY4jU@^VW6Jc&WDTO_i~D*A!v%1!fW!dX=}<_ENST5%Y@ZPfdTYLN@4DO@HAPIq0yqu z`|YGJAx3Xcv`!*QU$h?iXZc*H#Aqm1V^fJdjd|&PpwQnrSpq#FJ3rgXFM42PuUH?7 zYt`Ie%Z9!V1v5jD;+=dE`@BKl*QYTp@$A^aFOC|g4|eRa|!XvnyLZhdl$oUdK@lixp11=&NM z25%rGj!_BLYboz>x-+#EDWQLK>c7|;Vtr#Gd>(?9A5wIl!Ut7E5SFqRK&_7*FRYq- zt$vVpw;NE9GW9wl7FWPF>#dkD;-D0=8B$QgB;mxvTECh9d!6+ujzQNC1$q+HzNMEx zyS2J042vo;#%gWRt-ucSc>Orm#Ygwa^epspBAXr{3Z(Y za;3O5M(z4!p{%5~ykt0)K`#n2KN>5Owf5+C1)Iz*VZq9#tL%90KSs60=}G3H#W4;0 z@T-s#qr{fXIfVvuWNW-IzjeXbO#93F`g(5&tqtoI4q3ip}1_aT# z4o$RJhu-_mX!+EurJkz+iw@}_&Ad0iDy-Wn*q#m;2SQgy2%O2Ked@VmTN!GW)>Z@` z-q7EM9pw~1OL=n95LVe@<6={Cr&hd$^P~00Q51qLNI&y+crvCdY@fAJ%xN#ank@bR z60d{nB8A;uJ}URPIKLjUMAVlHTB+yv9bq7;a443d~3D7XNzdWZ$p0wVi}r| zEr$l%lwjH770gw0ZA4s(~7MV8n~Qwzxze@piV17%!Pj<_rdYDFPbe`valznss#wP<(me#TW(ampW~T{=Zj}~GJZAZ`}bPxwcngC$Q#d(>8p9o zdy~fOj;fOc_h?8gk8N@l^dyz8Wow4Em2l_DBKc)jO3tv;`NaI=Frh{stHe|F9%Z5{ z!neuS4-+C$aUoXgy}>o_bGa+l7dh5MW}#?Dz)@QAj7)N)Y*0$Spyv5kqlVg3S5h9M z0=Y@DahSWD4c}F`QN^rGU&+$17Z2{|>lBUC@XXLxo*8eDaunfbZN5?L^#5Rr&VQAo zll856=l~g4B74vricTG4{y{j?4qFzQQl}En96&sRXC{ykO+4uyw7r`AG6@kGwW}{x zz=iC`mwK6mzXvhC8$)#!=&E)$_KbceZ)Rq)L!vu}GCkJFse3y6@fBI@M@(RK5#d?3 zcUig{e_!k3S5q$pIoGp%ecW}pv9@1eHaWi{7I)FZOmq&6 zO*ZmWS@-7q6R8y0C&B9Zi}&xMsCyM46x}I)9Hq7-JoG3wSf-Mdxh+A^>Rh+Ci@~H@PXz`P zC_`~!HT1f}P6fIND6(9AGE_&0hmh6*JfA!)*g>3-?X#Sb5XwUHi($ zdWV@T(cL}OMTZ$oQ`|O2~3MgF$|%&^aJcIXp@y zmy7kw!YIz$S^ou{yPKZUa0CU>{BQBWcX4Z906@?z>lhD~JM_d322=1&o#YM`Hw}Wp z_|9s+3wkLW4{vr%t$_~VN*OSP>Le%kQaO|6wlXzIr5*n@jVK|%4_w)pw zmvbCbCQ|SL(m^5hPUblDesoW=q$ExFgdq4n%Nf!{es`mM`*Z2tVj-c>y*WILnFQyn&nAul>S=fZHnvOHm(gC(`FLR{vw zu0B;^t@CV3R&T%g%cINFUBYnW#izoyB~7m&iYoO&j`4U*p3H~@${dJ;VTZLY+e9)E zBwX)mQ&KTaUa*nGr%b?#BD%j9rPF^qS|8Lg=}18H>}w|v-QSri8xhJ0xUd`fjO}&U zu7x!Vo@|>_j*X}4j$yG&ha5rxNSOk}ddEr2_~wjV)&BYVRo|^mF%Difcvgf<|FoI- zZYZIULrXyCPsO|-sxh!wYdjp!WUR=*OJ$hOCpV6_Lij{tSWs5zyosoKPy&p58AEbi%$xj;NX~MOhU7z4>kdPq(g_fnsr&8s+rF_E#N*K!q zzKOd-J!QX~^x(uEhPd0>=q%nZPo7*{yxEw?jN)z`5_Ab7WfGW|IfUvJzPCZeGAWf2tW_$n&#M2bGJLSm@kQMK*FP_8Hx!k~ z5v&m|PaTOkg{C^zLyCDGtObwMOl#MB@AI?A5Smu7PRDCM0j;8#15epIB zapbew$*$T!TbC|;p*2ZEyxDp-sCjgYW!Q2p+qJ50tn3w5iB4W`k#Owm6)6noP-4W& z2odamVqG>r<#p$xk$NyCXAIU@0Pa>wPP*ht&pS}2wJ9|vV>S$>1%#~!pB|WG^AzEA z_4bYwq==n9YD~fXH=vi{r5-3Wq>xX!VIPID%N!!VE+2m$7`FhUXjece6;l&I9-Goy)% zy09QfnFEXhO7Q8dfj=;4MI+b(HRo$-=3Ap*MYSDClHoC={s{;Q*J6$jLfk9sYE_g96|zLHKike&XX<&X=(NwnyH-yMWO_z_f=hkt(jHH6nRw7yp* zIF!P6a|YjQx7eHY{<{9TylPS zu+5kY^{d90*E7A;*ZNX(O#tF{-<>OW@Rdrd^=>!9@7YuaCBQSw6MtAQAH0FUY(N zLu)Z~>KAkz)ljNI3*-Zv06YyQd2o|>-;iwf;0gK5tN%DD8%N&DOe< zLOXG$e)#^lw*&5yzQOBH(wphZZt=bP+@g~D-?iH-sg{{>phCU3Q)W3hP8r~Nvega# z^!40o;(Nw}S3}unNW*A%+v*pH*3^oI-=4EY;hTE56YHR_*=M~y)oXSD0iYt!do@v~ 
zClsI2?B?qHa7^bVhUY+o=gCid;aTT+iL3oq{F)YffygjQ*ZNAwRv7lryeG6)^No_~ z-4`d@=1Xm1Nc16oxp`5%8}!0fKjmMB=ze&JLN53^;WXBI^vj3Evyi)dMhO-T_b9Tq zP}Qqp1}cFdhn@ts3yiFCPQU_;=@9>#cMi^U*6{MA^cV6`jHY5cBSUuEp`2~-a`OPi z>?$bzHS-jsD8BAPxyjD~%Fn`RvAbJz+^WH~l z=liX=tB1Wj6ZWS@RhbJQLc)VbB;mrAb#i_-z|PMaJYC3zZPN>k8eX7~O6sFCk=U++ zk|L5^z?qfO6q{b~GoS+4TcDlMiJ(gE(tILG{Cm;Vhs+dI^{=QII-j0`(E4GM&WIDw zYx7g`aYh?>ykleIp@YYQf4&rHo;TOyTA0gD%?y4Eb2rL>2T~us}k9iKi zVU)H*G`rx5ap6kr``u%wqEGKI)~!=Z6FLORa0|_;>GOm^#cQq2>8c`_+!b#%A{no- z-|0JXmn|kxtXiFUtdyGfzP6*5e8stc0iYe^AOqCZ%%br44wkkV2m@1#_nMa2z3fPb zZoOXT~X%HiYK%d z)jz{mYB^(SPAs_&s=yA%pP3?S$x!m+Z&v?)ppb2xKBj-(Gc_Y)R$`ye`=MWtms12j z=oJJoW0Kn4WCI=HukZ4L{|sYOrsMB1(Z_dDenA6-yBKY(GKJfqAL|8HW{u7|C*SRM z@l(RESD(ryZnXh<=3`9L&rgp>lF2k5h5EgvTna+E7!^>M$|LTy0dpjx1cDgV<2+~% ztGL=|@Y()k?3CuO9sAXX{E_^~kbP|~vT#PzMHcRXG_lT-JK*xGtY z&lnF62iXtm7Y#>whwGB>(-6wV$}B_32rAY~$4EjBH3TG-pt1Oj=>=JzpM# zItjfg_s^&-*0NmfNhG9WxhTclL9&>mNqlM?ZOzD~I4vFVVDQNc%QshlIJ^2cf5Pcc+Fz8deBOvlly2 zsgi_iTFF$0m6{&_>}!gFCk+I{DqF*^TKP$hk@WiVuvQzT0YzNU5nRCo*7h37fv>^WwJm_H_@Q*~}{$`-82H zmPmXv`&S?uwaCo{xAz|1Wr0-srhYm8)`p?y^u=WFJB8PDNjY`#MtD#}nMpElLGw!h zJiUr9cq1b+d4fDnw#I6EW-`_8m1dys!cP$^3GtTn&X63lH&&tR)7=lsYY&E|_y zmz9c5)VFN>d*hYL2ZeEGb8uo8B*1w1kL@vw*Q!VDT-hSRw zzy_KFHi;df!g^x-$iA0kxgyx;aT2G726wm}NG>ZyzSq_oMjMLoh2=oFU&sp8C6!b@ z5>cJGe_Emhs2OdmzZcs@Qrs$wdYz#Nis-!Tu~wS3&Q{IKR)DMelEALTJ~Cx;0e&tj z)0CVOIai{G5UlhneA>2U&U9I>|^I*3ktga9-^z0}BT z!aCm^)X#Sjbkn<9wW7J~plGNw?Hxac59lkw{^}Od@n>U#sc=56r&w7uNFE*@?_&Wn z_(&m6tQxg5Tc=a}mJJ)e4?YI^@hD97F+y+RnCsAuCn-8c9he`7P%lSIUA)41X<15; zB80I*hWPD8w3bK&r8Hvb;b*n9mFqwAG3Y^*uy}Pb6+rPZNM9evr@uX&@@8TWz{3T* z_uYLN4fQ0dR&z=J8mIN1vx8-5Qkh7WGm;)* zF__pKbpQMj2Ko{gS=(3U22 z4QBT&0wJU*{tJ(R8K`v_LERQFhb#G9%z1+jE?VF-DsJ=`VBOdG_FSc%z^?39jZ@w| zy1Btius$?z$#an8&HGaL*kk|9IK`Yc0!WkRl(LVe5v(h>Qc}I~^6y3Ry(@s*vVC+4 zU)%=_cG-Xv!`o2tphneH{%5-0tn9XUK7gK^JzD}Sexe+Xs64aUdnSNSbO@9wH{b7a z$2Y~F(kX@>+%1R1JI&XBSbe0g7^9@mEvqQo)%n z*45KdQ!EtoM{E5Y52Uc_T5)Z$`l3)E*yjPfxf0cSO~v9r599q=vrsXmnYJbJD2&2>bM$L`w0t(st_6@c z9M2X*Fg=?ND4a%ky-s)b)sSri-gWEOTm5_)i=gN)b+&+<@w$Z~@|#w^7z|aDuo_=T zxF(cL*tXi_vE}UV$r3rGI{Oh@5+5;bWyJ(-q;ySJ1Yz!l>L$}hk^nzi);1?VN!6aA ztH{JEMN@s1>rJU@56%H0Q0xho8|PX}vEb4fAd*~kDJ7-)xS+2nOe5yVBd?MCprH^f z&pABI6x{=2SH2cr30R9|U&;qbhrY*`V+$Rv=zO-$PieawQ3;&RjiWyJ>@2td*=)NO zv7kp?J{J42!N+Eb`Om^Jb`)+H#(y1mx?o?K%a{?inbUbwxSPLukLI6eUz@C0gF;Z9 z315e*+Ln%1_seEd^WMfJG-eE+sp_Y3y|=pPrVs*t8D)XCV$tdOdKPIPOANKRHXPe4 z(Ri3mP6H(4@`3cq_>3YVw3xmfU|sGF*~J_{EdSYN;OnDZuF>@_p2Tz-n>^&3--@bw zGp)VOq&7(E&>Y<_Z+tHT*R}*g%HU7DQeD%e(7T7qY2O+s>dDww?aut#e)$*-WiyI+ zi?RW~!2JgZJI)^rat@k*)L4B1Y3h;3W9D50?2$S*yF#Ta+oyTz;brI8rq_r<`~IvC z=}LI_@JE8XAD35PXk#MimCsCJ~)TK_-eO7JO(SGmiH`J1L?! 
zsrdpLEf2-^LAswH4`nedvvsOWC=FdVhoYFmrODGqX|^?*KH?-0M^}2y?>^12ItA)F zzMV877&Oij4BZCX>8U>5jBhWk@zW{^+t9>S>)-Ufm&TcK znJ@%GT!HEwh#h_vwNl1vnven`r-dltIc4%_!5t5on;b^9H8&z^w3t9hL8j3H(6_L3 zmm)owuA8ErfC#C+_1?6hR&%+eO$w>3LWb8^k~sT^%9U#&ia+A^XHjDe%Y`0}qS5WZ zicBW^W)#v_?&(T2k+PR^jd*B>uV-h_3W9?X(?d!W$B+Oa{Y==pOUzg@|(OwL}_!Pv9b#Sx8CjA~(}D2yoXJwvRoRj89~mAX47Bf*34KaQ4$p zGPfZ`F%9+T5js?IqG-`FtxBEk-t{Uzg!?uDR}U*gTUD)S^Pl2YM#C%!W~*)kMYX zIpt2!O7O~~+jHi-PLYw=)=7--m7LdEN~I@IN1Z=OD=ypHV0@PV@rQ!v*XR2Mi<$hM zJ1OFwM<+=`+e>PZH-E*fm8ExC@b<(OKuVwz5Ab8x5WD-B|r}wsl~vB9mk(zleA0d&$9M_ z^?$|7HxWTwIDy5@FzK8Nid(UlD-~Dw<7rtSkC}=47TE& zkyz+J6voQKKfp4=56mmM#Lv=&`^J;j3|*?ErG}%9i#cE z(O&1*58uBDOZFVRkj2FiT33#X^3Ka%o*(FIzfIol(x<5|FNJz|A3rJ%MU%w!F3#8_ zw@f#wzkl_H)GWTUWv(|8lNyUV^1)a9rX}uU)WSu*-E(@OJ^|cRRZ;&@GQdR*%cF|yiQ0-xdH2)$yZYo(r6u!6NziiMR zPVDj=&1e5PI+o|u_{C5P`$7%b z^*Z`T197FkNa7xwm26S04@D?fT2ela)$C?$C3%v~$|n?~{K<19Fpjru5l3jg`L*4# z8GWp70Y)Ir*I9X<+VVZanEs&RE!rioF)f|23Ye_hWmyPMAC7-yG^O0cc>PzLwA?@U z0yBA*6nUuRe)E$3f>8_e;91VF(&28nbn@G`Vt-3L72B&n8l}nbDJYg8O#}3!5NYNK zIbt3Pd3$F)ANR(ZskLWe%H!7duDRYYG*T2%nnwed%@TzbtdF*T9!Exzi6_3-3x2aC zARGeps$x;%gQ!m0gDxgmo|y5M;&w;E%I;je=l52ir=1h(M7eLQ-=Cyw&)OTT^9CUGuHWE=`Q zKv{q(ic$q6)9zI~6=Jxf;|y7k90bsMM7WgdM5hI8a|A9wRan~9`~#?j**hgo)es=} zGzHZoO1e2u8INGm6ULrrC$7g&byabo@54+++MWjXQEw}Jbt)l>sr5Ee3Q zyU!`UmP6n6YTJDUAB4vVVb_~L<)wp1P~POIMa%k2%A9C>vc?Wm9>xMti{j7l8fQ}2 zlKt+8LOP6A5-1_&QA{g#$3#8qMUUV&IUrlhICw?wIRdBm1}x<^O7QV}U<#%de<8<1 zR!b~kXJiq-^^8SW`#!|s*Bso{O`Msc`5P>{>e1@zY!<&@jxZaVhq5?~eW54~Q16)s zts%Ii-jU7v%XHenUxLrs7r-Z}I$1I(0m1hompC4h6Sh>l(10 z8YXQPB6zFtGJ(C;orV@wlY^|;U-k*92Gffly6AzJ_dM?hOKMvv8=Ksd zBC8c>b?e~Dw7W7Wt|hWsL?Vijk^47F>}Y~px|~x zgX$dD-BL{cpKRZ-m}#$~vw(IYI%fm5MV5Rmaq~{qj%ChygXbF37eFlVC-H`g=wm4H zm&uSB9~1dK6y~_arv<9*PMHq?YG!&$h4?a@AI3`GLk4Se(~z5ON8Eoeo|)Lj5H9bf zl~UC<^leknv(VsR1!)K>PJ|jZV}9n8O{}utWjN+9COyN=$r8TkQb@IduIHJU3;&^ zSPqs``An_i3hN1AdZyuL`odpO+J5M5Lz&E#6G^X*HE&9fqOF%BtewtFDcga^rik@! 
zcC-ak8aWb2CNhGrBb&v_QAa^!O~L89w%HqkVbmiA_1L zc@L2#p_vVQnvL<{)E0Smr#0+*i63Tu-!3e`CT}s5I-+);HJ;D?7}+$+!|QU1XL;b3 z23Jl5K~pP$J2-?F+DwQAc&>C4)~@j86KOm%&dOHn&Eg}1wXKq0w07IgmBGVBt4@+~ zs&M&}1^o;t+0)itHsoRSHr~AHQMl`|BbrA*ywEm;YWmc>;=1m{1|6lCX5Vtsc|fzv zVBWI##0vPM*n-kkz}s@5|(7T zPJFJ@2BR9sPZtk(Tw)l>8sJMuW(H(3huoe>rK4ZceJ1Zk*ZNkN{yb}6etI^Ey|oNT z|FL$Xbe+ohTkaAA$>8lSL?L;p$-crvoqXN`rSfq%-!BpLmDN>@(>5c(VNf-02E(~% z0ytM}TK{uxdS@dTQeuTd;zjOn5hMC$%wFc9?urY*N3`~tEN~vW2?I#);b( zZHDWQTy!DDP6_ioC`B!J21_vyWP22=cHZ}pr;lg3%zp9VTa5sRKhwgBDEk-vFl@># z?6CR6!7I3m=cftr<^cSw!00wnq-sZCKkMnU=_ms-E@9WE#4`*E><*oV!7%t3h-sR-1_RTybo=*1T}Du!8Vpb znv!X%k~Ba5W=ycDXCW7CZcW~Y9rks?hKr?l^$n7{rE94Ls2(ZGK{ea0*lf>4*GOzW zP7R+c5j8qu4H_}$FBX=-bEs|^aj=R9vNDdAP{bo7CcHGiRH?9CiutLjGRu4=mZfl> z_P)zsrREU@FDf1hJOh@(rorj~+X(84ERig9=$#=17y<^^#3LyqXnZa< zSthC)+P10b``m+G0lhLnf#fzA%OKq_H)WQ#o`g_W7MoiDyD3$)D8JZ8RhMrm4Vv3+;%EgIgB8 zwgJhKod3P{Q59qR(8Wr3{3p1rch+sb8?bzzfP7byvnYC}reWcM5t(dk)cl^VdIBtr z%o7GqQ8GkIF*IC1{@&DmSN8^SVs%IkH&Og|wVasSZ%)AOjVxO3ylt@Pr#L^Iw)+HJ z$c~_d?u9F)DLk+t(@cuiQMmr+41BI@0~wS8>vr`AuyFn>1Fzi*xFFnX=r(&l)OYt+ zML<9PXVp{&%XA0WJ(oUZ_jk7{B^@I~VoVce-*o1|V~PF>ED5urY}o)Bu1q~Dpq+8P zY&1ZjXjmdt7m>YAnd9$?;^l=GG0uB)hP!yD3K1k2G((-jK^F;{>KejWIzNDW_O-At z@1TU$Po~YG4}s3RzqHz~K$V;TrzSaQ7IB5KH|TRo)o()q(*oD6e__u-qKNyLq@!y_ zCbFN3X9v%Kv0fKK!u6T?0pz;46-OY%5ttLLs}nhNUhh$Mb{MCKC&|9pz*vR~uZ=~o z_9hS3UbmEenw{K^0Cpw+8Kwm`je&7?zsVNkbxJJpKfBX9Kvw&_4re>(*KlEtxl#Dg zdb=sI`ro|2WX=wN;~)aZNiGyHC(OXZF!JZYaz*IWtEU!Z(lNzm(1v1Fuv5bb7t$S< z`rhIAFPa1X@1_`Vp2=V`9(gt44YXhdiSY0|VSoOGE`_I~UO=j-@nQ>6mR;-H$JU=I zd0+=s+&k`yV_a>;l_uhp%KfKy3IXko`aA9Mc#fCNTFIwqYH_$gU=BF#_v|FzCMQQ< zG;+(=v3!TYqvK~^f$O2RByes1B;bhIXwt+U%Qr=!BfV18trc5%Bg&8bnGV`NW9B}9 z=%uY%0VvvMWPzdv$DvUkV7sjX{xTjgpqn$?u>6t*%%crg-+kX<1=3A4R~sBSsnQE3 zsnx`RW(|oEIUZ;*hSC2B_i_o>gXgF=jz`R6a~hq2Ray@Wn|Gp zsF-C-hLbRwYRQ5y#&O{+>1c)E_1nxb4d?ZDFHC;JkARsd6eRBu_A~_`X8^fjUkHq~ zYCEKc@%FHb9-Ncvn+-|g?HX;G2C-!BJMCKZ-I5If^1joTQoZMQJKYX0JEdWZ^^ru( zb@a^;DZp+jPCc4tKqHKIeK-9t+nNQDzNB zohlfvjcl>>fq5c9()@=G(-9n;5x~Fr=s)RjK@di?n(#|j5gD4MF=OT+C7L_d)CR^T zA=5OGgXL2l3*C6%IzjVFtqVl2pufr%}gcUGVkW)SFY z$HgD3!BYs+f-F#LaJBE1$=@Evrh)mY(c9y^a~b)e0RSVZ%hpG5cXC^xGOb%Ql#RlF z3i>ACrX%A|ypQh=YgC#va}2&4h!&z8b!5*J5G0A=BY8j8WhmbOK9;6!h{XM#HEdbX ziLh9iYGfx$(@k5c8_f46ui7THP-28_ZH;;4qoD!KCP;&^b-=FLZvlh}v+2q=>|L69 z>L8XD$CM^>`+!YLDu5LG+@A^uB4oUBpRg!fah=>Q`8|+lGY5KmBV40BY<|7~`{se| z%;ZTW(t(nciRv#tfsT0iK#lQ%uSt#3%8&4qW4T-S1j;O6 z8Hia4aQ!=G4M0$+K^JlU@9Y1M{haD~P#U8;+@ZvHz)SG&p1WM(c2FZ27BCi}cfe&M` z^#pgnBWR(yN;*k6)X!6=Zngw=$_o`XepGF+&4eKE3oFMp0g6@-2DG;a?I~BpR1GU& zDPr0_0#2&txl-gYB@I^uhFrLk`UVNC|7Qrm?b6SNM@If?uD)ip=;ceehQ>$bcrTze z_!EG`3BzD#uh970HJ&SCW?1J$K7@<1aiD3ax8CDu1@KjP0i z2?+9V%QxXM1#q||I1wEl9euK%_#UwLXWsW^CNMk^ZvX*LU|m;4W8oPx*Rs5PybRa($9wbA6L`wq@9w*l=%n(Sn}Jpc{x`oKWW1aMO+9dtoNfS( z+Bvo<{omGb^@E`^R9gf4hVL>^YyEqBrZx4vjj#`THixxgJ_~9 zr>>mcIp{HoubT6GyxtACLv=W4;Wl$(r=BoUUVM&94vgo1f6-G=o{}xK4{@(8UUB!_ zdf;yE#@+n@KLV1mT<8-dy7@ zb(xq7-FpNG*`WFI0Z$~735x)WLTXxwJmESh#bka$h62ez;W0o9MI7}*M5bymzMsK| z%7lo9VZ4oyfz`0NNBN&W6u8KVR@+kfApg>DHN5Zep0*;Wt8Q~< z7$|mF1|sR<;RIe66NA8@Q_TPDUrbU?2k6GZ0_(rd$M0~zUO-~pe|FL(ke|Z@pl}}x z{9_*sk~?uh7LNC1MlD07w!RU#i2{i_U~T)|E>Q&_0-jb{efLq=}`i!L(_Ww(Lnt5 z<)6-s46q4R;J6EvkPaawUh-$aLP2g0xdeQG6Ue{ zV${6d=Ptl`3frw+YPFkNcS>OXnH@zN~ zv&HLLtCX^vlPGKhtqgi_gQ^wi5i2mLd6vX&_^l-!Q;_QK7(3~!;>gDPeslIf!DNz_ zeu*P}wZFd(>f|wg$+LyPL!j(G1_r)kb?=R#51@fo%0<7_EG*rzht0Fm_wP-q)d;iX zUm)#%=>kR?{aYud>=QE7d0_o0$}D>JDIDHE>PaIEf%|)QfT=l&=Os57Pz~I*ytN1C z27qzv-cPyo>W%vS7EFro#nPCh+zFs1Qc*FIjNTmhdiy8`*YW&q-fVw)IOEr1Gh3_0 
z%YYC#kAUSIS~9}z@PtNF|%q49dN5AN_w5X3Zst&t(gU*h59;smGnKpla+{L z30$h1NO3m9Il*vI;AgH9F1e`VN4S)2@&a!i^d4mTH@sc*t3gevvF>kR6i)-f80NpWa1lD81 zFL4DuzQdS#3}Y#HAMbcf1Lv8nT*7b}wUAB9Y3QO)_xsu>Gjw* ztHEmEUwH>IV@Z*IfdFQ5tk%9w`C$umW~BY->gWgp#t?*f1V5`l;YM|i#XDT+gCM3`PR(jR$} ztkkpm(>IWFV8X)Y=?P0zGSFQll{*DIT-SJIG=84T_W0%lB3qaVckKqlHBg=Y7XA>G z>>8qF>^dlSJNv|H`6VZ%KpI^J!uQ8I5?`T@0~$?ADu&JsB8mQ`&vDsJ9R3!EZ z!fy2biECYh(}!e%AIKf=OptU9p=a5UN0@ zKYan9Q;Sn4SxzT|3Y6D7g9Fw&QS)p>&q$rKsU?Tgdk592R@aR9S~o$ogZdnl59}$J zyJPvus0w7zt%)L@3~{${6G3lWiU-OeWk7j|VWS4Euv^4~Pfh8`V3oTa)oD&^XQ7PI zLp84pgn~CZx6IRcub2_V{cUWvbp;SLI^t0+j=zG)=?L2Hw;nz$!0aa0Y}rzPWDN}@ z5|pJg;RPbVN=kaBLYeCqhM75~YC!k3voa-Sa}Kt)foaNgpNdi)sf!Iw_hCNWOVDuf zqTVv=ZRHo!NA)-1kwMYX_{REAaLns>MT~rdR|6zhTe|kW)$#p!f1!{ED-}&(Np}*M zo$3#rLKZo$wwm%42Qh#rvn0|$F`CS$=_hXe({ zR__p|J()L^KC5X&uQUU;)t?Lk8rFLgl(2JXqgt>rT(tnZRfOMV=9S0lRVc_S4d*8& zjqr}dkdg1NzbfbSEM7(hQ90x(t}~u958?k-2Xr+mlra%JPvF9K@aSATGsa z{52awvr+zOuixb^2@hs>GrV4|YhRjLsnD4tO~T5-TO4Yil;B&0+Ld=J%0_cD#*}5r z)PlPey__a`tc(;*$ol)Cf~+f*C}SU}@+;lK9Z@;ntHgtz#S~0pSL(7qDQsOn-?i2n zc#$?=@1<*U7ofUlLX@+0k*c4jx@l6HDAG+{*Y?y-J{*x7AW&8UQ@w6;WZJCKq=$rO z63T~#GNo4;+3y9Jc@4(DHzw#LkJQNI*O}XgO6hLXWj9EFNgyVW!G~m90j}NgEde85 z#ueWW9iV^7x_j+EV`b>!KQ-9ULbuO^3nAd+?+mB-(?mw@NIE+IhX6EVBDRkFhf8e% zXm%#F^!#`H6ujiak>2>qViCMR z4{5ng_hFXr4IBI|8ldUP;&Gg}Ry631d<^o_N_L<-xw2AwNkd+s}T2P?oKP}lEn>mt%9k#*F$qPLma8^%~KN47ov$jg9Zq;_SPm4&Oxi2vTDXh z8b)X+v+-nUgHpW8-x6AoNJX!A8P+u)9CaBOC!BxwGqEPqWJw*=I9+WjHuoCzp1t_c zpqU^rubzVZuh4k!gQe+>5d2*+iWeB5Ge{AjUU&Lg2{T7AW#$l2-XUm@~2Qy#$o=#bQa^3yq2)Eez!!ZcV!> z4{cdkwh@T1aCSql&b82Hu3itkx#R0Ru%!+^2dkfJd%RGSeirnh!5zy0c(8y@H0=+W z53JsS*M10WxdG47)`XcGpo9X1At*?Qse;x?k)V|tZn_tqbI*MaoYypOk2PF`Z_d?_ zZ1Ie}+ycLeJ3LF6UF^}7R7F5K2S#)ikmWuBzZ<->u3HTlQ)Hrt17U1h-=0rm7p2V< zn*`qD_nd%BHR;}m-Tb~c-cvxhYfPIAlFJ6|Io7!3uE#yNqAPd}7=FC-z)wW+2PDA- zRXL>Z@C%~kTgN$`{SqvXoXm%SiThNo)b$ zoJt!1o-O0nfF?VJ^dWK3iB*LTR;9vthC5r&Z+r6s8s?_}MzMicHw<8>**qlck3jAd zWo!~rv_nRfo8nDjv>p8tXDmV~H|{P;>)FhEYM&A#gqo89Z114p@X~Gi_wJMjMrUdu z_JI*nea*)QBh;#ooQZ%GFWg%afp%a~yUta62|GYB zo82DE&&v@9-DDYF9k<C9kFJkW4khCZ1xY>b@Eg?w+kce~IBsSP&yfVS3KOgfXf;ai}02E!6UoxC_1O8`h9#k2e zWQ{&R$5VR-o)f7ks32N9%VYg0A(L~n*<0%AcWtLJgsh+`8w(%~(l_+^8;d}Te(4l- zZqj#|LmH`I1Qu5WD>g~!2e-)N3FsG!hi}%>`Gl{5PuB{!4v04a&O2+MsYWNRqu7np zt?uP7l_oYMn?3b7XK9-7(#y!yfDnvqQmjCF8!SBl%hCmGi@;>hQzxmo56@2hl8^-O9x3w*>-PT2bhf)P%3?ud8GVCW6g2$Pzp zwzIsJR!n}2k_BHr)B79HfvKE*Vew?D&ZRZ|F8+e{m~cmUd(3V74by@l2snG=*iN@9 z3sWo<5E-J{i=5oIRD_=!ASxR4h5N|wbctwS09n@!_WDrmPKC_uS!Bht!ZmT|H}SkB zMXbBxFfX?r#x?CF_KGwE29JI<8)6^xF(G|8dhTJ!Y8Ds(uqp}|Z!wkCbPtST>sS8i zn_5aphov4Jzql!7 zleXDQEmHeY4l?)J^rL|_1XG0O!`6MlxgS`v9oP^EX^|^uADOJ;Io(1v*-$LPwD`1) z1f+V1gXYn3{1L=nwmnw7kx#h7xH%w2m9>knWdGwJ*zlI$TUdKip@Ru;6%izj1e~K# z42JWq*t(?hpITHYgY~7b7yggl-ZCo7u8SI_ySq!e zTe>?%kOt}QF6j_yY3W9~kq`;#?vf4xK~h>c`+nZ%eaH9bjB&m{$Kf9?Zmzob-fOM7 z=A8TI*1l&LDm$THNp0HF`s|>Pwo;(&Dc+uL^PqbTt^_8o=(xku9#SY;*qarI5zDXR zY!xW?sDnFmPtG`h$ZRstQW%2-#m`CA-$Nz7UIMP74G<-U#8z{#} z5N7A zasbzb(HWcuUD{U?!6#pXBeM8EG-P=Ydu!t|f^ggw4zs~KjH?SMMyvq!fmm1Zd;|%T zUK*<&O^C!Mly^8p&t(Z>9pabs@J8^ubN2Lf9e#W*;N%v`bB6O^NM|~E&1}AEk`!gs{fV)_hE0Y8~q`VqdN8yh$xV5LjN^Ta%oT# z3Ap4-u=?HnW&?`zWPr*K^Gu2qZ3A;XSP*?vAfQnLDCFMPL1-P|vgJW|IL?WoQ$SA@ z9qLf|2V|2d;lzcc_M{_x_6y~w;IB2AwX1AUiJPQ!O*SX6W2lmSo*TXnDl>n5TGCYG za4y}4g@G74k@+9j3l+DU_91}ClmIFOWB2$QTJ>xJji>9@Z;Bx8*aHX(A(DWxG_rYb z?>Z=cGXV?(fq_T}zawr-YDz$zmJFPEm?FHut&gmZdj{_TSqyxQoQ?#VNDimN?jFpv zvdc^#V;NYG@>@Ltd}pgSX8hkkp9e4}g)1d3f8ASAAaiTdfoSl^N7!|0$TmJ$QeCX# z!W}!Novh)$p#eGW9IX46But0*>4Kpt#upF>h2Y}gWqt5BsH4bUJH&H|qfkTCKNefP 
z5uv_TXx_7l!Gy?lU;T@;L7P9$&xBn>As>J)q3eAI(?y<^oRQZ+dW8%Tkna-!wo?Wd zCi!RzyOBObx28)QIatbxrz{O)Oa~U+xK9O){zViSsF+xsectpe!2ck%>)iv2Hk3I& zAHh3G8$>js{Fr9Wve?B7bPAC#Q~d`2(CQ_66MXt7v92JAIXi{;$0vQlx;7)>gjTf4xeAFAnUzh~Ut|93KJ%1V2f=fq-U7lzf&2#?gAZ;>w?rBLemol@?uw@~ z|8MU%1r%qRy@TDuSe(MhrY~7Pjo~Z-F`!lVoT8O`a?WSc7DyC%EB>D-qByaa58{x% zRp(L&eIwl1!w1zI3;vhOneC>d*SUB1f293?x zk3{>jy$2q+rOfCkJDeVxle5tMlP6TI>lmqYrMA zzIv<|$`}v@G5&YGDy`K&gMx-n>03OO#OV@iij+D!<)KEcp}gJzF}9S3R~f}W-@8f`h}DJ%sVd{#DoSuJ zgD8E~NQc$Bnax zBl4rw`R#W*%T8-SETr#OqaauI1aD})f*Np@j>6i`p+Z2E_!UDBy5;T?PE=hGrxQqn z;Hc&QFn~e4-9yGXgN1JKcAm$PT{igB^TQ`t0`Tm334x=*?CF8A3uxrRQxb{z>lex( zLy9blOHAb3IH=W*R=e1amp+wZ8KenV!C2pbwaNvcj~z^TfFAfB@K{U()`cSglKrCz zU~)*!jA1DbWW7gX@9sTB2T+k9E~Sv=6BNF=9qGQs#+h+TVBng&4}aIYPT`TvvAqw4 zg1ZB{db8I(Y^`KMY5C{vr-3pfFHFd1}@4(eV$^4l+{++Uwm!u;I~ z!#t`af2Uz%#clo1!hggxvR;n3-x~%8$v+r!JU$u%NnEJ$`QzPXiSOn9%0mRyPHGe| zzzoGOrSzQBcwHoij=%Nz7ywBWn(XG4oH|bUdwS~tr_i@99FUdpzzWP^lB7r~E0}zh z*Gr^m|r!C zLG>{Q@4n3i<5duGw%z+CgDb{z6XV-}1F&;p-P|Yprq~?=`J@KRzcCLMM$Tbcc)iIs zS;A#J!U23k4MER=MiHX!`zo0)opXU4Ulr0)2{EAr!gVo5>P7_^cyI<(-QwAS`NhNH z@umP(s07i1)_~*s!4UxC(sOj(7S>b^JaWg6*YUAifuyB44gcxk>aeJ9sf%5N2c%O= z>pYr!U@W^>S5VLg1K>g~7U?HeuODbmL?qOop|UiL%l?WN{yk;PMG*E55I|?T-ubry z%oSqiD#YQKYl|T-fr{+&9*mhN=Drb>|BSQSa0!sXy>Qe2{F*LDd0>j)>#nzy2IR5W zfxz!65Comvu1`gbWeI$mBhWAho`9Ajj;J(3^D+3~MX1B+L<`)9hDH%$0}R}5PVZXd zuWlJoF}cd%Zr46L)^vGAv6%n2w=f4l9b05}X!;*zL*)UoT44}2I6;bXI{W-7 zV1oMPLVF!EzwPSm>g7rrs{x)i-uHkh>`l`%vrV`f45+|wW~FkbzDR@Yn!F}@;0O{O z)S9dg#VuhD1e^AZxb%34)9o3TOwcTK5UzZuf0-CF;ov$dHTi?s;a zx5HP5vk<~>KT0F^?ngRh6ge2c3i;Kx{ieoHuYYOG*N;6+FNGS^xjLn9A!y*GBb~K7 z6j!jxo$0L*=##<|fAvFh0-aHm>X$Eu7%Ab%2w_QZO#H1$-$y|a`t?gk2fF1#v6mCM z0jH5t0^l@~jQo$&$Q7iN8+$_}E9icl*)WOpuOx%TkB<$0*&2W3m_QLchk{6r;Oi74 z>CyL%Nl@%sxPZ@-cHvX5*LPZWT&BOdL-v9oL0j8$_)imnAv$;47uv^GeAbxiL5LCWP z1{abc()#_6v!{YoSf&tf)5CW&0mCUsLmmMQP2JY5XqLA=kj2l?f0Zt?%&ws7H z-$U*eL*Yv5U+T^wIGx)6)x!hATXY{KoHb4)hOh0oeyJaJ_I- zzutisi8+evZ0UG0C}cnCyi~t9+EvWy6hXH}C_dh>U#P+kNPE0Wo=3e?1ZJ_VE+lpN zKc@k5a0q}g|0!$>pG`RE7IOwhAxcEqW#7_RE@k9R8h)E8DqmpG)l28FP?hO%=vKq) zxS|#BbVG=3&#&;TmnFX2Lx6^yIPOc6&mi9adLazcr51x0v?e8On)4pW)JT4y^ow%98l zws~|^w-AZdejyu7l%_R`*k6F+m|AZxp1iz(C5Q&>y`>d;BEx6j&_?$r!_yf6Y-i<;CcSGUt73?f2%pB?mBdSoV^zj#aqNo6l5~K z{hwNMMTHqBh!!YB{dj+7yEkjlYdAq}){+t2tn_AP)eUmK_ha7kqdc=t4XddC(>TuV zuPPP7t3cFnLi4F3oZrOqw~Y}@#^GVTj(?jAk(ND=g{g2E)KaW-1N9eb&?iJ+IA=Hm zxU>+dj+{_-(BBEtjze}&F@+)87w#wdH1J^#bQwn9g+(Fl$Dj1tx5`fvuEGnpAQ7^6 zzGo3qd6ggb6UY)OdLQq8ny2FIe*#9^{b;!67sj;RUdR3VOk(PId)vg*@)+24Z-?5w zzDhWKdl#%#VaOzv7)Gw6VEM`r)o+Y|ag@tPY_~KJqe&i>1BWv-_KXCx>-hTB4}plb zefdbyDn4n`i2?)?YmMdj9k@M7=Mn_{fv`KXGzF>?I2v|~1(A!yhtqU!%Tcn#1RQ~) zAgQ<19e!x5%0wLSxsDV{;eP?2A2W>NI0(BS`cGXSW^x~fuuAFkSr|kx3|O($Ae7DH z-7>Ty*O5PTJTZ+t)qKky6J-|yjFmUkp+sA-$wyAQun1_0m}0jQgkEdnyCIwg_`Uf; z2(nxjm@JL=yl&B9bw5J|dQDNX(?PF5 zDJstMy&!Kmn*UXOJZoQByf9+ar?98QzD-|&!&gldeH9EYBUVvw(Xp>cw+ODR?}8E0 zaUGunj&*&nl1xABfkbaYq=d^UI`lhmsF#QOyM2;WE5`GnaY0ayKehK?!ezohCB zco)MVwm2>eaZG0yXUnAP_8Rlbwm~ia$7jljE;xqBYrOa6SPvp4E8sz-0Vt&Bo{R&_ z5??@|Lg2Fs{*rVrU(mXUJ=dHru+h+Hu$yPTC18Eun>%wLyNW)sgRlu4DAH!tgpDH9 z;d$_Mnndq)J#h-8%s$T$_tk98Q8w7iIyK72RIiOYVcw%gwyxGcfW}u)^2x$WCy6gw z-;MhYTN}v{SIfVitejl8*n=MoX0;ajp>+^RaE2J6ue2dH{cGwZ#1L-S53g0nG^#wu zU)ToX8?fKmmjc3m{`nbKDLF++({K#(<(ya&KBe_{H{{vVM)g#N8DacicfURE5#d{Y z&lit=B?Ld{r-(h^;EIrHRr!v>r^CzBs=h72p$*f5ZjmklG~g*{z(HmhssHIbGTsocW_Jb zs4Hx%t`{dY78jDlAK_jz1Q*yf#?giQ?2qZ0O7hpI*NKZ;v?4A_kvb=hCZANMN}EC` zFE8Xo7@i+CNFns9H7Yn~f1&O7FRIjITyBTv7vQJ+N?G!yNApjf=IA;_ga(ux40ttU z>)J9{!*My8A`3YLoYi|MdMpcL}Tw!@1oJdHc5~R>z9+9IN#~g 
ztc5-$zWj@M!}!ory}DaVXoLh+ATU27&W>XuY$fc~;9Fk?bhZQdFQMJ3`&|Cqo^O1| zvydTZUt9pplQDRwE&o%Kdw7fj3ac{@Er*NAz?UQTwa=4&Xh&#S$?IK+v#9F}S>4zh zpJW7bLOGL=S1$1{$S}?qV9D(Ku^)2Ab;9wMFPF$%3FbgIf#Mk{Bx|K_`xs*Rbyh=Tw(e|H3D@HIr+mpsv6>+EPIkNSwkTNG~wt z`j{DhCP4bMCwkF+ZI1ce6y|z_DT4P7I977lU_PS@>!l@%1~AL1&9fa(HiU~B;MlfC z^&zMee38W;ydb~IcWIHL~w19G;(Z zB5+3uPv}QC8BdK1TAX5rW1I=GZx(Lj7x6l#l=&8MOp)r3Sf9e4%C3?e5 zsf*#gvKuJ}Tlx~(!?)*~_{4ry2}N!rucB_F?>BM=^;`}mJjp+Lbx_1?{)rDdAuv3S z$i}m(i&g?jk|z(ogQ5~HB?wc!D-5fy0c_DPG6{b+wX-z{GEHRF9uww|m{0#PZTpqC7Rja0PFTZY-_euEeW(qp|eS@ z^6PhqyKHGd24Nr~b%J`rcH1G7)wn7>*n>c{M{q^B=5^wT?`lg@;BoBa)p4Hgc#=^_ zc9*r2qzo4?bj0{>6qg^V2d35{_Cws$tLmv+jCWY+^*c9YUX81+(@0N+TZY9(9J~b{ zz1OcYHeERbn#j-XR^E$dyf20AXpfkXU!AXQr~V>W_{5j(Y5e>mKA-Nusx@jYbSvx$ z<+z723DGk|da346Z%~ufl;UzaDpa&D4&rB2Y1eYRMohtVSgBp3eCt|1 z=_k?^@>MRIesf?_^Qtf}<)8Wjc-s}bZf5S@QiKR`D*kmf5O*Om$22BHk)dZ5@I&v; zBXa6Nk|K;TF_a8SGvc5?!fpB$QHtfS;nZXQiqVm!bD-3Bddyzv6T;Ibl5O}2{9)83 zz8C6!V*ph~WONJSR~n2jGWVz*J^gMow~p6sm-{2D&R>ZMegZ6xSk%wsM{ReL*_eKx zS8N4(b7gsp=9E{Mf{SqW3JQaA`k{W1BH3_5IqPJp3BQTdQKIJ>7cy>bzMMI)kdV9$ z5d}H$rinL8oxR_*aElS?TfiH2Ftq(%Ps3scfE||_-duCQ!dY2KXLFvl@5G5U?830{_j~8;;3( z0tt9k$xRnBYQn!LIdgM;6ezt^duNA8{*2c!@&Np8gzQBG2y^x^LI>0tCyCa7nd??H z>#WhQ6KfMos8l?MKG7Uq;X2%e!TPJwwulzHrqguwZVdmE2f@0zln9PwAi$Q5akrEqb(%@t0*k+@GH}JiV7K zgO&vAZYDB$czy!!qRGcHP2+X1k@4}8I8y#8TCWBqK*!j&CwOD7v%(YgeV`{ z=<)q?x|lv_Tc)&fw)Ns5N&G44_tzVzidWPz`hN@YWo9hX->P}u4UU3}O6ukHsj_vp zFTLpXEv^=*d5+d95v%uk3h2%neRW8C>G}M$6;YF12T4COhO#y%_1L(My*Moe)~Nae z+iOnR@im=m^YQcX%Cq~PDs8;Yl?}?0ur3tSM6Rp^a#Ln%!Zm0#h0j3$K*6)elaky^ z4s-_y=xW$e_e7Mxe8%|}r^vlyH_2{h4m+z*Cmw);u0*nRojF^_*b{%vZImjq(W3fv zcX)Cp?^z_?PODZjS|;@Vhl?m^dSr<`ID|Acy!~{~ZE0;iDwE!qAP|bM#+?ZE?lIUg zIy@}P{fI6C`i|d*5%3IBdL*|yx51gJ0U1`AB} z-;w|Ds#04HB0xz=L#WHy#}t2jMYG)&-+)_kYq0k|^KiH@nYlLO(v;$xGcU6U#(@%L z+`@6L=+(V_?er*zdWiu7`bT6{m(W%3-4OxstjUKDr!QX-v|8P8p9~(4O~b7B1p$3R zKCp3B5Zs(QnyXe8ST5CXK6;&`_1SDNN^laC?$`@o=#Lm^gRpq*y$~j^pD-cLvO_X3 z7BpR+zqI}Sr0UoJ0DESL@Q?pGQH)_#d;nC>3a zeu9f|P9ipzBCZxdv|E$W0SrB6Z#BIW-IJvN>=OSsWrpqdZw{-Vk85Mk1yo>|sMoRxL6LkT@Ub_Jr7sCCq=$k$OGun>ttpgjJ@RIMaU&N127YHdG;s>akTo z!>u8DE0(T)D@6pB%O9hwvL`-mSDOODTEgth91=Nh!$+DWCDXyH(!SG$HmHNV77=p& zWZdq=>849za>cZ_ylO6-?ZMb8KMXO&k2y1|n*IJT`)x)1L^hff^}9}7xJ)5@n-A2) zYV@|~q(Uh%DtyPDqip$*ai)|$|Qka*)UIs6Vwj0Pr8PfrhDQhO&&DY;W9_w1;io1`q5*vQUh z6Lm0_c6JpD8<7T`Xf{<$MU;J!?FMD+21}ezR^A!i3$fn_=5zNE?GkoIm@6U*FfMMV z(w8o0hBby5xFvtuv@HC+XE$Hz9y6IMIHu^-+4k-M!vmY+&Nsy%m|WPS7zll0TYm0M zL)*$fD=eZ%#vfOML;2%@;d#$#(liW8vPVFqpWk>bpc|)}qOdrRo=guHnl+io|fs&@$ z%@yNofdN)mf5lo3kk#Lg+*eObW+>2pr#4@61IE{mYKS+gV0QQf>3(r)Y!h#)@*UES z%Q*f6#6ODZ$6XcQ9cBk#FDW89RHr*c%bK zHk7?p{j{YXnc2UKs3hY6_NbA)c{xx88s7*~27ye)DpwVTcUXOcwyUk?+lvWWnU`L- z0i&3A^gJ(kkmc4HgHxm%VoWsE`c4n2#?N(^rcKR$uslW;QE;^w8pB}xd?n29Rp4bP zq+PqVSW={u`l%Q?dp?HKeZQ(WWm|sQ&%MXbNo#s5Bcz8aEDVrIc9of`ZXP85)DSz0VIMm{neAY~|$As3rJwOzCM>Z=6(M z4`Aeck_N5+A`pc#t<&v?=UCSl#y%+EnyM3)$R=K@AoS^6=E3_=ZMx5Cw8>VOTD8At z$ej5Y9!s%bSb~f|Y^*BD%C*6YM=(DcT_!6+b#2SF)achEMTqSm5J%E^922&>$Gj<0 zF4=7E3^VPIaV&iz@LJiG+H<6TKKlY+5f((4m9q?{ULb!nrd`jYiGKrU8opKD^`0te zP$NMxH2IRpLs2G8@OSu5wv^c+Z7Wp#hr^^XhfT&fF4KlG&O6;BDoy3Fm(RPQ4L-_; z<`gcWqUltH7O)XIItx^nZqW2Nc7t0{xXacSY!m#2yv;j>0c1xhF@IQ&Dk$4XD(2JD9HroE{a@77M9=1J+j5eI6~vXuTlYuY$9!a z?iOQ$N=K-;Eyfm&h`Bcl!w5%C)&RN>WYqKV=MB=UTpVvtzF3ICc#hB&$)~F3YkGi!?<*7GpElB7b9b6e9v=0EU67fmmAsr!d6>B1}pkZF_I|R zxy$geq`)GDBc1jkquBqPppKSix@<2idALx-Sa~!)o#X8U|AVc*@PH&^&nliksehp9 zQw}_5>2FhpFKi2uJ>~~AH1#N>WUW~8_HHA4d>4XwKhJ91t_>PQUe`dyk60!q#XXFP zMq8VYv9H5IwkUGbU#VHM(xkxJUcK8WdG0G~W9#>j9{8?&XsIjVbiF-GbAlwkD`)%O 
zQtee)w%}=@VG5kN8$|s{T=jB&=`BViRs>WbIhY3ZH;6|uE42y=I&PbSwwK_ z5RfvG99ifnpK$&TW;i>VGI}{o7Ky!c%HInRO2$lE9z8=wyX8d?34!MH?;x!8H>OWz z$q)%i-~159#vkNvD{m)#j zGiCeXK;TvX&-eu3F|V)wa&0cJPA!NbNItDEjb|INw+ucJmX1v=Hwh#=f}Hg5Hwg%J z;Rp{%=GMhEMjgk==3aC)ZVHxy48GK@i8@uf!>Lm*uYl~-miT#ZH-P=FPgXZRdw&~Si$+)n1t#yu0{bc*?s^AR{@Ct%W~ocRZR z6noJar`AqC;4P9PZaZ;*cQv?+7GF~@Y|XyoaM6W-BO)_ zdY+8qEn8UUNur=niNs>%+XSnLPpW{og~4sgx#^nUxMinXzd3S%L;W(`(PO?MRUwJf ze}Yx79kIRHegOqzbwazP5*Ka0$9C{2Wqfu1SKu5}`}6*JDQ`-SfPzlqfm*#q-fh?|iC^1w!3}oe)xV_-*o0oGa}A>{ zDtX*Z#jWxNT~N?}R_uyBPl=XeQwJqe)}NWD;qy0ev!0d=fv7YaqC{ zQquA~;h?sMX6y8{Q~Fn11lq4hzqXz{kBi~yu2a_TArm)f3<_cGGKV+`=La3wVSN28 z{^}dw3&QMKl;3th6&1rN;6`) zw77{rZEaRkzC|TYi7_)zicZcghK6-67>Fa$GYPTr;2fR$bv9!S>ugY~}^#gmeB{js0^&N-4 zPHSPW08l8kcS@6;*gAZ9`Mjypy!FSSZ|O}<{L&ff4;*XK->z(@pL0hUiX6+p{>Jg8 zowtQU6rMMtZs7+x-WcgySOVLacd@S7K9RxoM}YxyxBY}hDie`|A{Xz;bhjLc%06t# z@SDFbFpWlH;;Flp%<8-7Jp<-{!`{uim9=m=IYa!zGy@|zDkgcZO{#lYd(j_x7UPRV zVa9ik-tg6J@CKuxPaxy_tEpiafz2UAB3IKR(TL63hfp+dnm%q}xkmNbZ>&2=6E|QG zbEizwQEY0|etkk{#UW9WZL~PV85tc-&1o7)$ddrhE3i7efA#368r>uuk<*M$RQ zW6H#9Y{>b?m-EgOO9R2IyKr3fUG4r$uoL#Q2)j?s@%A6E8^=srj_|Plx*ECg%3~W$ zJzt;E3;AjIpKqy>b*P-d#&7{ha`5fJmp(_I8A=+IEv%LGy3zOpXHx9@mGBS0`!o*Mt!C&uPqMxw>_Yyj{N=$^n@6KGrwQ>voQ%rB24?c4G>!w zGrHej5zlq{qWU^21mrk=F$Hi>i0dJad7t=2eclp>kwXT<>s}Xn^}yU4=2M5eGni_0 zObQxlvYn;<=SoLXcUmUl0F}~D{Ei4YF5Adv>hct>aDPrW68x@@PNFnr-hYkVJ#n8D z{}!_wL7hx&sPYTRNU>dUW$&T^N^Jp?l21^0D@Vg{kOpb?U#Ky~gS z^l^ev;PcH1Won~K?G^}$*z4^m}jPelcpr)!DgBZFR0d7hFW@m`46_-XU zzE|esN8Nz$xV@VnY+wawZbIQLIm$(TjsrRS;|&KC6t0t^jHFiPA65o`V*@@y9~LHI zr{xRQdL!$;x*eb#$cs`Yxx0ZT9avuTp}XbMsU%1^ck8aQxkB;WMozfqdrd>iAJ9+? z+h#tPupSvA0`w3TWd3XS&2BOWU>iNfn3NY$7L|6wd85The$QA1v-8Tr+Q(H-67;EV;iTfb@DM1XrF8D;bx z)PdAI)?#vFAD|C@Aj7aebQBn(5GTOyR4uLFIT4|V)wZT(W#LC2 z%f36QmJSU5#7dLRxq48u=1-#mz00Fu#?v4b9l4q1oQ8TaH&8axy43tD0w*Y4T~)f9 zs}!H5JsRwE5<_?jEr&|hy2iLHk?8 z3nnbd-cr!s`SsEcitBH3aydLbdeaf(xcfSIEpu04@ynHjg4b3X&R&^#~vA2Y)*X`D1J-kHrfnFtUXP_ahryx#jm1rOP=i zfdwzcdpQz2_`@=%Ip?w+z?0EjnV$#mZN{dN!tm)BLtw*U*D%$1eyy6A>qP6?b+tu@ zF}R;%*NuP5!EWS0!_1_S71>A5(0#RaLzc4eQ9gn$6E|ttEBkr!akC9i3;kFfJ>LK? zh()_PXdSe~N+@5@s={;`NiVlz{-0dfLjOta2WnxCoSf^4zPfuXr+{c-C9>D3)^eMewasI|v7%I}A0Y@q? z$1n0$O}j5#_L-Fm{g}u;;oIYkgO5AliMgAljY*v@edbZcH%bhET@X?w4rj*MkwtMa zS*;hceExtx#EVXggKX>B&1C)}F*Ht74;Qv91FKVJVVRE(G_RW&e{ifBx&9OY)v5-%NV*>yuT9e&MD zw5m*+@k96^@$LKMN)eVGtB?`b`n8Y~j-Wf`D*R-A4p~2RyoGo!s}GcEFVEYShoN_d z$p2bMW}ygMb%YgpLi5L}3e<67hBj`Ya8`A{mnO{}8ZG@Exjc~TdreF-luXm%(I#=r z#b8Es`i8Q_hc1MJi*(H~L5Rnw5md6iiNUn(Ykve4b7!hFmO^;s&TjxL2I3pM!-j7sL~9GezHZv%`VK`t1wKCmNQE5UU%_~uPALCr zajNHMC$Km~>+~EYpjfR|nE%pYP$g?oJQK~=yVyg0p&55cF z9s2M`;qU%LvRnc1#d>;D)?2Xs@y#WM#p=q@H?0Uiri0-HEmDxz7Wezoh~5^LN_>A3 zBjlv0XG52^4g3~XLDI@w`AI*x_M*}7yXnWV=kup;NX;b;xSZ^H8U>AM*VY7fcvEt| z#RJ>JHcR=IV2f4~V%(Ip5AQ>$H17)BD)g&S_=L%oZ3J-e`7mDaV2F7aG#aghq8SWR ztKex{fPF__MLB5c?f!adF&_)+zUBIn=6iZEW4WqYZyLW~M#`@?DiUA7!ntCVrktzO?Waf*sVajQX*1 zMraJxK-DlNimegZ3S!!`ynyTc4?&NLsW3wlaU~?>B52k)NuHgq>GU7}%HE>6D@E#! 
zIgcD8ha(6Fk8h5v+Rbh5QIQ!Q8co+9SJ)=c?#V}Q#a@o`a5F#u?vlV&tWsCYruBXC zR=~o;z@9M|fxAE6v@cplilVpEi3{U_NybHS<=jk+3SOF0ckuXlVcC`4SwGbCFY;5( zkHv%0)bO4t9g1V!j@p=;IHtwG87Td3myxBXi?UFACgpTOe}SD{mzP1C-gR0~1e9Td zy>nD}U@%=sd`iWi$>i@-mnMGoxSGI$~)CyS-u>a^v1^aI;U&pW$hU4 zy_u6mk{C->k0B}W-nw&yBXr%G zX;7DMdN22XL18gYcKx%L%^9Qgmav4LR#4z=(%nc5izo{V-XZF*S%I4XyPA_n`r-C||^+L@io zs>w(ZNC~e83MA<5y5CrFoJA|^7`j>}#n^~BjuOayiF9GxclIqSbP7<}p8L!(t9P8S zF0pGlKGbskyA7V<5E8;+Q&aD+udh+&Me(Ot(*G9XirD*mf4 zd+2x?MO5iFO)gQ_#-ZZMw$Svltpkoem(5fyyYK$&+zRWn)VD$i;%41hByiq*&(1e~ zCftV8=w{C8a?7xMA!~w*X_}NLHHeq?e7|AHkR>k>L^5IwYd2VD+Pu@NOLIzDF3FeJ zMj{dOyBBplnhR4boP00CQETJU*n3tc|#P>zzTI>nJT>6P#`Wq2n_^Ufpd zYlD$HTe)-MqL8{&E?~AI#%rb!V#!mjuxgi*_9A(-_Ue|yQ(y~pKEl|%uBv=F|h@zUdFtOS{FT zk4jO|#->Q-?8s#^-PB=T6WM|(n`tTlt!tZqOv|2_17-+GNEp*hVq=~*qP8#o7(Vd?}4^kEZZ~hDlpH1jqfzrsa>EM-mf5HVX zIy=)gVj=|N+!L@Q=HwiR7V!nYAj`VY@IV5liDjV^IvA9+`}x)0Swf!7&CVNY7^&S0 zNnpLztz#!N+B5LsdD;MCZCXTST|%N{CTxQcfu%(Tv~We}qWsyJ2oY0j?~42($;Qp% z-PYT|ein{ztdsdsa&R9esa~Bdb)tc9BJOoZ?d4)zsYM3Dl)l_A4h9B!rO1<)(>Xa# z7liFWJ8OOZmSaA*XTk@t>#VTUYu8Dp6rgWk%EY0%2TMO@t6thl>{hZ<-TnI{@3@74 zSdQREpvYb5XkY>{zP%~am;yrtVL%uaeM zb`gr1x2nvu?00^5`5ch*y|@7y!ciWgGFc$QV>YI^5(AZT;g6*Eab$JxECSa$1Vo^0 ziQW?(@iagIij3 zy`IX`z4PCWyk283cw=(7Lxn6f`k;L^gtdh+&~C|pn zKD;0p*u)XC$_K)A#`t!taebHMq_BP79gmpJ@U11;NY1j5xT+~h*e77ec(CsOyA;sf z6m>*K$_*^GPsna1Pi_lgpXq+@;FfEy%fmB2^wdJf&h{R*)m#8$wzK2Y>A6BnWgoN0 zR*W~nO)iqv7FK5>GgxwCBlveX3|_tVwg3v{b>Gh_^ca(qSMTY1JP^3u$Q3J?1@mH7 z#%g1p$@=Z^oi3|WISM2z`Wd|I~ba~^hwivhqzFTO)k<#;zU5&SO&WPRheP18?Pc?VimLs zJ1L)?7FwL^ey*tBYoPkBc4x~ZxP0_=w;jmhu@P&XGqM53I|Am95>xpkj5P>z@iCQc zGe0%U4DW>&$S}CkO~y!Y2YW5`e7=KSHO@~ z>&%a~pfVt{3{U4DZ_Y{&jzsczHJJB<<~9z!hVT}LH7VtE){+wC^eHe~Gr_ywm-Wo|o7}%R9j0|uqP&64%!dYPiZAL> z%BbAwr%F2szI>8b=;a8nd0$b(7a`3D|L@3%3@rGN%*m(E|2y=WFJDdY4Z{Zw@CRD3 z70~k%nKq!|pIL6i^X7ktDiG$F%8q2Xz_0&vGDckVa!q#JX8Y3r9a5MN7TVLwVU+v# z>RPath0vZOmW5v9TK_xrnFRE?DscP%y;iVYz6x)?TARvTO=iV^E+1@1^#{S=$f3jvaFd^fG-ChvM*cs(q|ImOo;Fm6+R$rzDDY2FR!ycx$|Usv E0UWExzyJUM literal 0 HcmV?d00001 diff --git a/docs/images/notifications.svg b/docs/images/notifications.svg new file mode 100644 index 00000000..6c3d680b --- /dev/null +++ b/docs/images/notifications.svg @@ -0,0 +1 @@ +Registry instanceBroadcaster requestrepositoryhandlerListenerEndpoint_1queueretryhttpEndpoint_Nqueueretryhttp. . .RemoteEndpoint_1RemoteEndpoint_N \ No newline at end of file diff --git a/docs/images/v2-registry-auth.png b/docs/images/v2-registry-auth.png new file mode 100644 index 0000000000000000000000000000000000000000..7f90c73814f08f6e018da7a7f9a0a261ef5c686c GIT binary patch literal 12590 zcmb_?XH*kkzitr41_B~oy3#?Uw*bAeOBMMPRc zkq)7q;s2iXz8~)WaL>9YYbR?~=C}8r+51{Fb?KdyIUvG<_L zB}o>R#aH>u|2RdybH*aaO!l6G`6ol-_pAK@W~+f{^m>G-p8vF!FT)k%u3Hpo`jB<>kc-Cwtdk#|Hz>^{b<*-ivfsr-hOV8(fVJ z7?R}@PnzWqrEr3!qn(rHR3TJn&r`px8V#lg>ho?c145}UXtaX1O4kFfeg_~Id>j0)krRn>B>22g{4xllnX0w5tvWaQ*6&A2!EfVnGPPY#h6z z?US%ig_pBgZ+uYoOP}m)g=y5@N8F#95@;3P%d2ax^dG`QKcK3p6~8j}=|h&|JJ>6< zXb5?2UD{D`4Xox)i@g!v`;$yJv&S#VO!cL9cabF1@0G(>nC7EQx9+YjV@hwTHV#@-naTL5hw8LG_1}ESuZF_w*Q=fymk9$d}d_%flP{e z%E0Bx#G9a92mFUjrkbpyJg`&LeJk1jox$%{Wr7nn@*jR z#<2$5;ZWY8F`xJX&~sYWSNSNAeMS;^Z$~ZSCgw zCkf&0nrj+tsH9&T*e>l`vd=;IWDNf_TO%T;hlwDK`@yw$#%k5?6D5Lp^h5t?-<)1m zbNypHM<$DselDebtIdqK+0K)MB0! 
z87JW=?_El5!_sNbP{J*r#vUY{pY@A7bRk`4;jfnvXR#F)1F?Y!B)SA$g+4Na6$@JN zWL2${f^TB(4SH6H+?0;H9y|9pi4MF`ww>qHZss@Dx3WZBTV4Ax*?+tLgT(wrpKHhF zE=>X@dj>&T?a?W8#csD-`=vg<7sa#OGRd9Oza%yR1*W@ojg8o2zs^27?8Y~k*w>Lj zm&=9aW)3n^TFkCGKIBVwNyho_W|z=pNKM{oivd<Z(SQS^!$SCu=fMjST9$;ApFRV%ZsX^XkYDF|D}j{MRv z;#&<7qPul{b5|@^nP$>E`tDu(8`1ndlaEop!|Q9IK_W+k(sG(OdW3o-rTd?#z>QUPB^`Ov`90|8EhjOAPab5l zhwbZTjrn;P=NCd&_gK>Pi)@u-3cf#h-+l`Z^QHt&Wv>&UMN_@LvYz+o2ZW=%G|Hd! zd*7+H-cUqhL8cb!FFDL&!fN^lBAYTpIH>3 zn>`Qhjk{l@y0_LnIyXPS4;~23^y1G9pr4{bEV#|$=0DPoARVg8dg4H2&A>H zz?x=+{4ScrAIyY#Pe*^!W-k2oDy-!Cs2P8ZQExR;;suFLlWkn#u!gmRujSGRzV2p< z+kiglvyJrM$gHC?%{m7P#|}n)QhZV}>Mt7EpO^cERnduPzC8vK4qIUOX7=<4RqVAY z4fW&Q@S&n$Oh5?*`pAudj7Q&F(*iDP?^(~Dj7k4CK^O7~slzAk8M-2qwRf+k@SeeT zv|Qt~Nb>B}*iPyfD{G zL#A6)jnC5Ir19J&@r}0=i<5E+A#tfqNxuc@1fz1=^Rkl?X?+(J8PP7{vc@m@OcPTi{DqJ8ei77vG8;@P!>?+org5jrs-PAm%V%}&N`+E$`MP=ga zB44g#ct=A8N|9Gc-_&-)YWp4zXKH@56*BRhS6{Apx(`+-#H%E?zK1CgO(MANo*4Tv zDcB8PNNs1|uf3SE@sL0Z7j0La*lUl@$>P_>5KM!cn1<3SSC*$bht zSwe*7>*{>Y{#m#FrGA^)0;_~Y-HgX!GgZKpQ`(+OXBK#B>NKDjs_GPi-!&+c1smzs zp|UsqeKMykc%TonQuoq(j}FPyL*-a>!3t~TGQ2Zf)Os-G;3}xKHF?&NqZ91bJ(`Kk zU2dAR&H06Z3Q_NTVmd*jFZnv5)5Fs5hipfO{Kh#QdD+xQD&bJvW@xZr!-J4JHwC7a zGe0u}YE9Lm0e7w7kW886_+&bg{TPH7O?1;9R&zzvepCuBx-EtJU~5!#J3Fdp+1TBM@&GwuY!|Na|s-wPL@By`>Y2DLzJy2;tw zI;!(N2_}2{!2P3RhgkI;4YtYgIkMQNU~7{c_V(i4RM&~w^_su9*70^C~+?Qc+fo`QB%oo1?Xw}gU*-aoFU=|4Y zOE`)1r^-ph_o5ii(%_V$VWQP!I+1?X{+Te~ciG5=xan}_AV>Im0_jnrj!x?}Ix+fM z5iBLaf8Ox|0RdJaI+Vz5iogLDj{Nw(qssZnFmrA(l zd23)r7B<)zJzknb+Y3PX^sw4|Zg}`oAX+TwV;`fQjF3%$^>OR^z!by;(__=0YLJ+i z2p?*FI|f1Hy&J1Fc6CE>oW-Iz?Cgy!E7R;1VxfV3lI4p-gpqiqI&~L+&#)G-AW_py z52In(i#reE9IaM^(!H>Lfi=sEPIrr0Y(V<|spQUJHsSc%S`m zEjhWa_wn7WE0iV@`rb%2h(#C6q!DUAB=+v3z$;}_-}2>9lAyHRyvvsloub*L9Uy53 zb0kkqR*16~^Nd_YbKYselBHh6Jd7R2<^=%QPs0+mVGtCLY;V3&gRBcH?p8NnalwPQ z+8dIkZB~yAAxPP!A-UA05oMW%U7EL|pw}*@#+cm$Xs1jgHIo4292wF!W3E%9{Zejn zfJO)wXXAVe8jPA_MK;yh3T|BmQ=a^~Z*m>L#$F%3 zKmKGZK!(#*RfEbvKgY!lGNKT7^}bo~k3e9AT?xs4+OG{y0?-D^nDCCvBhyApr)QuS zw#)~ZA04HTx6vUUMvU>Q=P}a-y~bQY)nY?*G+(#=um%-q+{S?lAUj_nF(Ks?bot;& zN4%_gqLhd*>2(nmZv)rE+PKDqby#`I4vf8Sf1@;ZJcHzEhU>T@@ok@n6HK`NQ(Cv1 z#DzuU?zVBn=YF_&5lirr14%8Q(kxW+r$3j4W-YGv3yRX!k3h;>B?cYtEQeYjHS3Yf zSYxiFP-@p!3$u;S9QQ~3U_%EFM(uAbHydw<6dm%9#bBgB3rNIe97F2zV}!K5#;fVu z=>s%+kB=k@3x!P!NLS-_NZf1Ee){xm;#hu+?Zr3I1ds|R%Y)7ju=~P|i$&(sWh&t= zvC!#wnnIzEzweGZ9iksBNY{Vd`nW91;ZvU(T*d3^cloZqK|Jc1`f0wm!IQq&+*2H% zXvm2?jbX649Eqz(!06QNbmK|)`L93Z&9r_wyZrRjpe1+)_SE+<(oU19x=Rne^Htr z6&n_1L{ngg^eMD?1~XRsJ?@D{yt*d4uw~=5xLi{F@$t#T78^8!&Ql)eaaVQQb|`IJ zjU1VAP!?sJ-HB(`Vt>){sO!}N4l;@U()J$m$Z$;-{@K=18a4n~?JrVDNqntV4mT)` z8~g0ylJ>&(6e81~8Xzu!{S;xwFf@i)Y`Y}0JIICJ`mE0{@){{`th%8g3cI~S=0xGV zm3l<*Xkz}!p+uxoL5uM5^*(DV9z8UmDq~g^5u9g1;+iC)GC}54&&lLFNpaL>qO*qm zcJf-=NiMAX%_x2pI}yT*m!(8Tq>|!GY!drKP5bPliOIhO<$I4j%%hW7Zzbt67u`}N z#Cl&=x2imVE?$j@f}@$hN;0=(;lsIlQ{T@nWYr&7%M-K=IA!8BR(!M;>Vd13tLi_Q zr`2)PLnb?&x9;e6jE^3cDbDBM+F7O+Zs;7X3CV1E%sm_N3S?#_ji#ZFsK~Iw#ut!j z+s0nP7gzUhufm+tf4zIPXZ5O9x8oL%7nHXmLncPt>XcBylQ87t7wX=H*a`n<6sH|z zD>=4;c1CIfx1ELF^!`@T2T>9GwaXmZZ$7fpwy~IubLlZN7smg2r0apZP;4#zWNJvs zmUeS-`SN|!xt7!OvH{k>&)FNEfwv|yNSZ~mNkTrF^ARCSc~v(H*;GH_2O4%96mhvW zMO>S)e8hPs0QCfWOKp!WG88mDM-NI+=hq0C<&faxO6XegYEdmIKdWOYQT!D5rFj&w zb2L@+RFBW!=)9LM_+2c-WJHnq!$(D;_!7Mo??`wDMwn87pDiJ2%ymL>J#Syec9TUiO2CXEK zgN&s&IO)U{UaxH1E}Nxyl_c@5r>0Q#c_(tXePcy;YDitr_O}YgDfL^Q?kR%ocje>0 z&$Ra|e@vGX1dMooU6UvDee{b7di2sfVuQ3FFHHbb*@l6BVwShG(u85MP3ZU%amI-d z_HR0u+ZN{Wf`8_AlyB~HJ(_UDuM{X&kZEd+TbRnjrvonf85n9pZMp7@8yv{Tgxc0?DkOQciP3Gs 
zxmS_40W#6hE|ZCi&kUWi)rtV8Ai#Lw5%u*Y}zDV%s4QCy@O&^6N}UEWmMdc;(>9&il`hjX-3o}i5|xA*5J z?;M}!CQ|k>HBA)^yJe0Pp5g%W)}~O^FFo(kh4pvjvB+hl<@U~#YJG6rR=M3L%Us{1 zbyza+3ii1R6tH5E{FE9p1XwND=Q<1|`}Q8}J(2t?+f=wQwO+v|#k}{!>}N;s*UE~$ zO1TUznu3)4j7WOlu~ImLv1#WpD5Z-m#x-DDTc$LP?=9m2jRMxdC=d4wDFXX7SoZJ% zkD|Y^*wEauQ~Ak^6Mkil;$2HI=+t6gQ}5g0_Ltz2bZ$NCAKNHH-p`WL=K5&ik*aqH zs!D1Eb{_rcTN(RC)q9kQnd5maCugxB8>9$RC`A~7qR$%;xGnnwRrFDb`gfXgzBujxyYkgrE)laV2*sO7}IW@ zR3(N#O!M823hS*V*Ss+MnuFxhP@n*buUL>%m;xt5u)Rbkf{IjL(|->kBSp#7*`OJ%!?`DN-7CG;!Q-0L0>`B^g{6z{beX{oJE5ob>ty4m-TsMp z<+=v?-UcsczjSSV7^4bp6qCsyN=9AJg6o`~Nai3}JJNqKIOXUC-~I($ZQ5bN$CT*W z%d7#D(A3;+mj_+2jVY};YTT1^Y1}sYzds(vz@AfuKcstTU%z-t69G|<69nPkqHAiK zdHjil@3SP3I1Q7Loj78oq zGL*_1m{Qnh5xVIv#|m-{g_)Ze(Xc%nxP$%rZIaegT7R7T9PoQTt$=bHd{FFPH-v`m z_{1VTH8Onq?hebxS$SL)XusDX+G0%@xQwU@%%^0(h**~HMP|nA1)r}FMNG}vF8SNN!p!!qXM(hC#hs@^RHQGU0IOs2# zJyVEh!0uu?J#~EMJKB8~Wtq*@VKUel#!6%T5lAy8EJ8qg9?gIqkj`6U3i4Ow4;deO z>d8ngp7%W#?o6KcS`asnPStB%J$WF*n^V&$EAGM#VfOP6>qw!-g(eWBhGte0H_4`6 zJgUSPqE@N6U5Wn(2D`)LuL6uE2bUltWPsl3Wo_i%fX3aStGi7=p2pblvqzRs(dDq@ z>|qlw*>jxUr;U&jOu@j@LWej*i~fT`iKw?Gi(!8pawzj&K!@bk`tgWClvZzl1+LRX zPkP~PW*z=vVnh<{J$!#2X)Sg50CeBN+LAH;>B%tN{a9!DOfMki)I?fqu?9w#legp` z>3Q`;4M0|Hzl%DWVuX9-;tYZ1`u<52pF5XeH5wCX{p-=Ewo}7=M{8}NdNM+Fywf@L z1>M%y=Xr&VMeQb=K*aK+9`mSt{kbt!h*QEj#FB>7e;1{`;0Z?v*q4n5`bFuMjDKEs z?CIXN$f10O1OegR|HRbcq0Q>?6FOumi>zI(`b9q@R0nLP^CFm6u8P`Z$$G^iV`=%V z-LF7{UP{^+>M!I7!MlMhZ*Zg@)HArYR>dr2jwkIm|G-R@Cx4vWrf;(I=c7Sjo44F zpaos?iG@tLz2&6_H9!CDfuR4uxB__pJ`gKjdwu=P$97SW0Pc&?C#R?~8jz+l;|pQu zr;PU*a%#h}Z7! zgntXirJi>3_-dUr?atk^;VC`&-m-`eg#C&9a4%}y_(g%_)6@E(nQ3?Dg429%00~r_ zya3^Y=pHi8yv5qlDIL_j)zNi!QhHpYk$JNOIVwMUKHU4%KK7hyLSoa-{}5~}H1Fj= zBDPy^-m<=-b3iKEYYn1cZ6TBG?U(=47_K9;sT#{v&MABLItnPxNFglrlV!-pq%4<=%@w`lRjX9%WG1w+_i`T$KmY7~*xT0+Ie%-tk-z ziSVX=qzd?KjYi%s@9%U9Pbq7S3oMNZHVT5tIh?(7AJlC2kg;B{^V2IV3-}DcO}f8q zJinKX(R#Z2FQ3nGgMyQ-2#3rU~u-l6Z8nswigEmNIXOHGB;?5BZY#3o}X95=HEK3~( ztjjnzMGLJ<9Jto5c67C)03Z`s2?7PAE*G?2<`q5zIuK>(S*Reg!Q;0mV&~ zm3Qv)@?xGhM-SA?d!)fsJ>%BsMQHnl8Ej{(+u1U~c(A3`>5xWU|8#%ajqZMB)odMO zKofzTysHPQrKLjPeHu6u$xX;a1Onl{)wB%bds-+4z*Mezb)G6?D!J{n^RtH7w#(0T zaE6U#DVa_+`LT`MSJ&2N=Zj<3)(NdO+AbhhIT=fO-2CUfGDxxA)5c*jitf86$uj}OfI z&hS`ilCiNOFfezY+^1`GYwNk1_JOBaX>!4x4Sx|afu5Gu{kiRfkCI+VqJ>5K`>z^b zM?lhY->8F!a6t9eMR?S*-76MhHe_TiBXW36b8d|Wv!-?M%1tq-&a6uzB`7$Ls_gF% zg_2R8y`2OFivAVw`>cHCcz(>BfP*p-VuuNct32v5fe09$Gn|0^yXq@mX7KYde5Ywo zneP78P*!Nh#vTRl?kYbH=rdl(l&8E2wFmgAVROBl;ZWX!7C%HBBl^&O%opg0ZKHQ&~0tf6!9&U(PDv`4<_?UG(0ju9_|*hK(~6WUBU*+BSv!Y_7%N= zQS~sr!g?o=E5S=#1-JL$`1D8>HYZNINt^<`d;$^_u=QX zi$5+l%eII!_AarU8Kr;s&lGEi8j8nF;l8LUmHaiGwlauNa@z!M z#1Ja}`sjO!edroFC;m8?jqqgv97YAkp1ktX+q}gpHcg9i?V#1!&uCo? 
zC!%5o9}h16?PxgYM5C!pUSvv5-2Uur&HfGn5S;w3_nyo0&c3whmgsSPDb+apZcnde z?=F=<=oiwymUD1?wQH|`Dr;~;P9JIA#l|b(#A9ZY`0vHVmwJY5!H18Xmk{B21lggl z-5E>XHC*y*fY=1}srW*_unciOie2o+y?J8}P`v+QkN-s!g8)t#c#96gQ?>{L-m?Ge zojM43eGc%;04Mx>3XR$&xRDU}zUR>g;nz0Tm`~KYcH4{)zBrppydg|KD(jX}YC?q; ze1qyY#Dt*Wb%59|8W1t9LR$wa1EcWF{9$vBh>~jcJD|z9c2KoJl!v{%O1Ys<%^Yvk z5k5=Dg^m2*psf%+LmDG54+Ca3I4PWjOKW<)x_Nb9fVzVVCMmSxu1f_F$4?+HV)M4= zZ?qKL*5BWRwL}ja(8QeXZS(E9PoGSkjN3#n!EP><%+o=4JYSEC?ZU+MsOApem%R-z zFE{vwjK(1=u2#s|54$WhzZu^%#7Xo)3-Mgid+Tl`Wq3`HpALD5lbsVsvuXG9&Ziq9 z7~>%xt3LoCtFJgRoj3imrQsCYv1gU!^sq(82eO`RfvLyR@~Zi%u4f~UC|5&hWR}iv zC}uNse{^(;%#OcOV;`3dXx#2ND(-h|>)VO+2cWj3P|Wn*kMa$CTJFo_bK&Q&=j zl6oK2fPT7lv6a3;(wY#vSb8ve9?jzZFHZ)@AkaV5&#fCuslyBh5J~)B9R9{^G_C!( zdYnkw_B78g;-!A@6D5(e6t*V7 zvmESlK;z0%0%Hp!Zz&4#ad2=D1_BiGBRjAO zD)|Y6wx}J+?e8lW@2|1ToelF5dIv}E9;lK?34Z0Ny_zwKEtb?J6_dS}sLLd;P3McM zk2kfa@E$)pyT;gk0aZq_dowBJb2AZYvW0!X0X-i4*?dfL2}r8ytqfc}>VJGH1CCu~ zoFG9F_+CP@I3Uokuvy>jPx!~`ZTQ;2ITpu0%xbTh0LjH&%ljZu+vrz{=A&8|E>3&~ za5hZVef5VW;NxJEZJ%%O0A`&pF=4HLT>29#Ye z70L5?*S;u$WiCl5&&8fvP%P2@x29SN;&MYI@eQ%-(-FynFuY-#{K4spTDL!QF~>d? zz}C@;9KZ=n$9FG|Btq5iAIEEvelyO^m1$}o*N*%~2L~-)c*nC647j3bH zcJo9^5&S2wjx-M#Z2^B7Kv#`4{d#_r=<{FiUcZ2^V{AB7pGYYcmz7`_PNtVsL&DTM zJT`TdJ2Z0TT?!I!DZW?^KxN%{o;g55A8$R~&?Va>mHk~`Fb&GxN6UCM% z{43bN?9LIX0Si~#IvMf>J-k)Bi~c=EjB+VIoSQ~Fb^sC3=GzJ^M|^b9PO6R&`S>`- zjYN-%mow$^M&YyQH6nw^gbO-Gz`4F#kSYu&6Twc?>k|Z zOX*tEZLUxZBG8o?p!&VDDavuv8-$0k{1P8C8m25TSh{W{9R=B*D;hk?d0}eEqt5lZ zm_e6%`lilhFc%Q)kf!we9~q>XbIMfN>dU~Ptr;?E)_R;7g*j^Z)RHZkTB0^@u059F z1SjaDQ@7vprLmzZMs~#(W-=%xy-J5+C9QP_dGjTOvnQ=y(f`y^(lGC-jn`co#d2X$ z)4x001aXuK7`39*^vBP)5m8|)8kw2NY&|T`Hmkh+`|!&ozu2Ll{_d%>APZti@xM6x z9ieBp>DLW|pUj+dW<|A@N_3X1{!_bdGVwp!vwu(t;I97-J^qof{TrP8ld%8CX82?` zmG>WbgCE^qIVOl!2H?eK!kdZ3M#FvswfB*b;~beYK!M|8w*XD&9fIQ-(Y2i2`LDfr zlfzDDsRSmOOSPD;jsrjj_KbAhA3!P{K*X$m9ybBv?#||3M?Ob$+V^PpZZjM+eFEK2 zKW`*tDl!Cx0y(&2>IA&Scn;{|Y_&?9Cs`=1e9_5iZbI3o0o#m0s%oju9K37IptZ*zl6EB$< zOF;Lq@@hwH=~994_>>EL-NXMp<}$2q;Xr*kXf=9#)O%|?{YZj?y1&#%p>0?vXoUtB zs0vCFvX1fv@+t%nm)!tBD_}y!B@m2Fr!@ugM+QN@0NS4QNZ*=R$cW~}VjBmerUj+T zb3=!i&9d{+8hzVT>G%dwnpbjfeV`}4!%T^UN5@ESFK0En@}!G@$H7Pc4M~Un!0!U zB;d$xaX-=ZdIoZDl`@qq@hh4|mK^|@P&mRX&5Z=euttuV3k06s)K#Yy-!frB{z_`Z zuYY74;VGX`L+R%C4onu&5kl;`A5*pI|4QJ3h(q0sH~pGy{2 z-TSk{;LHM1FrD+2rHv>eqbiZ$9K21zK7hU43c!|L-xcMfy~!mRB~4+~06~bi-z@N< zJa*-se94Gnq2yA~x1MB%Bf%e~bt9hMh{mQB%$&99`qTmjVM4OId0vV;rbW5M25!0< z=6nlMtZuh9h$`XC!8q_fVgGJfk2T?sD?K%Q@lX{M>>Xm2_#Aj*BRGIjXk2F9Jj3Nz zmY@mu{FsTQ`#v-7q|RxmjVs~gC3}(^&mTV^ppxzz?~yzbg0EuM-vUpEhr9?tXN_aZ z$3QHNn{4f*xB4#p;K=`RV}aMwmWZinhwDq}{AB9`H8F>^EOd9w)ZlU? z|7#Fsv?a_zcsUXf2gTY_3J-Cwk?oI7QP!mDdU_6Jy^&a*3>rXoy79|_DD$l+ zi>elV=QfQF ++++ +title = "Registry Overview" +description = "High-level overview of the Registry" +keywords = ["registry, on-prem, images, tags, repository, distribution"] +aliases = ["/registry/overview/"] +[menu.main] +parent="smn_registry" +weight=1 ++++ + + +# Docker Registry + +## What it is + +The Registry is a stateless, highly scalable server side application that stores and lets you distribute Docker images. +The Registry is open-source, under the permissive [Apache license](http://en.wikipedia.org/wiki/Apache_License). 
+
+## Why use it
+
+You should use the Registry if you want to:
+
+ * tightly control where your images are being stored
+ * fully own your image distribution pipeline
+ * integrate image storage and distribution tightly into your in-house development workflow
+
+## Alternatives
+
+Users looking for a zero-maintenance, ready-to-go solution are encouraged to
+head over to the [Docker Hub](https://hub.docker.com), which provides a
+free-to-use, hosted Registry, plus additional features (organization accounts,
+automated builds, and more).
+
+Users looking for a commercially supported version of the Registry should look
+into [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/overview/).
+
+## Requirements
+
+The Registry is compatible with Docker engine **version 1.6.0 or higher**.
+If you really need to work with older Docker versions, you should look into the
+[old python registry](https://github.com/docker/docker-registry).
+
+## TL;DR
+
+Start your registry
+
+    docker run -d -p 5000:5000 --name registry registry:2
+
+Pull (or build) some image from the hub
+
+    docker pull ubuntu
+
+Tag the image so that it points to your registry
+
+    docker tag ubuntu localhost:5000/myfirstimage
+
+Push it
+
+    docker push localhost:5000/myfirstimage
+
+Pull it back
+
+    docker pull localhost:5000/myfirstimage
+
+Now stop your registry and remove all data
+
+    docker stop registry && docker rm -v registry
+
+## Next
+
+You should now read the [detailed introduction about the registry](introduction.md),
+or jump directly to [deployment instructions](deploying.md).
diff --git a/docs/insecure.md b/docs/insecure.md
new file mode 100644
index 00000000..38b3a355
--- /dev/null
+++ b/docs/insecure.md
@@ -0,0 +1,114 @@
+
+
+# Insecure Registry
+
+While it's highly recommended to secure your registry using a TLS certificate
+issued by a known CA, you may alternatively decide to use self-signed
+certificates, or even use your registry over plain HTTP.
+
+You have to understand the downsides of doing so, and the extra burden in
+configuration.
+
+## Deploying a plain HTTP registry
+
+> **Warning**: it's not possible to use an insecure registry with basic authentication.
+
+This basically tells Docker to entirely disregard security for your registry.
+While it is relatively easy to configure the daemon this way, it is
+**very** insecure. It exposes your registry to trivial MITM attacks. Only use
+this solution for isolated testing or in a tightly controlled, air-gapped
+environment.
+
+1. Open the `/etc/default/docker` file or `/etc/sysconfig/docker` for editing.
+
+   Which file you edit depends on your operating system; it contains your
+   Engine daemon start options.
+
+2. Edit (or add) the `DOCKER_OPTS` line and add the `--insecure-registry` flag.
+
+   This flag takes the URL of your registry, for example:
+
+   `DOCKER_OPTS="--insecure-registry myregistrydomain.com:5000"`
+
+3. Close and save the configuration file.
+
+4. Restart your Docker daemon.
+
+   The command you use to restart the daemon depends on your operating system.
+   For example, on Ubuntu, this is usually the `service docker stop` and
+   `service docker start` commands.
+
+5. Repeat this configuration on every Engine host that wants to access your registry.
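+
+> **Note**: on Engine versions that ship a `daemon.json` configuration file
+> (commonly `/etc/docker/daemon.json`; location and support vary by version
+> and platform), a roughly equivalent sketch of the same setting is the
+> `insecure-registries` key:
+
+```json
+{
+  "insecure-registries": ["myregistrydomain.com:5000"]
+}
+```
+
+As with the `DOCKER_OPTS` approach, restart the daemon after editing the file.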
+
+## Using self-signed certificates
+
+> **Warning**: using this along with basic authentication requires you to **also** trust the certificate in the OS cert store for some versions of docker (see below).
+
+This is more secure than the insecure registry solution. You must configure
+every docker daemon that wants to access your registry.
+
+1. Generate your own certificate:
+
+        mkdir -p certs && openssl req \
+          -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \
+          -x509 -days 365 -out certs/domain.crt
+
+2. Be sure to use the name `myregistrydomain.com` as a CN.
+
+3. Use the result to [start your registry with TLS enabled](./deploying.md#get-a-certificate).
+
+4. Instruct every docker daemon to trust that certificate.
+
+   This is done by copying the `domain.crt` file to `/etc/docker/certs.d/myregistrydomain.com:5000/ca.crt`.
+
+5. Don't forget to restart the Engine daemon.
+
+## Troubleshooting insecure registry
+
+This section lists some common failures and how to recover from them.
+
+### Failing to pull from a registry that is not using TLS
+
+Failing to configure the Engine daemon as described above and trying to pull
+from a registry that is not using TLS results in the following message:
+
+```
+FATA[0000] Error response from daemon: v1 ping attempt failed with error:
+Get https://myregistrydomain.com:5000/v1/_ping: tls: oversized record received with length 20527.
+If this private registry supports only HTTP or HTTPS with an unknown CA certificate,please add
+`--insecure-registry myregistrydomain.com:5000` to the daemon's arguments.
+In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag;
+simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt
+```
+
+### Docker still complains about the certificate when using authentication?
+
+When using authentication, some versions of docker also require you to trust
+the certificate at the OS level. On Ubuntu this is usually done with:
+
+```bash
+cp certs/domain.crt /usr/local/share/ca-certificates/myregistrydomain.com.crt
+update-ca-certificates
+```
+
+... and on Red Hat (and its derivatives) with:
+
+```bash
+cp certs/domain.crt /etc/pki/ca-trust/source/anchors/myregistrydomain.com.crt
+update-ca-trust
+```
+
+... and on some distributions, e.g. Oracle Linux 6, the Shared System
+Certificates feature needs to be manually enabled first:
+
+```bash
+update-ca-trust enable
+```
+
+Now restart docker (`service docker stop && service docker start`, or any other
+way you use to restart docker).
diff --git a/docs/introduction.md b/docs/introduction.md
new file mode 100644
index 00000000..eceb5ffc
--- /dev/null
+++ b/docs/introduction.md
@@ -0,0 +1,55 @@
+
+
+# Understanding the Registry
+
+A registry is a storage and content delivery system, holding named Docker
+images, available in different tagged versions.
+
+ > Example: the image `distribution/registry`, with tags `2.0` and `2.1`.
+
+Users interact with a registry by using `docker push` and `docker pull`
+commands.
+
+ > Example: `docker pull registry-1.docker.io/distribution/registry:2.1`.
+
+Storage itself is delegated to drivers. The default storage driver is the
+local POSIX filesystem, which is suitable for development or small
+deployments. Additional cloud-based storage drivers like S3, Microsoft Azure,
+OpenStack Swift and Aliyun OSS are also supported. People looking into using
+other storage backends may do so by writing their own driver implementing the
+[Storage API](storage-drivers/index.md).
+
+Since securing access to your hosted images is paramount, the Registry
+natively supports TLS and basic authentication.
+
+The Registry GitHub repository includes additional information about advanced
+authentication and authorization methods. Only very large or public
+deployments are expected to extend the Registry in this way.
+
+Finally, the Registry ships with a robust [notification system](notifications.md),
+calling webhooks in response to activity, and both extensive logging and
+reporting, mostly useful for large installations that want to collect metrics.
+
+## Understanding image naming
+
+Image names as used in typical docker commands reflect their origin:
+
+ * `docker pull ubuntu` instructs docker to pull an image named `ubuntu` from
+   the official Docker Hub. This is simply a shortcut for the longer
+   `docker pull docker.io/library/ubuntu` command.
+ * `docker pull myregistrydomain:port/foo/bar` instructs docker to contact the
+   registry located at `myregistrydomain:port` to find the image `foo/bar`.
+
+You can find out more about the various Docker commands dealing with images in
+the [official Docker engine documentation](/engine/reference/commandline/cli.md).
+
+## Use cases
+
+Running your own Registry is a great solution to integrate with and complement
+your CI/CD system. In a typical workflow, a commit to your source revision
+control system would trigger a build on your CI system, which would then push
+a new image to your Registry if the build is successful. A notification from
+the Registry would then trigger a deployment on a staging environment, or
+notify other systems that a new image is available.
+
+It's also an essential component if you want to quickly deploy a new image
+over a large cluster of machines.
+
+Finally, it's the best way to distribute images inside an isolated network.
+
+## Requirements
+
+You absolutely need to be familiar with Docker, specifically with regard to
+pushing and pulling images. You must understand the difference between the
+daemon and the CLI, and at least grasp basic concepts about networking.
+
+Also, while just starting a registry is fairly easy, operating it in a
+production environment requires operational skills, just like any other
+service. You are expected to be familiar with systems availability and
+scalability, logging and log processing, systems monitoring, and security 101.
+A strong understanding of HTTP and overall network communications, plus
+familiarity with Go, are certainly useful as well for advanced operations or
+hacking.
+
+## Next
+
+Dive into [deploying your registry](deploying.md).
diff --git a/docs/listener/listener.go b/docs/listener/listener.go
deleted file mode 100644
index b93a7a63..00000000
--- a/docs/listener/listener.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package listener
-
-import (
-	"fmt"
-	"net"
-	"os"
-	"time"
-)
-
-// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
-// connections. It's used by ListenAndServe and ListenAndServeTLS so
-// dead TCP connections (e.g. closing laptop mid-download) eventually
-// go away.
-// it is a plain copy-paste from net/http/server.go
-type tcpKeepAliveListener struct {
-	*net.TCPListener
-}
-
-func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
-	tc, err := ln.AcceptTCP()
-	if err != nil {
-		return
-	}
-	tc.SetKeepAlive(true)
-	tc.SetKeepAlivePeriod(3 * time.Minute)
-	return tc, nil
-}
-
-// NewListener announces on laddr and net. Accepted values of the net are
-// 'unix' and 'tcp'
-func NewListener(net, laddr string) (net.Listener, error) {
-	switch net {
-	case "unix":
-		return newUnixListener(laddr)
-	case "tcp", "": // an empty net means tcp
-		return newTCPListener(laddr)
-	default:
-		return nil, fmt.Errorf("unknown address type %s", net)
-	}
-}
-
-func newUnixListener(laddr string) (net.Listener, error) {
-	fi, err := os.Stat(laddr)
-	if err == nil {
-		// the file exists.
-		// try to remove it if it's a socket
-		if !isSocket(fi.Mode()) {
-			return nil, fmt.Errorf("file %s exists and is not a socket", laddr)
-		}
-
-		if err := os.Remove(laddr); err != nil {
-			return nil, err
-		}
-	} else if !os.IsNotExist(err) {
-		// we can't do stat on the file.
-		// it means we can not remove it
-		return nil, err
-	}
-
-	return net.Listen("unix", laddr)
-}
-
-func isSocket(m os.FileMode) bool {
-	return m&os.ModeSocket != 0
-}
-
-func newTCPListener(laddr string) (net.Listener, error) {
-	ln, err := net.Listen("tcp", laddr)
-	if err != nil {
-		return nil, err
-	}
-
-	return tcpKeepAliveListener{ln.(*net.TCPListener)}, nil
-}
diff --git a/docs/menu.md b/docs/menu.md
new file mode 100644
index 00000000..7e24a690
--- /dev/null
+++ b/docs/menu.md
@@ -0,0 +1,23 @@
+
+
+# Overview of Docker Registry Documentation
+
+The Docker Registry documentation includes the following topics:
+
+* [Docker Registry Introduction](index.md)
+* [Understanding the Registry](introduction.md)
+* [Deploying a registry server](deploying.md)
+* [Registry Configuration Reference](configuration.md)
+* [Notifications](notifications.md)
+* [Recipes](recipes/index.md)
+* [Getting help](help.md)
diff --git a/docs/middleware/registry/middleware.go b/docs/middleware/registry/middleware.go
deleted file mode 100644
index 3e6e5cc7..00000000
--- a/docs/middleware/registry/middleware.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package middleware
-
-import (
-	"fmt"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/registry/storage"
-)
-
-// InitFunc is the type of a RegistryMiddleware factory function and is
-// used to register the constructor for different RegistryMiddleware backends.
-type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error)
-
-var middlewares map[string]InitFunc
-var registryoptions []storage.RegistryOption
-
-// Register is used to register an InitFunc for
-// a RegistryMiddleware backend with the given name.
-func Register(name string, initFunc InitFunc) error {
-	if middlewares == nil {
-		middlewares = make(map[string]InitFunc)
-	}
-	if _, exists := middlewares[name]; exists {
-		return fmt.Errorf("name already registered: %s", name)
-	}
-
-	middlewares[name] = initFunc
-
-	return nil
-}
-
-// Get constructs a RegistryMiddleware with the given options using the named backend.
-func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) {
-	if middlewares != nil {
-		if initFunc, exists := middlewares[name]; exists {
-			return initFunc(ctx, registry, options)
-		}
-	}
-
-	return nil, fmt.Errorf("no registry middleware registered with name: %s", name)
-}
-
-// RegisterOptions adds more options to RegistryOption list. Options get applied before
-// any other configuration-based options.
-func RegisterOptions(options ...storage.RegistryOption) error {
-	registryoptions = append(registryoptions, options...)
-	return nil
-}
-
-// GetRegistryOptions returns list of RegistryOption.
-func GetRegistryOptions() []storage.RegistryOption {
-	return registryoptions
-}
diff --git a/docs/middleware/repository/middleware.go b/docs/middleware/repository/middleware.go
deleted file mode 100644
index 27b42aec..00000000
--- a/docs/middleware/repository/middleware.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package middleware
-
-import (
-	"fmt"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
-)
-
-// InitFunc is the type of a RepositoryMiddleware factory function and is
-// used to register the constructor for different RepositoryMiddleware backends.
-type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error)
-
-var middlewares map[string]InitFunc
-
-// Register is used to register an InitFunc for
-// a RepositoryMiddleware backend with the given name.
-func Register(name string, initFunc InitFunc) error {
-	if middlewares == nil {
-		middlewares = make(map[string]InitFunc)
-	}
-	if _, exists := middlewares[name]; exists {
-		return fmt.Errorf("name already registered: %s", name)
-	}
-
-	middlewares[name] = initFunc
-
-	return nil
-}
-
-// Get constructs a RepositoryMiddleware with the given options using the named backend.
-func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) {
-	if middlewares != nil {
-		if initFunc, exists := middlewares[name]; exists {
-			return initFunc(ctx, repository, options)
-		}
-	}
-
-	return nil, fmt.Errorf("no repository middleware registered with name: %s", name)
-}
diff --git a/docs/migration.md b/docs/migration.md
new file mode 100644
index 00000000..da0aba91
--- /dev/null
+++ b/docs/migration.md
@@ -0,0 +1,30 @@
+
+
+# Migrating a 1.0 registry to 2.0
+
+TODO: This needs to be revised in light of Olivier's work
+
+A few thoughts here:
+
+There was no "1.0". There was an implementation of the Registry API V1, but
+only a version 0.9 of the service was released.
+The image formats are not compatible in any way. One must convert v1 images to
+v2 images using a docker client or other tool.
+One can migrate images from one version to the other by pulling images from
+the old registry and pushing them to the v2 registry; a sketch of this follows
+at the end of this page.
+
+-----
+
+The Docker Registry 2.0 is backward compatible with images created by the
+earlier specification. If you are migrating a private registry to version 2.0,
+you should use the following process:
+
+1. Configure and test a 2.0 registry image in a sandbox environment.
+
+2. Back up your production image storage.
+
+   Your production image storage should reside on a volume or storage backend.
+   Make sure you have a backup of its contents.
+
+3. Stop your existing registry service.
+
+4. Restart your registry with your tested 2.0 image.
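+
+A minimal sketch of the pull-retag-push approach mentioned above (the registry
+hostnames and the image name are placeholders):
+
+    docker pull oldregistry.example.com/myorg/myimage:1.0
+    docker tag oldregistry.example.com/myorg/myimage:1.0 newregistry.example.com:5000/myorg/myimage:1.0
+    docker push newregistry.example.com:5000/myorg/myimage:1.0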
diff --git a/docs/notifications.md b/docs/notifications.md
new file mode 100644
index 00000000..c511eb59
--- /dev/null
+++ b/docs/notifications.md
@@ -0,0 +1,350 @@
+
+
+# Notifications
+
+The Registry supports sending webhook notifications in response to events
+happening within the registry. Notifications are sent in response to manifest
+pushes and pulls and layer pushes and pulls. These actions are serialized into
+events. The events are queued into a registry-internal broadcast system which
+queues and dispatches events to [_Endpoints_](#endpoints).
+
+![](images/notifications.png)
+
+## Endpoints
+
+Notifications are sent to _endpoints_ via HTTP requests. Each configured
+endpoint has isolated queues, retry configuration, and HTTP targets within
+each instance of a registry. When an action happens within the registry, it is
+converted into an event which is dropped into an in-memory queue. When the
+event reaches the end of the queue, an HTTP request is made to the endpoint
+until the request succeeds. The events are sent serially to each endpoint, but
+order is not guaranteed.
+
+## Configuration
+
+To set up a registry instance to send notifications to endpoints, one must add
+them to the configuration. A simple example follows:
+
+      notifications:
+        endpoints:
+          - name: alistener
+            url: https://mylistener.example.com/event
+            headers:
+              Authorization: [Bearer <your token>]
+            timeout: 500ms
+            threshold: 5
+            backoff: 1s
+
+The above would configure the registry with an endpoint to send events to
+`https://mylistener.example.com/event`, with the header "Authorization: Bearer
+<your token>". The request would time out after 500 milliseconds. If
+5 failures happen consecutively, the registry will back off for 1 second
+before trying again.
+
+For details on the fields, please see the [configuration documentation](configuration.md#notifications).
+
+A properly configured endpoint should lead to a log message from the registry
+upon startup:
+
+```
+INFO[0000] configuring endpoint alistener (https://mylistener.example.com/event), timeout=500ms, headers=map[Authorization:[Bearer <your token>]] app.id=812bfeb2-62d6-43cf-b0c6-152f541618a3 environment=development service=registry
+```
+
+## Events
+
+Events have a well-defined JSON structure and are sent as the body of
+notification requests. One or more events are sent in a structure called an
+envelope. Each event has a unique ID that can be used to identify incoming
+requests, if required. Along with that, an _action_ is provided with a
+_target_, identifying the object mutated during the event.
+
+The fields available in an `event` are described below.
+
+Field | Type | Description
+----- | ----- | -------------
+id | string | ID provides a unique identifier for the event.
+timestamp | Time | Timestamp is the time at which the event occurred.
+action | string | Action indicates what action encompasses the provided event.
+target | distribution.Descriptor | Target uniquely describes the target of the event.
+length | int | Length in bytes of content. Same as Size field in Descriptor.
+repository | string | Repository identifies the named repository.
+fromRepository | string | FromRepository identifies the named repository which a blob was mounted from, if appropriate.
+url | string | URL provides a direct link to the content.
+tag | string | Tag identifies a tag name in tag events.
+request | [RequestRecord](https://godoc.org/github.com/docker/distribution/notifications#RequestRecord) | Request covers the request that generated the event.
+actor | [ActorRecord](https://godoc.org/github.com/docker/distribution/notifications#ActorRecord) | Actor specifies the agent that initiated the event. For most situations, this could be from the authorization context of the request.
+source | [SourceRecord](https://godoc.org/github.com/docker/distribution/notifications#SourceRecord) | Source identifies the registry node that generated the event. Put differently, while the actor "initiates" the event, the source "generates" it.
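+
+As a rough illustration only (a simplified sketch, not the actual
+`notifications.Event` type from `github.com/docker/distribution/notifications`),
+the shape described by the table could be modeled in Go like this:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// Event is a simplified sketch of the notification event payload.
+// Field names follow the table above; Request, Actor, and Source
+// records are omitted for brevity.
+type Event struct {
+	ID        string    `json:"id"`
+	Timestamp time.Time `json:"timestamp"`
+	Action    string    `json:"action"`
+	Target    struct {
+		MediaType  string `json:"mediaType"`
+		Size       int64  `json:"size"`
+		Digest     string `json:"digest"`
+		Length     int64  `json:"length"`
+		Repository string `json:"repository"`
+		URL        string `json:"url"`
+		Tag        string `json:"tag"`
+	} `json:"target"`
+}
+
+func main() {
+	// Marshal a skeleton event to show the resulting JSON shape.
+	e := Event{ID: "example-id", Timestamp: time.Now(), Action: "push"}
+	b, _ := json.MarshalIndent(e, "", "  ")
+	fmt.Println(string(b))
+}
+```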
+
+The following is an example of a JSON event, sent in response to the pull of a
+manifest:
+
+```json
+{
+  "events": [
+    {
+      "id": "320678d8-ca14-430f-8bb6-4ca139cd83f7",
+      "timestamp": "2016-03-09T14:44:26.402973972-08:00",
+      "action": "pull",
+      "target": {
+        "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+        "size": 708,
+        "digest": "sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
+        "length": 708,
+        "repository": "hello-world",
+        "url": "http://192.168.100.227:5000/v2/hello-world/manifests/sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
+        "tag": "latest"
+      },
+      "request": {
+        "id": "6df24a34-0959-4923-81ca-14f09767db19",
+        "addr": "192.168.64.11:42961",
+        "host": "192.168.100.227:5000",
+        "method": "GET",
+        "useragent": "curl/7.38.0"
+      },
+      "actor": {},
+      "source": {
+        "addr": "xtal.local:5000",
+        "instanceID": "a53db899-3b4b-4a62-a067-8dd013beaca4"
+      }
+    }
+  ]
+}
+```
+
+The target struct of events which are sent when manifests and blobs are
+deleted contains a subset of the data contained in Get and Put events.
+Specifically, only the digest and repository are sent.
+
+```json
+"target": {
+  "digest": "sha256:d89e1bee20d9cb344674e213b581f14fbd8e70274ecf9d10c514bab78a307845",
+  "repository": "library/test"
+},
+```
+
+> __NOTE:__ As of version 2.1, the `length` field for event targets
+> is being deprecated for the `size` field, bringing the target in line with
+> common nomenclature. Both will continue to be set for the foreseeable
+> future. Newer code should favor `size` but accept either.
+
+## Envelope
+
+The envelope contains one or more events, with the following JSON structure:
+
+```json
+{
+  "events": [ ... ]
+}
+```
+
+While events may be sent in the same envelope, the set of events within that
+envelope has no implied relationship. For example, the registry may choose to
+group unrelated events and send them in the same envelope to reduce the total
+number of requests.
+
+The full package has the mediatype
+"application/vnd.docker.distribution.events.v1+json", which will be set on the
+request coming to an endpoint.
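+
+For quick manual testing, an envelope can be replayed against a local endpoint
+with `curl` (a sketch; the URL and payload are placeholders):
+
+```
+curl -X POST http://localhost:5003/callback \
+  -H "Content-Type: application/vnd.docker.distribution.events.v1+json" \
+  --data '{"events": []}'
+```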
+
+An example of a full event may look as follows:
+
+```json
+POST /callback HTTP/1.1
+Host: <callback host>
+Authorization: Bearer <token>
+Content-Type: application/vnd.docker.distribution.events.v1+json
+
+{
+  "events": [
+    {
+      "id": "asdf-asdf-asdf-asdf-0",
+      "timestamp": "2006-01-02T15:04:05Z",
+      "action": "push",
+      "target": {
+        "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
+        "length": 1,
+        "digest": "sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
+        "repository": "library/test",
+        "url": "http://example.com/v2/library/test/manifests/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
+      },
+      "request": {
+        "id": "asdfasdf",
+        "addr": "client.local",
+        "host": "registrycluster.local",
+        "method": "PUT",
+        "useragent": "test/0.1"
+      },
+      "actor": {
+        "name": "test-actor"
+      },
+      "source": {
+        "addr": "hostname.local:port"
+      }
+    },
+    {
+      "id": "asdf-asdf-asdf-asdf-1",
+      "timestamp": "2006-01-02T15:04:05Z",
+      "action": "push",
+      "target": {
+        "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
+        "length": 2,
+        "digest": "sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
+        "repository": "library/test",
+        "url": "http://example.com/v2/library/test/blobs/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
+      },
+      "request": {
+        "id": "asdfasdf",
+        "addr": "client.local",
+        "host": "registrycluster.local",
+        "method": "PUT",
+        "useragent": "test/0.1"
+      },
+      "actor": {
+        "name": "test-actor"
+      },
+      "source": {
+        "addr": "hostname.local:port"
+      }
+    },
+    {
+      "id": "asdf-asdf-asdf-asdf-2",
+      "timestamp": "2006-01-02T15:04:05Z",
+      "action": "push",
+      "target": {
+        "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
+        "length": 3,
+        "digest": "sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
+        "repository": "library/test",
+        "url": "http://example.com/v2/library/test/blobs/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
+      },
+      "request": {
+        "id": "asdfasdf",
+        "addr": "client.local",
+        "host": "registrycluster.local",
+        "method": "PUT",
+        "useragent": "test/0.1"
+      },
+      "actor": {
+        "name": "test-actor"
+      },
+      "source": {
+        "addr": "hostname.local:port"
+      }
+    }
+  ]
+}
+```
+
+## Responses
+
+The registry is fairly accepting of the response codes from endpoints. If an
+endpoint responds with any 2xx or 3xx response code (after following
+redirects), the message is considered delivered and discarded.
+
+In turn, it is recommended that endpoints are accepting of incoming requests,
+as well. While the format of event envelopes is standardized by media type,
+any pickiness about validation may cause the queue to back up on the
+registry.
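+
+As an illustration of such a tolerant endpoint, here is a minimal sketch of a
+webhook receiver in Go (the port and path are placeholders, and the envelope
+struct is simplified; it is not the actual `notifications` package type):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"log"
+	"net/http"
+)
+
+// envelope mirrors just enough of the JSON structure shown above.
+type envelope struct {
+	Events []struct {
+		ID     string `json:"id"`
+		Action string `json:"action"`
+	} `json:"events"`
+}
+
+func main() {
+	http.HandleFunc("/callback", func(w http.ResponseWriter, r *http.Request) {
+		var env envelope
+		// Be permissive: log decode failures but still return 200 so the
+		// registry does not back up its queue retrying this delivery.
+		if err := json.NewDecoder(r.Body).Decode(&env); err != nil {
+			log.Printf("decode error: %v", err)
+		}
+		for _, e := range env.Events {
+			log.Printf("received event %s: %s", e.ID, e.Action)
+		}
+		w.WriteHeader(http.StatusOK)
+	})
+	log.Fatal(http.ListenAndServe(":5003", nil))
+}
+```
+
+Pointing an endpoint's `url` at `http://localhost:5003/callback` (as in the
+monitoring example below) would log each delivered event.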
+
+## Monitoring
+
+The state of the endpoints is reported via the `debug/vars` HTTP interface,
+usually configured to `http://localhost:5001/debug/vars`. Information such as
+configuration and metrics is available by endpoint.
+
+The following provides an example of a few endpoints that have experienced
+several failures and have since recovered:
+
+```json
+"notifications":{
+  "endpoints":[
+    {
+      "name":"local-5003",
+      "url":"http://localhost:5003/callback",
+      "Headers":{
+        "Authorization":[
+          "Bearer \u003can example token\u003e"
+        ]
+      },
+      "Timeout":1000000000,
+      "Threshold":10,
+      "Backoff":1000000000,
+      "Metrics":{
+        "Pending":76,
+        "Events":76,
+        "Successes":0,
+        "Failures":0,
+        "Errors":46,
+        "Statuses":{
+
+        }
+      }
+    },
+    {
+      "name":"local-8083",
+      "url":"http://localhost:8083/callback",
+      "Headers":null,
+      "Timeout":1000000000,
+      "Threshold":10,
+      "Backoff":1000000000,
+      "Metrics":{
+        "Pending":0,
+        "Events":76,
+        "Successes":76,
+        "Failures":0,
+        "Errors":28,
+        "Statuses":{
+          "202 Accepted":76
+        }
+      }
+    }
+  ]
+}
+```
+
+If using notifications as part of a larger application, it is _critical_ to
+monitor the size ("Pending" above) of the endpoint queues. If failures or
+queue sizes are increasing, it can indicate a larger problem.
+
+The logs are also a valuable resource for monitoring problems. A failing
+endpoint leads to messages similar to the following:
+
+```
+ERRO[0340] retryingsink: error writing events: httpSink{http://localhost:5003/callback}: error posting: Post http://localhost:5003/callback: dial tcp 127.0.0.1:5003: connection refused, retrying
+WARN[0340] httpSink{http://localhost:5003/callback} encountered too many errors, backing off
+```
+
+The above indicates that several errors have led to a backoff, and the
+registry will wait before retrying.
+
+## Considerations
+
+Currently, the queues are in-memory, so endpoints should be _reasonably
+reliable_. They are designed to make a best effort to send the messages, but
+if an instance is lost, messages may be dropped. If an endpoint goes down,
+care should be taken to ensure that the registry instance is not terminated
+before the endpoint comes back up, or messages will be lost.
+
+This can be mitigated by running endpoints in close proximity to the registry
+instances. One could run an endpoint that pages to disk and then forwards a
+request to provide better durability.
+
+The notification system is designed around a series of interchangeable _sinks_
+which can be wired up to achieve interesting behavior. If this system doesn't
+provide acceptable guarantees, adding a transactional `Sink` to the registry
+is a possibility, although it may have an effect on request service time.
+Please see the
+[godoc](http://godoc.org/github.com/docker/distribution/notifications#Sink)
+for more information.
diff --git a/docs/proxy/proxyauth.go b/docs/proxy/proxyauth.go
deleted file mode 100644
index a9cc43a6..00000000
--- a/docs/proxy/proxyauth.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package proxy
-
-import (
-	"net/http"
-	"net/url"
-
-	"github.com/docker/distribution/registry/client/auth"
-)
-
-const tokenURL = "https://auth.docker.io/token"
-const challengeHeader = "Docker-Distribution-Api-Version"
-
-type userpass struct {
-	username string
-	password string
-}
-
-type credentials struct {
-	creds map[string]userpass
-}
-
-func (c credentials) Basic(u *url.URL) (string, string) {
-	up := c.creds[u.String()]
-
-	return up.username, up.password
-}
-
-func (c credentials) RefreshToken(u *url.URL, service string) string {
-	return ""
-}
-
-func (c credentials) SetRefreshToken(u *url.URL, service, token string) {
-}
-
-// configureAuth stores credentials for challenge responses
-func configureAuth(username, password string) (auth.CredentialStore, error) {
-	creds := map[string]userpass{
-		tokenURL: {
-			username: username,
-			password: password,
-		},
-	}
-	return credentials{creds: creds}, nil
-}
-
-func ping(manager auth.ChallengeManager, endpoint, versionHeader string) error {
-	resp, err := http.Get(endpoint)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-
-	if err := manager.AddResponse(resp); err != nil {
-		return err
-	}
-
-	return nil
-}
diff --git a/docs/proxy/proxyblobstore.go b/docs/proxy/proxyblobstore.go
deleted file mode 100644
index 7a6d7ea2..00000000
--- a/docs/proxy/proxyblobstore.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package proxy
-
-import (
-	"io"
-	"net/http"
-	"strconv"
-	"sync"
-	"time"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/reference"
-	"github.com/docker/distribution/registry/proxy/scheduler"
-)
-
-// todo(richardscothern): from cache control header or config file
-const blobTTL = time.Duration(24 * 7 * time.Hour)
-
-type proxyBlobStore struct {
-	localStore     distribution.BlobStore
-	remoteStore    distribution.BlobService
-	scheduler      *scheduler.TTLExpirationScheduler
-	repositoryName reference.Named
-	authChallenger authChallenger
-}
-
-var _ distribution.BlobStore = &proxyBlobStore{}
-
-// inflight tracks currently downloading blobs
-var inflight = make(map[digest.Digest]struct{})
-
-// mu protects inflight
-var mu sync.Mutex
-
-func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) {
-	w.Header().Set("Content-Length", strconv.FormatInt(length, 10))
-	w.Header().Set("Content-Type", mediaType)
-	w.Header().Set("Docker-Content-Digest", digest.String())
-	w.Header().Set("Etag", digest.String())
-}
-
-func (pbs *proxyBlobStore) copyContent(ctx context.Context, dgst digest.Digest, writer io.Writer) (distribution.Descriptor, error) {
-	desc, err := pbs.remoteStore.Stat(ctx, dgst)
-	if err != nil {
-		return distribution.Descriptor{}, err
-	}
-
-	if w, ok := writer.(http.ResponseWriter); ok {
-		setResponseHeaders(w, desc.Size, desc.MediaType, dgst)
-	}
-
-	remoteReader, err := pbs.remoteStore.Open(ctx, dgst)
-	if err != nil {
-		return distribution.Descriptor{}, err
-	}
-
-	_, err = io.CopyN(writer, remoteReader, desc.Size)
-	if err != nil {
-		return distribution.Descriptor{}, err
-	}
-
-	proxyMetrics.BlobPush(uint64(desc.Size))
-
-	return desc, nil
-}
-
-func (pbs *proxyBlobStore) serveLocal(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) (bool, error) {
-	localDesc, err := pbs.localStore.Stat(ctx, dgst)
pbs.localStore.Stat(ctx, dgst) - if err != nil { - // Stat can report a zero sized file here if it's checked between creation - // and population. Return nil error, and continue - return false, nil - } - - if err == nil { - proxyMetrics.BlobPush(uint64(localDesc.Size)) - return true, pbs.localStore.ServeBlob(ctx, w, r, dgst) - } - - return false, nil - -} - -func (pbs *proxyBlobStore) storeLocal(ctx context.Context, dgst digest.Digest) error { - defer func() { - mu.Lock() - delete(inflight, dgst) - mu.Unlock() - }() - - var desc distribution.Descriptor - var err error - var bw distribution.BlobWriter - - bw, err = pbs.localStore.Create(ctx) - if err != nil { - return err - } - - desc, err = pbs.copyContent(ctx, dgst, bw) - if err != nil { - return err - } - - _, err = bw.Commit(ctx, desc) - if err != nil { - return err - } - - return nil -} - -func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - served, err := pbs.serveLocal(ctx, w, r, dgst) - if err != nil { - context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error()) - return err - } - - if served { - return nil - } - - if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { - return err - } - - mu.Lock() - _, ok := inflight[dgst] - if ok { - mu.Unlock() - _, err := pbs.copyContent(ctx, dgst, w) - return err - } - inflight[dgst] = struct{}{} - mu.Unlock() - - go func(dgst digest.Digest) { - if err := pbs.storeLocal(ctx, dgst); err != nil { - context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error()) - } - - blobRef, err := reference.WithDigest(pbs.repositoryName, dgst) - if err != nil { - context.GetLogger(ctx).Errorf("Error creating reference: %s", err) - return - } - - pbs.scheduler.AddBlob(blobRef, repositoryTTL) - }(dgst) - - _, err = pbs.copyContent(ctx, dgst, w) - if err != nil { - return err - } - return nil -} - -func (pbs *proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - desc, err := pbs.localStore.Stat(ctx, dgst) - if err == nil { - return desc, err - } - - if err != distribution.ErrBlobUnknown { - return distribution.Descriptor{}, err - } - - if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { - return distribution.Descriptor{}, err - } - - return pbs.remoteStore.Stat(ctx, dgst) -} - -func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - blob, err := pbs.localStore.Get(ctx, dgst) - if err == nil { - return blob, nil - } - - if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { - return []byte{}, err - } - - blob, err = pbs.remoteStore.Get(ctx, dgst) - if err != nil { - return []byte{}, err - } - - _, err = pbs.localStore.Put(ctx, "", blob) - if err != nil { - return []byte{}, err - } - return blob, nil -} - -// Unsupported functions -func (pbs *proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - return distribution.Descriptor{}, distribution.ErrUnsupported -} - -func (pbs *proxyBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - return nil, distribution.ErrUnsupported -} - -func (pbs *proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - return nil, distribution.ErrUnsupported -} - -func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) 
{ - return distribution.Descriptor{}, distribution.ErrUnsupported -} - -func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - return nil, distribution.ErrUnsupported -} - -func (pbs *proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { - return distribution.ErrUnsupported -} diff --git a/docs/proxy/proxyblobstore_test.go b/docs/proxy/proxyblobstore_test.go deleted file mode 100644 index 967dcd3d..00000000 --- a/docs/proxy/proxyblobstore_test.go +++ /dev/null @@ -1,409 +0,0 @@ -package proxy - -import ( - "io/ioutil" - "math/rand" - "net/http" - "net/http/httptest" - "sync" - "testing" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/proxy/scheduler" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver/filesystem" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -var sbsMu sync.Mutex - -type statsBlobStore struct { - stats map[string]int - blobs distribution.BlobStore -} - -func (sbs statsBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - sbsMu.Lock() - sbs.stats["put"]++ - sbsMu.Unlock() - - return sbs.blobs.Put(ctx, mediaType, p) -} - -func (sbs statsBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - sbsMu.Lock() - sbs.stats["get"]++ - sbsMu.Unlock() - - return sbs.blobs.Get(ctx, dgst) -} - -func (sbs statsBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - sbsMu.Lock() - sbs.stats["create"]++ - sbsMu.Unlock() - - return sbs.blobs.Create(ctx, options...) 
-} - -func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - sbsMu.Lock() - sbs.stats["resume"]++ - sbsMu.Unlock() - - return sbs.blobs.Resume(ctx, id) -} - -func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - sbsMu.Lock() - sbs.stats["open"]++ - sbsMu.Unlock() - - return sbs.blobs.Open(ctx, dgst) -} - -func (sbs statsBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - sbsMu.Lock() - sbs.stats["serveblob"]++ - sbsMu.Unlock() - - return sbs.blobs.ServeBlob(ctx, w, r, dgst) -} - -func (sbs statsBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - - sbsMu.Lock() - sbs.stats["stat"]++ - sbsMu.Unlock() - - return sbs.blobs.Stat(ctx, dgst) -} - -func (sbs statsBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { - sbsMu.Lock() - sbs.stats["delete"]++ - sbsMu.Unlock() - - return sbs.blobs.Delete(ctx, dgst) -} - -type testEnv struct { - numUnique int - inRemote []distribution.Descriptor - store proxyBlobStore - ctx context.Context -} - -func (te *testEnv) LocalStats() *map[string]int { - sbsMu.Lock() - ls := te.store.localStore.(statsBlobStore).stats - sbsMu.Unlock() - return &ls -} - -func (te *testEnv) RemoteStats() *map[string]int { - sbsMu.Lock() - rs := te.store.remoteStore.(statsBlobStore).stats - sbsMu.Unlock() - return &rs -} - -// Populate remote store and record the digests -func makeTestEnv(t *testing.T, name string) *testEnv { - nameRef, err := reference.ParseNamed(name) - if err != nil { - t.Fatalf("unable to parse reference: %s", err) - } - - ctx := context.Background() - - truthDir, err := ioutil.TempDir("", "truth") - if err != nil { - t.Fatalf("unable to create tempdir: %s", err) - } - - cacheDir, err := ioutil.TempDir("", "cache") - if err != nil { - t.Fatalf("unable to create tempdir: %s", err) - } - - localDriver, err := filesystem.FromParameters(map[string]interface{}{ - "rootdirectory": truthDir, - }) - if err != nil { - t.Fatalf("unable to create filesystem driver: %s", err) - } - - // todo: create a tempfile area here - localRegistry, err := storage.NewRegistry(ctx, localDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - localRepo, err := localRegistry.Repository(ctx, nameRef) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - - cacheDriver, err := filesystem.FromParameters(map[string]interface{}{ - "rootdirectory": cacheDir, - }) - if err != nil { - t.Fatalf("unable to create filesystem driver: %s", err) - } - - truthRegistry, err := storage.NewRegistry(ctx, cacheDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - truthRepo, err := truthRegistry.Repository(ctx, nameRef) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - - truthBlobs := statsBlobStore{ - stats: make(map[string]int), - blobs: truthRepo.Blobs(ctx), - } - - localBlobs := statsBlobStore{ - stats: make(map[string]int), - blobs: localRepo.Blobs(ctx), - } - - s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") - - proxyBlobStore := proxyBlobStore{ - repositoryName: nameRef, - remoteStore: truthBlobs, - localStore: localBlobs, - scheduler: s, - 
authChallenger: &mockChallenger{}, - } - - te := &testEnv{ - store: proxyBlobStore, - ctx: ctx, - } - return te -} - -func makeBlob(size int) []byte { - blob := make([]byte, size, size) - for i := 0; i < size; i++ { - blob[i] = byte('A' + rand.Int()%48) - } - return blob -} - -func init() { - rand.Seed(42) -} - -func perm(m []distribution.Descriptor) []distribution.Descriptor { - for i := 0; i < len(m); i++ { - j := rand.Intn(i + 1) - tmp := m[i] - m[i] = m[j] - m[j] = tmp - } - return m -} - -func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) { - var inRemote []distribution.Descriptor - - for i := 0; i < numUnique; i++ { - bytes := makeBlob(size) - for j := 0; j < blobCount/numUnique; j++ { - desc, err := te.store.remoteStore.Put(te.ctx, "", bytes) - if err != nil { - t.Fatalf("Put in store") - } - - inRemote = append(inRemote, desc) - } - } - - te.inRemote = inRemote - te.numUnique = numUnique -} -func TestProxyStoreGet(t *testing.T) { - te := makeTestEnv(t, "foo/bar") - - localStats := te.LocalStats() - remoteStats := te.RemoteStats() - - populate(t, te, 1, 10, 1) - _, err := te.store.Get(te.ctx, te.inRemote[0].Digest) - if err != nil { - t.Fatal(err) - } - - if (*localStats)["get"] != 1 && (*localStats)["put"] != 1 { - t.Errorf("Unexpected local counts") - } - - if (*remoteStats)["get"] != 1 { - t.Errorf("Unexpected remote get count") - } - - _, err = te.store.Get(te.ctx, te.inRemote[0].Digest) - if err != nil { - t.Fatal(err) - } - - if (*localStats)["get"] != 2 && (*localStats)["put"] != 1 { - t.Errorf("Unexpected local counts") - } - - if (*remoteStats)["get"] != 1 { - t.Errorf("Unexpected remote get count") - } - -} - -func TestProxyStoreStat(t *testing.T) { - te := makeTestEnv(t, "foo/bar") - - remoteBlobCount := 1 - populate(t, te, remoteBlobCount, 10, 1) - - localStats := te.LocalStats() - remoteStats := te.RemoteStats() - - // Stat - touches both stores - for _, d := range te.inRemote { - _, err := te.store.Stat(te.ctx, d.Digest) - if err != nil { - t.Fatalf("Error stating proxy store") - } - } - - if (*localStats)["stat"] != remoteBlobCount { - t.Errorf("Unexpected local stat count") - } - - if (*remoteStats)["stat"] != remoteBlobCount { - t.Errorf("Unexpected remote stat count") - } - - if te.store.authChallenger.(*mockChallenger).count != len(te.inRemote) { - t.Fatalf("Unexpected auth challenge count, got %#v", te.store.authChallenger) - } - -} - -func TestProxyStoreServeHighConcurrency(t *testing.T) { - te := makeTestEnv(t, "foo/bar") - blobSize := 200 - blobCount := 10 - numUnique := 1 - populate(t, te, blobCount, blobSize, numUnique) - - numClients := 16 - testProxyStoreServe(t, te, numClients) -} - -func TestProxyStoreServeMany(t *testing.T) { - te := makeTestEnv(t, "foo/bar") - blobSize := 200 - blobCount := 10 - numUnique := 4 - populate(t, te, blobCount, blobSize, numUnique) - - numClients := 4 - testProxyStoreServe(t, te, numClients) -} - -// todo(richardscothern): blobCount must be smaller than num clients -func TestProxyStoreServeBig(t *testing.T) { - te := makeTestEnv(t, "foo/bar") - - blobSize := 2 << 20 - blobCount := 4 - numUnique := 2 - populate(t, te, blobCount, blobSize, numUnique) - - numClients := 4 - testProxyStoreServe(t, te, numClients) -} - -// testProxyStoreServe will create clients to consume all blobs -// populated in the truth store -func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) { - localStats := te.LocalStats() - remoteStats := te.RemoteStats() - - var wg sync.WaitGroup - - for i := 0; i < numClients; 
i++ { - // Serveblob - pulls through blobs - wg.Add(1) - go func() { - defer wg.Done() - for _, remoteBlob := range te.inRemote { - w := httptest.NewRecorder() - r, err := http.NewRequest("GET", "", nil) - if err != nil { - t.Fatal(err) - } - - err = te.store.ServeBlob(te.ctx, w, r, remoteBlob.Digest) - if err != nil { - t.Fatalf(err.Error()) - } - - bodyBytes := w.Body.Bytes() - localDigest := digest.FromBytes(bodyBytes) - if localDigest != remoteBlob.Digest { - t.Fatalf("Mismatching blob fetch from proxy") - } - } - }() - } - - wg.Wait() - - remoteBlobCount := len(te.inRemote) - if (*localStats)["stat"] != remoteBlobCount*numClients && (*localStats)["create"] != te.numUnique { - t.Fatal("Expected: stat:", remoteBlobCount*numClients, "create:", remoteBlobCount) - } - - // Wait for any async storage goroutines to finish - time.Sleep(3 * time.Second) - - remoteStatCount := (*remoteStats)["stat"] - remoteOpenCount := (*remoteStats)["open"] - - // Serveblob - blobs come from local - for _, dr := range te.inRemote { - w := httptest.NewRecorder() - r, err := http.NewRequest("GET", "", nil) - if err != nil { - t.Fatal(err) - } - - err = te.store.ServeBlob(te.ctx, w, r, dr.Digest) - if err != nil { - t.Fatalf(err.Error()) - } - - dl := digest.FromBytes(w.Body.Bytes()) - if dl != dr.Digest { - t.Errorf("Mismatching blob fetch from proxy") - } - } - - localStats = te.LocalStats() - remoteStats = te.RemoteStats() - - // Ensure remote unchanged - if (*remoteStats)["stat"] != remoteStatCount && (*remoteStats)["open"] != remoteOpenCount { - t.Fatalf("unexpected remote stats: %#v", remoteStats) - } -} diff --git a/docs/proxy/proxymanifeststore.go b/docs/proxy/proxymanifeststore.go deleted file mode 100644 index f08e285d..00000000 --- a/docs/proxy/proxymanifeststore.go +++ /dev/null @@ -1,95 +0,0 @@ -package proxy - -import ( - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/proxy/scheduler" -) - -// todo(richardscothern): from cache control header or config -const repositoryTTL = time.Duration(24 * 7 * time.Hour) - -type proxyManifestStore struct { - ctx context.Context - localManifests distribution.ManifestService - remoteManifests distribution.ManifestService - repositoryName reference.Named - scheduler *scheduler.TTLExpirationScheduler - authChallenger authChallenger -} - -var _ distribution.ManifestService = &proxyManifestStore{} - -func (pms proxyManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - exists, err := pms.localManifests.Exists(ctx, dgst) - if err != nil { - return false, err - } - if exists { - return true, nil - } - if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil { - return false, err - } - return pms.remoteManifests.Exists(ctx, dgst) -} - -func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - // At this point `dgst` was either specified explicitly, or returned by the - // tagstore with the most recent association. - var fromRemote bool - manifest, err := pms.localManifests.Get(ctx, dgst, options...) - if err != nil { - if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil { - return nil, err - } - - manifest, err = pms.remoteManifests.Get(ctx, dgst, options...) 
- if err != nil { - return nil, err - } - fromRemote = true - } - - _, payload, err := manifest.Payload() - if err != nil { - return nil, err - } - - proxyMetrics.ManifestPush(uint64(len(payload))) - if fromRemote { - proxyMetrics.ManifestPull(uint64(len(payload))) - - _, err = pms.localManifests.Put(ctx, manifest) - if err != nil { - return nil, err - } - - // Schedule the manifest blob for removal - repoBlob, err := reference.WithDigest(pms.repositoryName, dgst) - if err != nil { - context.GetLogger(ctx).Errorf("Error creating reference: %s", err) - return nil, err - } - - pms.scheduler.AddManifest(repoBlob, repositoryTTL) - // Ensure the manifest blob is cleaned up - //pms.scheduler.AddBlob(blobRef, repositoryTTL) - - } - - return manifest, err -} - -func (pms proxyManifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - var d digest.Digest - return d, distribution.ErrUnsupported -} - -func (pms proxyManifestStore) Delete(ctx context.Context, dgst digest.Digest) error { - return distribution.ErrUnsupported -} diff --git a/docs/proxy/proxymanifeststore_test.go b/docs/proxy/proxymanifeststore_test.go deleted file mode 100644 index 0d6b7171..00000000 --- a/docs/proxy/proxymanifeststore_test.go +++ /dev/null @@ -1,274 +0,0 @@ -package proxy - -import ( - "io" - "sync" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/proxy/scheduler" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/testutil" - "github.com/docker/libtrust" -) - -type statsManifest struct { - manifests distribution.ManifestService - stats map[string]int -} - -type manifestStoreTestEnv struct { - manifestDigest digest.Digest // digest of the signed manifest in the local storage - manifests proxyManifestStore -} - -func (te manifestStoreTestEnv) LocalStats() *map[string]int { - ls := te.manifests.localManifests.(statsManifest).stats - return &ls -} - -func (te manifestStoreTestEnv) RemoteStats() *map[string]int { - rs := te.manifests.remoteManifests.(statsManifest).stats - return &rs -} - -func (sm statsManifest) Delete(ctx context.Context, dgst digest.Digest) error { - sm.stats["delete"]++ - return sm.manifests.Delete(ctx, dgst) -} - -func (sm statsManifest) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - sm.stats["exists"]++ - return sm.manifests.Exists(ctx, dgst) -} - -func (sm statsManifest) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - sm.stats["get"]++ - return sm.manifests.Get(ctx, dgst) -} - -func (sm statsManifest) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - sm.stats["put"]++ - return sm.manifests.Put(ctx, manifest) -} - -type mockChallenger struct { - sync.Mutex - count int -} - -// Called for remote operations only -func (m *mockChallenger) tryEstablishChallenges(context.Context) error { - m.Lock() - defer m.Unlock() - m.count++ - return nil -} - -func (m 
*mockChallenger) credentialStore() auth.CredentialStore { - return nil -} - -func (m *mockChallenger) challengeManager() auth.ChallengeManager { - return nil -} - -func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { - nameRef, err := reference.ParseNamed(name) - if err != nil { - t.Fatalf("unable to parse reference: %s", err) - } - k, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - ctx := context.Background() - truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), - storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), - storage.Schema1SigningKey(k)) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - truthRepo, err := truthRegistry.Repository(ctx, nameRef) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - tr, err := truthRepo.Manifests(ctx) - if err != nil { - t.Fatal(err.Error()) - } - truthManifests := statsManifest{ - manifests: tr, - stats: make(map[string]int), - } - - manifestDigest, err := populateRepo(t, ctx, truthRepo, name, tag) - if err != nil { - t.Fatalf(err.Error()) - } - - localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption, storage.Schema1SigningKey(k)) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - localRepo, err := localRegistry.Repository(ctx, nameRef) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - lr, err := localRepo.Manifests(ctx) - if err != nil { - t.Fatal(err.Error()) - } - - localManifests := statsManifest{ - manifests: lr, - stats: make(map[string]int), - } - - s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") - return &manifestStoreTestEnv{ - manifestDigest: manifestDigest, - manifests: proxyManifestStore{ - ctx: ctx, - localManifests: localManifests, - remoteManifests: truthManifests, - scheduler: s, - repositoryName: nameRef, - authChallenger: &mockChallenger{}, - }, - } -} - -func populateRepo(t *testing.T, ctx context.Context, repository distribution.Repository, name, tag string) (digest.Digest, error) { - m := schema1.Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: name, - Tag: tag, - } - - for i := 0; i < 2; i++ { - wr, err := repository.Blobs(ctx).Create(ctx) - if err != nil { - t.Fatalf("unexpected error creating test upload: %v", err) - } - - rs, ts, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("unexpected error generating test layer file") - } - dgst := digest.Digest(ts) - if _, err := io.Copy(wr, rs); err != nil { - t.Fatalf("unexpected error copying to upload: %v", err) - } - - if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil { - t.Fatalf("unexpected error finishing upload: %v", err) - } - } - - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key: %v", err) - } - - sm, err := schema1.Sign(&m, pk) - if err != nil { - t.Fatalf("error signing manifest: %v", err) - } - - ms, err := repository.Manifests(ctx) - if err != nil { - t.Fatalf(err.Error()) - } - dgst, err := ms.Put(ctx, sm) - if err != nil { - t.Fatalf("unexpected errors putting manifest: %v", err) - } - - return dgst, nil -} - -// TestProxyManifests contains basic acceptance tests -// for the pull-through behavior -func TestProxyManifests(t *testing.T) { - name := "foo/bar" - env 
:= newManifestStoreTestEnv(t, name, "latest") - - localStats := env.LocalStats() - remoteStats := env.RemoteStats() - - ctx := context.Background() - // Stat - must check local and remote - exists, err := env.manifests.Exists(ctx, env.manifestDigest) - if err != nil { - t.Fatalf("Error checking existence") - } - if !exists { - t.Errorf("Unexpected non-existant manifest") - } - - if (*localStats)["exists"] != 1 && (*remoteStats)["exists"] != 1 { - t.Errorf("Unexpected exists count : \n%v \n%v", localStats, remoteStats) - } - - if env.manifests.authChallenger.(*mockChallenger).count != 1 { - t.Fatalf("Expected 1 auth challenge, got %#v", env.manifests.authChallenger) - } - - // Get - should succeed and pull manifest into local - _, err = env.manifests.Get(ctx, env.manifestDigest) - if err != nil { - t.Fatal(err) - } - - if (*localStats)["get"] != 1 && (*remoteStats)["get"] != 1 { - t.Errorf("Unexpected get count") - } - - if (*localStats)["put"] != 1 { - t.Errorf("Expected local put") - } - - if env.manifests.authChallenger.(*mockChallenger).count != 2 { - t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) - } - - // Stat - should only go to local - exists, err = env.manifests.Exists(ctx, env.manifestDigest) - if err != nil { - t.Fatal(err) - } - if !exists { - t.Errorf("Unexpected non-existant manifest") - } - - if (*localStats)["exists"] != 2 && (*remoteStats)["exists"] != 1 { - t.Errorf("Unexpected exists count") - } - - if env.manifests.authChallenger.(*mockChallenger).count != 2 { - t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) - } - - // Get proxied - won't require another authchallenge - _, err = env.manifests.Get(ctx, env.manifestDigest) - if err != nil { - t.Fatal(err) - } - - if env.manifests.authChallenger.(*mockChallenger).count != 2 { - t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) - } - -} diff --git a/docs/proxy/proxymetrics.go b/docs/proxy/proxymetrics.go deleted file mode 100644 index d3d84d78..00000000 --- a/docs/proxy/proxymetrics.go +++ /dev/null @@ -1,74 +0,0 @@ -package proxy - -import ( - "expvar" - "sync/atomic" -) - -// Metrics is used to hold metric counters -// related to the proxy -type Metrics struct { - Requests uint64 - Hits uint64 - Misses uint64 - BytesPulled uint64 - BytesPushed uint64 -} - -type proxyMetricsCollector struct { - blobMetrics Metrics - manifestMetrics Metrics -} - -// BlobPull tracks metrics about blobs pulled into the cache -func (pmc *proxyMetricsCollector) BlobPull(bytesPulled uint64) { - atomic.AddUint64(&pmc.blobMetrics.Misses, 1) - atomic.AddUint64(&pmc.blobMetrics.BytesPulled, bytesPulled) -} - -// BlobPush tracks metrics about blobs pushed to clients -func (pmc *proxyMetricsCollector) BlobPush(bytesPushed uint64) { - atomic.AddUint64(&pmc.blobMetrics.Requests, 1) - atomic.AddUint64(&pmc.blobMetrics.Hits, 1) - atomic.AddUint64(&pmc.blobMetrics.BytesPushed, bytesPushed) -} - -// ManifestPull tracks metrics related to Manifests pulled into the cache -func (pmc *proxyMetricsCollector) ManifestPull(bytesPulled uint64) { - atomic.AddUint64(&pmc.manifestMetrics.Misses, 1) - atomic.AddUint64(&pmc.manifestMetrics.BytesPulled, bytesPulled) -} - -// ManifestPush tracks metrics about manifests pushed to clients -func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64) { - atomic.AddUint64(&pmc.manifestMetrics.Requests, 1) - atomic.AddUint64(&pmc.manifestMetrics.Hits, 1) - atomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed) -} - -// 
proxyMetrics tracks metrics about the proxy cache. This is -// kept globally and made available via expvar. -var proxyMetrics = &proxyMetricsCollector{} - -func init() { - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - pm := registry.(*expvar.Map).Get("proxy") - if pm == nil { - pm = &expvar.Map{} - pm.(*expvar.Map).Init() - registry.(*expvar.Map).Set("proxy", pm) - } - - pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} { - return proxyMetrics.blobMetrics - })) - - pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} { - return proxyMetrics.manifestMetrics - })) - -} diff --git a/docs/proxy/proxyregistry.go b/docs/proxy/proxyregistry.go deleted file mode 100644 index 56818dab..00000000 --- a/docs/proxy/proxyregistry.go +++ /dev/null @@ -1,248 +0,0 @@ -package proxy - -import ( - "fmt" - "net/http" - "net/url" - "sync" - - "github.com/docker/distribution" - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/registry/proxy/scheduler" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/driver" -) - -// proxyingRegistry fetches content from a remote registry and caches it locally -type proxyingRegistry struct { - embedded distribution.Namespace // provides local registry functionality - scheduler *scheduler.TTLExpirationScheduler - remoteURL url.URL - authChallenger authChallenger -} - -// NewRegistryPullThroughCache creates a registry acting as a pull through cache -func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) { - remoteURL, err := url.Parse(config.RemoteURL) - if err != nil { - return nil, err - } - - v := storage.NewVacuum(ctx, driver) - s := scheduler.New(ctx, driver, "/scheduler-state.json") - s.OnBlobExpire(func(ref reference.Reference) error { - var r reference.Canonical - var ok bool - if r, ok = ref.(reference.Canonical); !ok { - return fmt.Errorf("unexpected reference type : %T", ref) - } - - repo, err := registry.Repository(ctx, r) - if err != nil { - return err - } - - blobs := repo.Blobs(ctx) - - // Clear the repository reference and descriptor caches - err = blobs.Delete(ctx, r.Digest()) - if err != nil { - return err - } - - err = v.RemoveBlob(r.Digest().String()) - if err != nil { - return err - } - - return nil - }) - - s.OnManifestExpire(func(ref reference.Reference) error { - var r reference.Canonical - var ok bool - if r, ok = ref.(reference.Canonical); !ok { - return fmt.Errorf("unexpected reference type : %T", ref) - } - - repo, err := registry.Repository(ctx, r) - if err != nil { - return err - } - - manifests, err := repo.Manifests(ctx) - if err != nil { - return err - } - err = manifests.Delete(ctx, r.Digest()) - if err != nil { - return err - } - return nil - }) - - err = s.Start() - if err != nil { - return nil, err - } - - cs, err := configureAuth(config.Username, config.Password) - if err != nil { - return nil, err - } - - return &proxyingRegistry{ - embedded: registry, - scheduler: s, - remoteURL: *remoteURL, - authChallenger: &remoteAuthChallenger{ - remoteURL: *remoteURL, - cm: auth.NewSimpleChallengeManager(), - cs: cs, - 
}, - }, nil -} - -func (pr *proxyingRegistry) Scope() distribution.Scope { - return distribution.GlobalScope -} - -func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { - return pr.embedded.Repositories(ctx, repos, last) -} - -func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named) (distribution.Repository, error) { - c := pr.authChallenger - - tr := transport.NewTransport(http.DefaultTransport, - auth.NewAuthorizer(c.challengeManager(), auth.NewTokenHandler(http.DefaultTransport, c.credentialStore(), name.Name(), "pull"))) - - localRepo, err := pr.embedded.Repository(ctx, name) - if err != nil { - return nil, err - } - localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification()) - if err != nil { - return nil, err - } - - remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL.String(), tr) - if err != nil { - return nil, err - } - - remoteManifests, err := remoteRepo.Manifests(ctx) - if err != nil { - return nil, err - } - - return &proxiedRepository{ - blobStore: &proxyBlobStore{ - localStore: localRepo.Blobs(ctx), - remoteStore: remoteRepo.Blobs(ctx), - scheduler: pr.scheduler, - repositoryName: name, - authChallenger: pr.authChallenger, - }, - manifests: &proxyManifestStore{ - repositoryName: name, - localManifests: localManifests, // Options? - remoteManifests: remoteManifests, - ctx: ctx, - scheduler: pr.scheduler, - authChallenger: pr.authChallenger, - }, - name: name, - tags: &proxyTagService{ - localTags: localRepo.Tags(ctx), - remoteTags: remoteRepo.Tags(ctx), - authChallenger: pr.authChallenger, - }, - }, nil -} - -func (pr *proxyingRegistry) Blobs() distribution.BlobEnumerator { - return pr.embedded.Blobs() -} - -func (pr *proxyingRegistry) BlobStatter() distribution.BlobStatter { - return pr.embedded.BlobStatter() -} - -// authChallenger encapsulates a request to the upstream to establish credential challenges -type authChallenger interface { - tryEstablishChallenges(context.Context) error - challengeManager() auth.ChallengeManager - credentialStore() auth.CredentialStore -} - -type remoteAuthChallenger struct { - remoteURL url.URL - sync.Mutex - cm auth.ChallengeManager - cs auth.CredentialStore -} - -func (r *remoteAuthChallenger) credentialStore() auth.CredentialStore { - return r.cs -} - -func (r *remoteAuthChallenger) challengeManager() auth.ChallengeManager { - return r.cm -} - -// tryEstablishChallenges will attempt to get a challenge type for the upstream if none currently exist -func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error { - r.Lock() - defer r.Unlock() - - remoteURL := r.remoteURL - remoteURL.Path = "/v2/" - challenges, err := r.cm.GetChallenges(r.remoteURL) - if err != nil { - return err - } - - if len(challenges) > 0 { - return nil - } - - // establish challenge type with upstream - if err := ping(r.cm, remoteURL.String(), challengeHeader); err != nil { - return err - } - - context.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm) - return nil -} - -// proxiedRepository uses proxying blob and manifest services to serve content -// locally, or pulling it through from a remote and caching it locally if it doesn't -// already exist -type proxiedRepository struct { - blobStore distribution.BlobStore - manifests distribution.ManifestService - name reference.Named - tags distribution.TagService -} - -func (pr *proxiedRepository) Manifests(ctx context.Context, options 
...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - return pr.manifests, nil -} - -func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore { - return pr.blobStore -} - -func (pr *proxiedRepository) Named() reference.Named { - return pr.name -} - -func (pr *proxiedRepository) Tags(ctx context.Context) distribution.TagService { - return pr.tags -} diff --git a/docs/proxy/proxytagservice.go b/docs/proxy/proxytagservice.go deleted file mode 100644 index a8273030..00000000 --- a/docs/proxy/proxytagservice.go +++ /dev/null @@ -1,65 +0,0 @@ -package proxy - -import ( - "github.com/docker/distribution" - "github.com/docker/distribution/context" -) - -// proxyTagService supports local and remote lookup of tags. -type proxyTagService struct { - localTags distribution.TagService - remoteTags distribution.TagService - authChallenger authChallenger -} - -var _ distribution.TagService = proxyTagService{} - -// Get attempts to get the most recent digest for the tag by checking the remote -// tag service first and then caching it locally. If the remote is unavailable -// the local association is returned -func (pt proxyTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - err := pt.authChallenger.tryEstablishChallenges(ctx) - if err == nil { - desc, err := pt.remoteTags.Get(ctx, tag) - if err == nil { - err := pt.localTags.Tag(ctx, tag, desc) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil - } - } - - desc, err := pt.localTags.Get(ctx, tag) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil -} - -func (pt proxyTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - return distribution.ErrUnsupported -} - -func (pt proxyTagService) Untag(ctx context.Context, tag string) error { - err := pt.localTags.Untag(ctx, tag) - if err != nil { - return err - } - return nil -} - -func (pt proxyTagService) All(ctx context.Context) ([]string, error) { - err := pt.authChallenger.tryEstablishChallenges(ctx) - if err == nil { - tags, err := pt.remoteTags.All(ctx) - if err == nil { - return tags, err - } - } - return pt.localTags.All(ctx) -} - -func (pt proxyTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { - return []string{}, distribution.ErrUnsupported -} diff --git a/docs/proxy/proxytagservice_test.go b/docs/proxy/proxytagservice_test.go deleted file mode 100644 index ce0fe78b..00000000 --- a/docs/proxy/proxytagservice_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package proxy - -import ( - "reflect" - "sort" - "sync" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" -) - -type mockTagStore struct { - mapping map[string]distribution.Descriptor - sync.Mutex -} - -var _ distribution.TagService = &mockTagStore{} - -func (m *mockTagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - m.Lock() - defer m.Unlock() - - if d, ok := m.mapping[tag]; ok { - return d, nil - } - return distribution.Descriptor{}, distribution.ErrTagUnknown{} -} - -func (m *mockTagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - m.Lock() - defer m.Unlock() - - m.mapping[tag] = desc - return nil -} - -func (m *mockTagStore) Untag(ctx context.Context, tag string) error { - m.Lock() - defer m.Unlock() - - if _, ok := m.mapping[tag]; ok { - delete(m.mapping, tag) - return nil - } - return distribution.ErrTagUnknown{} -} - -func (m 
*mockTagStore) All(ctx context.Context) ([]string, error) { - m.Lock() - defer m.Unlock() - - var tags []string - for tag := range m.mapping { - tags = append(tags, tag) - } - - return tags, nil -} - -func (m *mockTagStore) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { - panic("not implemented") -} - -func testProxyTagService(local, remote map[string]distribution.Descriptor) *proxyTagService { - if local == nil { - local = make(map[string]distribution.Descriptor) - } - if remote == nil { - remote = make(map[string]distribution.Descriptor) - } - return &proxyTagService{ - localTags: &mockTagStore{mapping: local}, - remoteTags: &mockTagStore{mapping: remote}, - authChallenger: &mockChallenger{}, - } -} - -func TestGet(t *testing.T) { - remoteDesc := distribution.Descriptor{Size: 42} - remoteTag := "remote" - proxyTags := testProxyTagService(map[string]distribution.Descriptor{remoteTag: remoteDesc}, nil) - - ctx := context.Background() - - // Get pre-loaded tag - d, err := proxyTags.Get(ctx, remoteTag) - if err != nil { - t.Fatal(err) - } - - if proxyTags.authChallenger.(*mockChallenger).count != 1 { - t.Fatalf("Expected 1 auth challenge call, got %#v", proxyTags.authChallenger) - } - - if !reflect.DeepEqual(d, remoteDesc) { - t.Fatal("unable to get put tag") - } - - local, err := proxyTags.localTags.Get(ctx, remoteTag) - if err != nil { - t.Fatal("remote tag not pulled into store") - } - - if !reflect.DeepEqual(local, remoteDesc) { - t.Fatalf("unexpected descriptor pulled through") - } - - // Manually overwrite remote tag - newRemoteDesc := distribution.Descriptor{Size: 43} - err = proxyTags.remoteTags.Tag(ctx, remoteTag, newRemoteDesc) - if err != nil { - t.Fatal(err) - } - - d, err = proxyTags.Get(ctx, remoteTag) - if err != nil { - t.Fatal(err) - } - - if proxyTags.authChallenger.(*mockChallenger).count != 2 { - t.Fatalf("Expected 2 auth challenge calls, got %#v", proxyTags.authChallenger) - } - - if !reflect.DeepEqual(d, newRemoteDesc) { - t.Fatal("unable to get put tag") - } - - _, err = proxyTags.localTags.Get(ctx, remoteTag) - if err != nil { - t.Fatal("remote tag not pulled into store") - } - - // untag, ensure it's removed locally, but present in remote - err = proxyTags.Untag(ctx, remoteTag) - if err != nil { - t.Fatal(err) - } - - _, err = proxyTags.localTags.Get(ctx, remoteTag) - if err == nil { - t.Fatalf("Expected error getting Untag'd tag") - } - - _, err = proxyTags.remoteTags.Get(ctx, remoteTag) - if err != nil { - t.Fatalf("remote tag should not be untagged with proxyTag.Untag") - } - - _, err = proxyTags.Get(ctx, remoteTag) - if err != nil { - t.Fatal("untagged tag should be pulled through") - } - - if proxyTags.authChallenger.(*mockChallenger).count != 3 { - t.Fatalf("Expected 3 auth challenge calls, got %#v", proxyTags.authChallenger) - } - - // Add another tag. 
Ensure both tags appear in 'All' - err = proxyTags.remoteTags.Tag(ctx, "funtag", distribution.Descriptor{Size: 42}) - if err != nil { - t.Fatal(err) - } - - all, err := proxyTags.All(ctx) - if err != nil { - t.Fatal(err) - } - - if len(all) != 2 { - t.Fatalf("Unexpected tag length returned from All() : %d ", len(all)) - } - - sort.Strings(all) - if all[0] != "funtag" && all[1] != "remote" { - t.Fatalf("Unexpected tags returned from All() : %v ", all) - } - - if proxyTags.authChallenger.(*mockChallenger).count != 4 { - t.Fatalf("Expected 4 auth challenge calls, got %#v", proxyTags.authChallenger) - } -} diff --git a/docs/proxy/scheduler/scheduler.go b/docs/proxy/scheduler/scheduler.go deleted file mode 100644 index 0c8a8534..00000000 --- a/docs/proxy/scheduler/scheduler.go +++ /dev/null @@ -1,258 +0,0 @@ -package scheduler - -import ( - "encoding/json" - "fmt" - "sync" - "time" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/driver" -) - -// onTTLExpiryFunc is called when a repository's TTL expires -type expiryFunc func(reference.Reference) error - -const ( - entryTypeBlob = iota - entryTypeManifest - indexSaveFrequency = 5 * time.Second -) - -// schedulerEntry represents an entry in the scheduler -// fields are exported for serialization -type schedulerEntry struct { - Key string `json:"Key"` - Expiry time.Time `json:"ExpiryData"` - EntryType int `json:"EntryType"` - - timer *time.Timer -} - -// New returns a new instance of the scheduler -func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler { - return &TTLExpirationScheduler{ - entries: make(map[string]*schedulerEntry), - driver: driver, - pathToStateFile: path, - ctx: ctx, - stopped: true, - doneChan: make(chan struct{}), - saveTimer: time.NewTicker(indexSaveFrequency), - } -} - -// TTLExpirationScheduler is a scheduler used to perform actions -// when TTLs expire -type TTLExpirationScheduler struct { - sync.Mutex - - entries map[string]*schedulerEntry - - driver driver.StorageDriver - ctx context.Context - pathToStateFile string - - stopped bool - - onBlobExpire expiryFunc - onManifestExpire expiryFunc - - indexDirty bool - saveTimer *time.Ticker - doneChan chan struct{} -} - -// OnBlobExpire is called when a scheduled blob's TTL expires -func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) { - ttles.Lock() - defer ttles.Unlock() - - ttles.onBlobExpire = f -} - -// OnManifestExpire is called when a scheduled manifest's TTL expires -func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { - ttles.Lock() - defer ttles.Unlock() - - ttles.onManifestExpire = f -} - -// AddBlob schedules a blob cleanup after ttl expires -func (ttles *TTLExpirationScheduler) AddBlob(blobRef reference.Canonical, ttl time.Duration) error { - ttles.Lock() - defer ttles.Unlock() - - if ttles.stopped { - return fmt.Errorf("scheduler not started") - } - - ttles.add(blobRef, ttl, entryTypeBlob) - return nil -} - -// AddManifest schedules a manifest cleanup after ttl expires -func (ttles *TTLExpirationScheduler) AddManifest(manifestRef reference.Canonical, ttl time.Duration) error { - ttles.Lock() - defer ttles.Unlock() - - if ttles.stopped { - return fmt.Errorf("scheduler not started") - } - - ttles.add(manifestRef, ttl, entryTypeManifest) - return nil -} - -// Start starts the scheduler -func (ttles *TTLExpirationScheduler) Start() error { - ttles.Lock() - defer ttles.Unlock() - - err := ttles.readState() - 
if err != nil { - return err - } - - if !ttles.stopped { - return fmt.Errorf("Scheduler already started") - } - - context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") - ttles.stopped = false - - // Start timer for each deserialized entry - for _, entry := range ttles.entries { - entry.timer = ttles.startTimer(entry, entry.Expiry.Sub(time.Now())) - } - - // Start a ticker to periodically save the entries index - - go func() { - for { - select { - case <-ttles.saveTimer.C: - if !ttles.indexDirty { - continue - } - - ttles.Lock() - err := ttles.writeState() - if err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) - } else { - ttles.indexDirty = false - } - ttles.Unlock() - - case <-ttles.doneChan: - return - } - } - }() - - return nil -} - -func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duration, eType int) { - entry := &schedulerEntry{ - Key: r.String(), - Expiry: time.Now().Add(ttl), - EntryType: eType, - } - context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) - if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil { - oldEntry.timer.Stop() - } - ttles.entries[entry.Key] = entry - entry.timer = ttles.startTimer(entry, ttl) - ttles.indexDirty = true -} - -func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer { - return time.AfterFunc(ttl, func() { - ttles.Lock() - defer ttles.Unlock() - - var f expiryFunc - - switch entry.EntryType { - case entryTypeBlob: - f = ttles.onBlobExpire - case entryTypeManifest: - f = ttles.onManifestExpire - default: - f = func(reference.Reference) error { - return fmt.Errorf("scheduler entry type") - } - } - - ref, err := reference.Parse(entry.Key) - if err == nil { - if err := f(ref); err != nil { - context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) - } - } else { - context.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err) - } - - delete(ttles.entries, entry.Key) - ttles.indexDirty = true - }) -} - -// Stop stops the scheduler. 
-func (ttles *TTLExpirationScheduler) Stop() { - ttles.Lock() - defer ttles.Unlock() - - if err := ttles.writeState(); err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) - } - - for _, entry := range ttles.entries { - entry.timer.Stop() - } - - close(ttles.doneChan) - ttles.saveTimer.Stop() - ttles.stopped = true -} - -func (ttles *TTLExpirationScheduler) writeState() error { - jsonBytes, err := json.Marshal(ttles.entries) - if err != nil { - return err - } - - err = ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes) - if err != nil { - return err - } - - return nil -} - -func (ttles *TTLExpirationScheduler) readState() error { - if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return nil - default: - return err - } - } - - bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile) - if err != nil { - return err - } - - err = json.Unmarshal(bytes, &ttles.entries) - if err != nil { - return err - } - return nil -} diff --git a/docs/proxy/scheduler/scheduler_test.go b/docs/proxy/scheduler/scheduler_test.go deleted file mode 100644 index 556f5204..00000000 --- a/docs/proxy/scheduler/scheduler_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package scheduler - -import ( - "encoding/json" - "testing" - "time" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -func testRefs(t *testing.T) (reference.Reference, reference.Reference, reference.Reference) { - ref1, err := reference.Parse("testrepo@sha256:aaaaeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - - ref2, err := reference.Parse("testrepo@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - - ref3, err := reference.Parse("testrepo@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - - return ref1, ref2, ref3 -} - -func TestSchedule(t *testing.T) { - ref1, ref2, ref3 := testRefs(t) - timeUnit := time.Millisecond - remainingRepos := map[string]bool{ - ref1.String(): true, - ref2.String(): true, - ref3.String(): true, - } - - s := New(context.Background(), inmemory.New(), "/ttl") - deleteFunc := func(repoName reference.Reference) error { - if len(remainingRepos) == 0 { - t.Fatalf("Incorrect expiry count") - } - _, ok := remainingRepos[repoName.String()] - if !ok { - t.Fatalf("Trying to remove nonexistent repo: %s", repoName) - } - t.Log("removing", repoName) - delete(remainingRepos, repoName.String()) - - return nil - } - s.onBlobExpire = deleteFunc - err := s.Start() - if err != nil { - t.Fatalf("Error starting ttlExpirationScheduler: %s", err) - } - - s.add(ref1, 3*timeUnit, entryTypeBlob) - s.add(ref2, 1*timeUnit, entryTypeBlob) - - func() { - s.add(ref3, 1*timeUnit, entryTypeBlob) - - }() - - // Ensure all repos are deleted - <-time.After(50 * timeUnit) - if len(remainingRepos) != 0 { - t.Fatalf("Repositories remaining: %#v", remainingRepos) - } -} - -func TestRestoreOld(t *testing.T) { - ref1, ref2, _ := testRefs(t) - remainingRepos := map[string]bool{ - ref1.String(): true, - ref2.String(): true, - } - - deleteFunc := func(r reference.Reference) error { - if r.String() == ref1.String() && 
len(remainingRepos) == 2 {
-			t.Errorf("ref1 should be removed first")
-		}
-		_, ok := remainingRepos[r.String()]
-		if !ok {
-			t.Fatalf("Trying to remove nonexistent repo: %s", r)
-		}
-		delete(remainingRepos, r.String())
-		return nil
-	}
-
-	timeUnit := time.Millisecond
-	serialized, err := json.Marshal(&map[string]schedulerEntry{
-		ref1.String(): {
-			Expiry:    time.Now().Add(1 * timeUnit),
-			Key:       ref1.String(),
-			EntryType: 0,
-		},
-		ref2.String(): {
-			Expiry:    time.Now().Add(-3 * timeUnit), // TTL passed, should be removed first
-			Key:       ref2.String(),
-			EntryType: 0,
-		},
-	})
-	if err != nil {
-		t.Fatalf("Error serializing test data: %s", err.Error())
-	}
-
-	ctx := context.Background()
-	pathToStatFile := "/ttl"
-	fs := inmemory.New()
-	err = fs.PutContent(ctx, pathToStatFile, serialized)
-	if err != nil {
-		t.Fatal("Unable to write serialized data to fs")
-	}
-	s := New(context.Background(), fs, "/ttl")
-	s.onBlobExpire = deleteFunc
-	err = s.Start()
-	if err != nil {
-		t.Fatalf("Error starting ttlExpirationScheduler: %s", err)
-	}
-
-	<-time.After(50 * timeUnit)
-	if len(remainingRepos) != 0 {
-		t.Fatalf("Repositories remaining: %#v", remainingRepos)
-	}
-}
-
-func TestStopRestore(t *testing.T) {
-	ref1, ref2, _ := testRefs(t)
-
-	timeUnit := time.Millisecond
-	remainingRepos := map[string]bool{
-		ref1.String(): true,
-		ref2.String(): true,
-	}
-
-	deleteFunc := func(r reference.Reference) error {
-		delete(remainingRepos, r.String())
-		return nil
-	}
-
-	fs := inmemory.New()
-	pathToStateFile := "/ttl"
-	s := New(context.Background(), fs, pathToStateFile)
-	s.onBlobExpire = deleteFunc
-
-	err := s.Start()
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
-	s.add(ref1, 300*timeUnit, entryTypeBlob)
-	s.add(ref2, 100*timeUnit, entryTypeBlob)
-
-	// Start and stop before all operations complete
-	// state will be written to fs
-	s.Stop()
-	time.Sleep(10 * time.Millisecond)
-
-	// v2 will restore state from fs
-	s2 := New(context.Background(), fs, pathToStateFile)
-	s2.onBlobExpire = deleteFunc
-	err = s2.Start()
-	if err != nil {
-		t.Fatalf("Error starting v2: %s", err.Error())
-	}
-
-	<-time.After(500 * timeUnit)
-	if len(remainingRepos) != 0 {
-		t.Fatalf("Repositories remaining: %#v", remainingRepos)
-	}
-
-}
-
-func TestDoubleStart(t *testing.T) {
-	s := New(context.Background(), inmemory.New(), "/ttl")
-	err := s.Start()
-	if err != nil {
-		t.Fatalf("Unable to start scheduler")
-	}
-	err = s.Start()
-	if err == nil {
-		t.Fatalf("Scheduler started twice without error")
-	}
-}
diff --git a/docs/recipes/apache.md b/docs/recipes/apache.md
new file mode 100644
index 00000000..ac24113b
--- /dev/null
+++ b/docs/recipes/apache.md
@@ -0,0 +1,215 @@
+
+
+# Authenticating proxy with apache
+
+## Use-case
+
+People already relying on an apache proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline.
+
+Usually, that includes enterprise setups using LDAP/AD on the backend and an SSO mechanism fronting their internal http portal.
+
+### Alternatives
+
+If you just want authentication for your registry, and are happy maintaining user access separately, you should really consider sticking with the native [basic auth registry feature](../deploying.md#native-basic-auth).
+
+### Solution
+
+With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry.
+
+While we use a simple htpasswd file as an example, any other apache authentication backend should be fairly easy to implement once you are done with the example.
+
+We also implement push restriction (to a limited user group) for the sake of the example. Again, you should adjust this to fit your own needs.
+
+### Gotchas
+
+While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself.
+
+Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues.
+
+## Setting things up
+
+Read again [the requirements](index.md#requirements).
+
+Ready?
+
+Run the following script:
+
+```
+mkdir -p auth
+mkdir -p data
+
+# This is the main apache configuration you will use
+cat <<EOF > auth/httpd.conf
+LoadModule headers_module modules/mod_headers.so
+
+LoadModule authn_file_module modules/mod_authn_file.so
+LoadModule authn_core_module modules/mod_authn_core.so
+LoadModule authz_groupfile_module modules/mod_authz_groupfile.so
+LoadModule authz_user_module modules/mod_authz_user.so
+LoadModule authz_core_module modules/mod_authz_core.so
+LoadModule auth_basic_module modules/mod_auth_basic.so
+LoadModule access_compat_module modules/mod_access_compat.so
+
+LoadModule log_config_module modules/mod_log_config.so
+
+LoadModule ssl_module modules/mod_ssl.so
+
+LoadModule proxy_module modules/mod_proxy.so
+LoadModule proxy_http_module modules/mod_proxy_http.so
+
+LoadModule unixd_module modules/mod_unixd.so
+
+<IfModule ssl_module>
+SSLRandomSeed startup builtin
+SSLRandomSeed connect builtin
+</IfModule>
+
+<IfModule unixd_module>
+User daemon
+Group daemon
+</IfModule>
+
+ServerAdmin you@example.com
+
+ErrorLog /proc/self/fd/2
+
+LogLevel warn
+
+<IfModule log_config_module>
+    LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
+    LogFormat "%h %l %u %t \"%r\" %>s %b" common
+
+    <IfModule logio_module>
+      LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
+    </IfModule>
+
+    CustomLog /proc/self/fd/1 common
+</IfModule>
+
+ServerRoot "/usr/local/apache2"
+
+Listen 5043
+
+<Directory />
+    AllowOverride none
+    Require all denied
+</Directory>
+
+<VirtualHost *:5043>
+
+  ServerName myregistrydomain.com
+
+  SSLEngine on
+  SSLCertificateFile /usr/local/apache2/conf/domain.crt
+  SSLCertificateKeyFile /usr/local/apache2/conf/domain.key
+
+  ## SSL settings recommendation from: https://raymii.org/s/tutorials/Strong_SSL_Security_On_Apache2.html
+  # Anti CRIME
+  SSLCompression off
+
+  # POODLE and other stuff
+  SSLProtocol all -SSLv2 -SSLv3 -TLSv1
+
+  # Secure cipher suites
+  SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH
+  SSLHonorCipherOrder on
+
+  Header always set "Docker-Distribution-Api-Version" "registry/2.0"
+  Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0"
+  RequestHeader set X-Forwarded-Proto "https"
+
+  ProxyRequests off
+  ProxyPreserveHost on
+
+  # no proxy for /error/ (Apache HTTPd errors messages)
+  ProxyPass /error/ !
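+
+  # Forward everything under /v2 to the registry backend; "registry" is the
+  # name the registry container is linked as in the compose file created below.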
+  ProxyPass /v2 http://registry:5000/v2
+  ProxyPassReverse /v2 http://registry:5000/v2
+
+  <Location /v2>
+    Order deny,allow
+    Allow from all
+    AuthName "Registry Authentication"
+    AuthType basic
+    AuthUserFile "/usr/local/apache2/conf/httpd.htpasswd"
+    AuthGroupFile "/usr/local/apache2/conf/httpd.groups"
+
+    # Read access to authenticated users
+    <Limit GET HEAD OPTIONS>
+      Require valid-user
+    </Limit>
+
+    # Write access to docker-deployer only
+    <Limit POST PUT DELETE PATCH>
+      Require group pusher
+    </Limit>
+
+  </Location>
+
+</VirtualHost>
+EOF
+
+# Now, create a password file for "testuser" and "testpassword"
+docker run --entrypoint htpasswd httpd:2.4 -Bbn testuser testpassword > auth/httpd.htpasswd
+# Create another one for "testuserpush" and "testpasswordpush"
+docker run --entrypoint htpasswd httpd:2.4 -Bbn testuserpush testpasswordpush >> auth/httpd.htpasswd
+
+# Create your group file
+echo "pusher: testuserpush" > auth/httpd.groups
+
+# Copy over your certificate files
+cp domain.crt auth
+cp domain.key auth
+
+# Now create your compose file
+
+cat <<EOF > docker-compose.yml
+apache:
+  image: "httpd:2.4"
+  hostname: myregistrydomain.com
+  ports:
+    - 5043:5043
+  links:
+    - registry:registry
+  volumes:
+    - `pwd`/auth:/usr/local/apache2/conf
+
+registry:
+  image: registry:2
+  ports:
+    - 127.0.0.1:5000:5000
+  volumes:
+    - `pwd`/data:/var/lib/registry
+
+EOF
+```
+
+## Starting and stopping
+
+Now, start your stack:
+
+    docker-compose up -d
+
+Login with a "push" authorized user (using `testuserpush` and `testpasswordpush`), then tag and push your first image:
+
+    docker login myregistrydomain.com:5043
+    docker tag ubuntu myregistrydomain.com:5043/test
+    docker push myregistrydomain.com:5043/test
+
+Now, login with a "pull-only" user (using `testuser` and `testpassword`), then pull back the image:
+
+    docker login myregistrydomain.com:5043
+    docker pull myregistrydomain.com:5043/test
+
+Verify that the "pull-only" user can NOT push:
+
+    docker push myregistrydomain.com:5043/test
diff --git a/docs/recipes/index.md b/docs/recipes/index.md
new file mode 100644
index 00000000..b4dd6367
--- /dev/null
+++ b/docs/recipes/index.md
@@ -0,0 +1,37 @@
+
+
+# Recipes
+
+You will find here a list of "recipes": end-to-end scenarios for exotic or otherwise advanced use-cases.
+
+Most users are not expected to have a use for these.
+
+## Requirements
+
+You should have followed the basic [deployment guide](../deploying.md) in its entirety.
+
+If you have not, please take the time to do so.
+
+At this point, it's assumed that:
+
+ * you understand Docker security requirements, and how to configure your docker engines properly
+ * you have installed Docker Compose
+ * it's HIGHLY recommended that you get a certificate from a known CA instead of using self-signed certificates
+ * inside the current directory, you have an X509 `domain.crt` and `domain.key`, for the CN `myregistrydomain.com`
+ * you have stopped and removed any previously running registry (typically `docker stop registry && docker rm -v registry`)
+
+## The List
+
+ * [using Apache as an authenticating proxy](apache.md)
+ * [using Nginx as an authenticating proxy](nginx.md)
+ * [running a Registry on OS X](osx-setup-guide.md)
+ * [mirror the Docker Hub](mirror.md)
diff --git a/docs/recipes/menu.md b/docs/recipes/menu.md
new file mode 100644
index 00000000..b79c1b30
--- /dev/null
+++ b/docs/recipes/menu.md
@@ -0,0 +1,21 @@
+
+
+# Recipes
+
+## The List
+
+ * [using Apache as an authenticating proxy](apache.md)
+ * [using Nginx as an authenticating proxy](nginx.md)
+ * [running a Registry on OS X](osx-setup-guide.md)
+ * [mirror the Docker Hub](mirror.md)
diff --git a/docs/recipes/mirror.md b/docs/recipes/mirror.md
new file mode 100644
index 00000000..241e41bd
--- /dev/null
+++ b/docs/recipes/mirror.md
@@ -0,0 +1,74 @@
+
+
+# Registry as a pull through cache
+
+## Use-case
+
+If you have multiple instances of Docker running in your environment (e.g., multiple physical or virtual machines, all running the Docker daemon), each time one of them requires an image that it doesn't have, it will go out to the internet and fetch it from the public Docker registry. By running a local registry mirror, you can keep most of the redundant image fetch traffic on your local network.
+
+### Alternatives
+
+Alternatively, if the set of images you are using is well delimited, you can simply pull them manually and push them to a simple, local, private registry.
+
+Furthermore, if your images are all built in-house, not using the Hub at all and relying entirely on your local registry is the simplest scenario.
+
+### Gotcha
+
+It's currently not possible to mirror another private registry. Only the central Hub can be mirrored.
+
+### Solution
+
+The Registry can be configured as a pull through cache. In this mode a Registry responds to all normal docker pull requests but stores all content locally.
+
+## How does it work?
+
+The first time you request an image from your local registry mirror, it pulls the image from the public Docker registry and stores it locally before handing it back to you. On subsequent requests, the local registry mirror is able to serve the image from its own storage.
+
+### What if the content changes on the Hub?
+
+When a pull is attempted with a tag, the Registry checks the remote to see if it has the latest version of the requested content. If it doesn't, it fetches and caches the latest content.
+
+### What about my disk?
+
+In environments with high churn rates, stale data can build up in the cache. When running as a pull through cache the Registry will periodically remove old content to save disk space. Subsequent requests for removed content will cause a remote fetch and local re-caching.
+
+To ensure best performance and guarantee correctness, the Registry cache should be configured to use the `filesystem` driver for storage.
+
+## Running a Registry as a pull through cache
+
+The easiest way to run a registry as a pull through cache is to run the official Registry image.
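+
+For example, a minimal sketch: assuming you rely on the Registry's standard environment-variable overrides (where `REGISTRY_PROXY_REMOTEURL` maps to the `proxy.remoteurl` configuration key), a mirror could be started with:
+
+    # run the official image as a pull through cache, storing data in ./data
+    docker run -d -p 5000:5000 --restart=always --name registry-mirror \
+      -v `pwd`/data:/var/lib/registry \
+      -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io \
+      registry:2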
+
+Multiple registry caches can be deployed over the same back-end. A single registry cache will ensure that concurrent requests do not pull duplicate data, but this property will not hold true for a registry cache cluster.
+
+### Configuring the cache
+
+To configure a Registry to run as a pull through cache, you must add a `proxy` section to the config file.
+
+In order to access private images on the Docker Hub, a username and password can be supplied.
+
+    proxy:
+      remoteurl: https://registry-1.docker.io
+      username: [username]
+      password: [password]
+
+> **Warning**: if you specify a username and password, it's very important to understand that private resources that this user has access to on the Hub will be made available on your mirror. It's thus paramount that you secure your mirror by implementing authentication if you expect these resources to stay private!
+
+### Configuring the Docker daemon
+
+You will need to pass the `--registry-mirror` option to your Docker daemon on startup:
+
+    docker --registry-mirror=https://<my-docker-mirror-host> daemon
+
+For example, if your mirror is serving on `https://10.0.0.2:5000`, you would run:
+
+    docker --registry-mirror=https://10.0.0.2:5000 daemon
+
+NOTE: Depending on your local host setup, you may be able to add the `--registry-mirror` option to the `DOCKER_OPTS` variable in `/etc/default/docker`.
diff --git a/docs/recipes/nginx.md b/docs/recipes/nginx.md
new file mode 100644
index 00000000..f4a67679
--- /dev/null
+++ b/docs/recipes/nginx.md
@@ -0,0 +1,190 @@
+
+
+# Authenticating proxy with nginx
+
+
+## Use-case
+
+People already relying on an nginx proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline.
+
+Usually, that includes enterprise setups using LDAP/AD on the backend and an SSO mechanism fronting their internal http portal.
+
+### Alternatives
+
+If you just want authentication for your registry, and are happy maintaining user access separately, you should really consider sticking with the native [basic auth registry feature](../deploying.md#native-basic-auth).
+
+### Solution
+
+With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry.
+
+While we use a simple htpasswd file as an example, any other nginx authentication backend should be fairly easy to implement once you are done with the example.
+
+We also implement push restriction (to a limited user group) for the sake of the example. Again, you should adapt this to fit your own needs.
+
+### Gotchas
+
+While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself.
+
+Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues. Make sure the extra complexity is required.
+
+For instance, Amazon's Elastic Load Balancer (ELB) in HTTPS mode already sets the following client headers:
+
+```
+X-Real-IP
+X-Forwarded-For
+X-Forwarded-Proto
+```
+
+So if you have an nginx instance sitting behind it, you should remove these lines from the example config below:
+
+```
+X-Real-IP $remote_addr; # pass on real client's IP
+X-Forwarded-For $proxy_add_x_forwarded_for;
+X-Forwarded-Proto $scheme;
+```
+
+Otherwise nginx will reset the ELB's values, and the requests will not be routed properly. For more information, see [#970](https://github.com/docker/distribution/issues/970).
+
+## Setting things up
+
+Read again [the requirements](index.md#requirements).
+
+Ready?
+
+Create the required directories
+
+```
+mkdir -p auth
+mkdir -p data
+```
+
+Create the main nginx configuration you will use.
+
+```
+cat <<EOF > auth/nginx.conf
+events {
+    worker_connections  1024;
+}
+
+http {
+
+  upstream docker-registry {
+    server registry:5000;
+  }
+
+  ## Set a variable to help us decide if we need to add the
+  ## 'Docker-Distribution-Api-Version' header.
+  ## The registry always sets this header.
+  ## In the case of nginx performing auth, the header will be unset
+  ## since nginx is auth-ing before proxying.
+  map \$upstream_http_docker_distribution_api_version \$docker_distribution_api_version {
+    'registry/2.0' '';
+    default registry/2.0;
+  }
+
+  server {
+    listen 443 ssl;
+    server_name myregistrydomain.com;
+
+    # SSL
+    ssl_certificate /etc/nginx/conf.d/domain.crt;
+    ssl_certificate_key /etc/nginx/conf.d/domain.key;
+
+    # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
+    ssl_protocols TLSv1.1 TLSv1.2;
+    ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';
+    ssl_prefer_server_ciphers on;
+    ssl_session_cache shared:SSL:10m;
+
+    # disable any limits to avoid HTTP 413 for large image uploads
+    client_max_body_size 0;
+
+    # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
+    chunked_transfer_encoding on;
+
+    location /v2/ {
+      # Do not allow connections from docker 1.5 and earlier
+      # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
+      if (\$http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*\$" ) {
+        return 404;
+      }
+
+      # To add basic authentication to v2 use auth_basic setting.
+      auth_basic "Registry realm";
+      auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd;
+
+      ## If \$docker_distribution_api_version is empty, the header will not be added.
+      ## See the map directive above where this variable is defined.
+      add_header 'Docker-Distribution-Api-Version' \$docker_distribution_api_version always;
+
+      proxy_pass                          http://docker-registry;
+      proxy_set_header  Host              \$http_host;   # required for docker client's sake
+      proxy_set_header  X-Real-IP         \$remote_addr; # pass on real client's IP
+      proxy_set_header  X-Forwarded-For   \$proxy_add_x_forwarded_for;
+      proxy_set_header  X-Forwarded-Proto \$scheme;
+      proxy_read_timeout                  900;
+    }
+  }
+}
+EOF
+```
+
+Now create a password file for "testuser" and "testpassword"
+
+```
+docker run --rm --entrypoint htpasswd registry:2 -bn testuser testpassword > auth/nginx.htpasswd
+```
+
+Copy over your certificate files
+
+```
+cp domain.crt auth
+cp domain.key auth
+```
+
+Now create your compose file
+
+```
+cat <<EOF > docker-compose.yml
+nginx:
+  image: "nginx:1.9"
+  ports:
+    - 5043:443
+  links:
+    - registry:registry
+  volumes:
+    - ./auth:/etc/nginx/conf.d
+    - ./auth/nginx.conf:/etc/nginx/nginx.conf:ro
+
+registry:
+  image: registry:2
+  ports:
+    - 127.0.0.1:5000:5000
+  volumes:
+    - ./data:/var/lib/registry
+EOF
+```
+
+## Starting and stopping
+
+Now, start your stack:
+
+    docker-compose up -d
+
+Login with a "push" authorized user (using `testuser` and `testpassword`), then tag and push your first image, and pull it back:
+
+    docker login -u=testuser -p=testpassword -e=root@example.ch myregistrydomain.com:5043
+    docker tag ubuntu myregistrydomain.com:5043/test
+    docker push myregistrydomain.com:5043/test
+    docker pull myregistrydomain.com:5043/test
diff --git a/docs/recipes/osx-setup-guide.md b/docs/recipes/osx-setup-guide.md
new file mode 100644
index 00000000..d47d31c1
--- /dev/null
+++ b/docs/recipes/osx-setup-guide.md
@@ -0,0 +1,81 @@
+
+
+# OS X Setup Guide
+
+## Use-case
+
+This is useful if you intend to run a registry server natively on OS X.
+
+### Alternatives
+
+You can start a VM on OS X, and deploy your registry normally as a container using Docker inside that VM.
+
+The simplest way to get there is traditionally to use the [docker Toolbox](https://www.docker.com/toolbox), or [docker-machine](/machine/index.md), which usually relies on the [boot2docker](http://boot2docker.io/) iso inside a VirtualBox VM.
+
+### Solution
+
+Using the method described here, you install and compile your own registry from the git repository and run it as an OS X agent.
+
+### Gotchas
+
+Running production services on OS X is outside the scope of this document. Be sure you understand these aspects well before considering going to production with such a setup.
+
+## Setup golang on your machine
+
+If you already have a working Go environment, safely skip to the next section.
+
+If you don't, the TLDR is:
+
+    bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer)
+    source ~/.gvm/scripts/gvm
+    gvm install go1.4.2
+    gvm use go1.4.2
+
+If you want to understand, you should read [How to Write Go Code](https://golang.org/doc/code.html).
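+
+As a quick sanity check, assuming the gvm setup above, you can confirm the toolchain is active before moving on:
+
+    go version
+    echo $GOPATH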
+
+## Checkout the Docker Distribution source tree
+
+    mkdir -p $GOPATH/src/github.com/docker
+    git clone https://github.com/docker/distribution.git $GOPATH/src/github.com/docker/distribution
+    cd $GOPATH/src/github.com/docker/distribution
+
+## Build the binary
+
+    GOPATH=$(pwd)/Godeps/_workspace:$GOPATH make binaries
+    sudo cp bin/registry /usr/local/libexec/registry
+
+## Setup
+
+Copy the registry configuration file in place:
+
+    mkdir /Users/Shared/Registry
+    cp docs/osx/config.yml /Users/Shared/Registry/config.yml
+
+## Running the Docker Registry under launchd
+
+Copy the Docker registry plist into place:
+
+    plutil -lint docs/osx/com.docker.registry.plist
+    cp docs/osx/com.docker.registry.plist ~/Library/LaunchAgents/
+    chmod 644 ~/Library/LaunchAgents/com.docker.registry.plist
+
+Start the Docker registry:
+
+    launchctl load ~/Library/LaunchAgents/com.docker.registry.plist
+
+### Restarting the docker registry service
+
+    launchctl stop com.docker.registry
+    launchctl start com.docker.registry
+
+### Unloading the docker registry service
+
+    launchctl unload ~/Library/LaunchAgents/com.docker.registry.plist
diff --git a/docs/recipes/osx/com.docker.registry.plist b/docs/recipes/osx/com.docker.registry.plist
new file mode 100644
index 00000000..0982349f
--- /dev/null
+++ b/docs/recipes/osx/com.docker.registry.plist
@@ -0,0 +1,42 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+  <dict>
+    <key>Label</key>
+    <string>com.docker.registry</string>
+    <key>KeepAlive</key>
+    <true/>
+    <key>StandardErrorPath</key>
+    <string>/Users/Shared/Registry/registry.log</string>
+    <key>StandardOutPath</key>
+    <string>/Users/Shared/Registry/registry.log</string>
+    <key>Program</key>
+    <string>/usr/local/libexec/registry</string>
+    <key>ProgramArguments</key>
+    <array>
+      <string>/usr/local/libexec/registry</string>
+      <string>/Users/Shared/Registry/config.yml</string>
+    </array>
+    <key>Sockets</key>
+    <dict>
+      <key>http-listen-address</key>
+      <dict>
+        <key>SockServiceName</key>
+        <string>5000</string>
+        <key>SockType</key>
+        <string>dgram</string>
+        <key>SockFamily</key>
+        <string>IPv4</string>
+      </dict>
+      <key>http-debug-address</key>
+      <dict>
+        <key>SockServiceName</key>
+        <string>5001</string>
+        <key>SockType</key>
+        <string>dgram</string>
+        <key>SockFamily</key>
+        <string>IPv4</string>
+      </dict>
+    </dict>
+  </dict>
+</plist>
diff --git a/docs/recipes/osx/config.yml b/docs/recipes/osx/config.yml
new file mode 100644
index 00000000..63b8f713
--- /dev/null
+++ b/docs/recipes/osx/config.yml
@@ -0,0 +1,16 @@
+version: 0.1
+log:
+  level: info
+  fields:
+    service: registry
+    environment: macbook-air
+storage:
+  cache:
+    blobdescriptor: inmemory
+  filesystem:
+    rootdirectory: /Users/Shared/Registry
+http:
+  addr: 0.0.0.0:5000
+  secret: mytokensecret
+  debug:
+    addr: localhost:5001
diff --git a/docs/registry.go b/docs/registry.go
deleted file mode 100644
index 559f724c..00000000
--- a/docs/registry.go
+++ /dev/null
@@ -1,345 +0,0 @@
-package registry
-
-import (
-    "crypto/tls"
-    "crypto/x509"
-    "fmt"
-    "io/ioutil"
-    "net/http"
-    "os"
-    "time"
-
-    "rsc.io/letsencrypt"
-
-    log "github.com/Sirupsen/logrus"
-    "github.com/Sirupsen/logrus/formatters/logstash"
-    "github.com/bugsnag/bugsnag-go"
-    "github.com/docker/distribution/configuration"
-    "github.com/docker/distribution/context"
-    "github.com/docker/distribution/health"
-    "github.com/docker/distribution/registry/handlers"
-    "github.com/docker/distribution/registry/listener"
-    "github.com/docker/distribution/uuid"
-    "github.com/docker/distribution/version"
-    gorhandlers "github.com/gorilla/handlers"
-    "github.com/spf13/cobra"
-    "github.com/yvasiyarov/gorelic"
-)
-
-// ServeCmd is a cobra command for running the registry.
-var ServeCmd = &cobra.Command{
-    Use:   "serve <config>",
-    Short: "`serve` stores and distributes Docker images",
-    Long:  "`serve` stores and distributes Docker images.",
-    Run: func(cmd *cobra.Command, args []string) {
-
-        // setup context
-        ctx := context.WithVersion(context.Background(), version.Version)
-
-        config, err := resolveConfiguration(args)
-        if err != nil {
-            fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
-            cmd.Usage()
-            os.Exit(1)
-        }
-
-        if config.HTTP.Debug.Addr != "" {
-            go func(addr string) {
-                log.Infof("debug server listening %v", addr)
-                if err := http.ListenAndServe(addr, nil); err != nil {
-                    log.Fatalf("error listening on debug interface: %v", err)
-                }
-            }(config.HTTP.Debug.Addr)
-        }
-
-        registry, err := NewRegistry(ctx, config)
-        if err != nil {
-            log.Fatalln(err)
-        }
-
-        if err = registry.ListenAndServe(); err != nil {
-            log.Fatalln(err)
-        }
-    },
-}
-
-// A Registry represents a complete instance of the registry.
-// TODO(aaronl): It might make sense for Registry to become an interface.
-type Registry struct {
-    config *configuration.Configuration
-    app    *handlers.App
-    server *http.Server
-}
-
-// NewRegistry creates a new registry from a context and configuration struct.
-func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) {
-    var err error
-    ctx, err = configureLogging(ctx, config)
-    if err != nil {
-        return nil, fmt.Errorf("error configuring logger: %v", err)
-    }
-
-    // inject a logger into the uuid library. warns us if there is a problem
-    // with uuid generation under low entropy.
-    uuid.Loggerf = context.GetLogger(ctx).Warnf
-
-    app := handlers.NewApp(ctx, config)
-    // TODO(aaronl): The global scope of the health checks means NewRegistry
-    // can only be called once per process.
-    app.RegisterHealthChecks()
-    handler := configureReporting(app)
-    handler = alive("/", handler)
-    handler = health.Handler(handler)
-    handler = panicHandler(handler)
-    handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler)
-
-    server := &http.Server{
-        Handler: handler,
-    }
-
-    return &Registry{
-        app:    app,
-        config: config,
-        server: server,
-    }, nil
-}
-
-// ListenAndServe runs the registry's HTTP server.
-func (registry *Registry) ListenAndServe() error { - config := registry.config - - ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) - if err != nil { - return err - } - - if config.HTTP.TLS.Certificate != "" || config.HTTP.TLS.LetsEncrypt.CacheFile != "" { - tlsConf := &tls.Config{ - ClientAuth: tls.NoClientCert, - NextProtos: []string{"http/1.1"}, - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - }, - } - - if config.HTTP.TLS.LetsEncrypt.CacheFile != "" { - if config.HTTP.TLS.Certificate != "" { - return fmt.Errorf("cannot specify both certificate and Let's Encrypt") - } - var m letsencrypt.Manager - if err := m.CacheFile(config.HTTP.TLS.LetsEncrypt.CacheFile); err != nil { - return err - } - if !m.Registered() { - if err := m.Register(config.HTTP.TLS.LetsEncrypt.Email, nil); err != nil { - return err - } - } - tlsConf.GetCertificate = m.GetCertificate - } else { - tlsConf.Certificates = make([]tls.Certificate, 1) - tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) - if err != nil { - return err - } - } - - if len(config.HTTP.TLS.ClientCAs) != 0 { - pool := x509.NewCertPool() - - for _, ca := range config.HTTP.TLS.ClientCAs { - caPem, err := ioutil.ReadFile(ca) - if err != nil { - return err - } - - if ok := pool.AppendCertsFromPEM(caPem); !ok { - return fmt.Errorf("Could not add CA to pool") - } - } - - for _, subj := range pool.Subjects() { - context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) - } - - tlsConf.ClientAuth = tls.RequireAndVerifyClientCert - tlsConf.ClientCAs = pool - } - - ln = tls.NewListener(ln, tlsConf) - context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) - } else { - context.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) - } - - return registry.server.Serve(ln) -} - -func configureReporting(app *handlers.App) http.Handler { - var handler http.Handler = app - - if app.Config.Reporting.Bugsnag.APIKey != "" { - bugsnagConfig := bugsnag.Configuration{ - APIKey: app.Config.Reporting.Bugsnag.APIKey, - // TODO(brianbland): provide the registry version here - // AppVersion: "2.0", - } - if app.Config.Reporting.Bugsnag.ReleaseStage != "" { - bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage - } - if app.Config.Reporting.Bugsnag.Endpoint != "" { - bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint - } - bugsnag.Configure(bugsnagConfig) - - handler = bugsnag.Handler(handler) - } - - if app.Config.Reporting.NewRelic.LicenseKey != "" { - agent := gorelic.NewAgent() - agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey - if app.Config.Reporting.NewRelic.Name != "" { - agent.NewrelicName = app.Config.Reporting.NewRelic.Name - } - agent.CollectHTTPStat = true - agent.Verbose = app.Config.Reporting.NewRelic.Verbose - agent.Run() - - handler = agent.WrapHTTPHandler(handler) - } - - return handler -} - -// configureLogging prepares the context with a logger using the -// configuration. 
-func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { - if config.Log.Level == "" && config.Log.Formatter == "" { - // If no config for logging is set, fallback to deprecated "Loglevel". - log.SetLevel(logLevel(config.Loglevel)) - ctx = context.WithLogger(ctx, context.GetLogger(ctx)) - return ctx, nil - } - - log.SetLevel(logLevel(config.Log.Level)) - - formatter := config.Log.Formatter - if formatter == "" { - formatter = "text" // default formatter - } - - switch formatter { - case "json": - log.SetFormatter(&log.JSONFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - case "text": - log.SetFormatter(&log.TextFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - case "logstash": - log.SetFormatter(&logstash.LogstashFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - default: - // just let the library use default on empty string. - if config.Log.Formatter != "" { - return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) - } - } - - if config.Log.Formatter != "" { - log.Debugf("using %q logging formatter", config.Log.Formatter) - } - - if len(config.Log.Fields) > 0 { - // build up the static fields, if present. - var fields []interface{} - for k := range config.Log.Fields { - fields = append(fields, k) - } - - ctx = context.WithValues(ctx, config.Log.Fields) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) - } - - return ctx, nil -} - -func logLevel(level configuration.Loglevel) log.Level { - l, err := log.ParseLevel(string(level)) - if err != nil { - l = log.InfoLevel - log.Warnf("error parsing level %q: %v, using %q ", level, err, l) - } - - return l -} - -// panicHandler add an HTTP handler to web app. The handler recover the happening -// panic. logrus.Panic transmits panic message to pre-config log hooks, which is -// defined in config.yml. -func panicHandler(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer func() { - if err := recover(); err != nil { - log.Panic(fmt.Sprintf("%v", err)) - } - }() - handler.ServeHTTP(w, r) - }) -} - -// alive simply wraps the handler with a route that always returns an http 200 -// response when the path is matched. If the path is not matched, the request -// is passed to the provided handler. There is no guarantee of anything but -// that the server is up. Wrap with other handlers (such as health.Handler) -// for greater affect. 
-func alive(path string, handler http.Handler) http.Handler {
-    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-        if r.URL.Path == path {
-            w.Header().Set("Cache-Control", "no-cache")
-            w.WriteHeader(http.StatusOK)
-            return
-        }
-
-        handler.ServeHTTP(w, r)
-    })
-}
-
-func resolveConfiguration(args []string) (*configuration.Configuration, error) {
-    var configurationPath string
-
-    if len(args) > 0 {
-        configurationPath = args[0]
-    } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" {
-        configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH")
-    }
-
-    if configurationPath == "" {
-        return nil, fmt.Errorf("configuration path unspecified")
-    }
-
-    fp, err := os.Open(configurationPath)
-    if err != nil {
-        return nil, err
-    }
-
-    defer fp.Close()
-
-    config, err := configuration.Parse(fp)
-    if err != nil {
-        return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err)
-    }
-
-    return config, nil
-}
diff --git a/docs/root.go b/docs/root.go
deleted file mode 100644
index 5d3005c2..00000000
--- a/docs/root.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package registry
-
-import (
-    "fmt"
-    "os"
-
-    "github.com/docker/distribution/context"
-    "github.com/docker/distribution/registry/storage"
-    "github.com/docker/distribution/registry/storage/driver/factory"
-    "github.com/docker/distribution/version"
-    "github.com/docker/libtrust"
-    "github.com/spf13/cobra"
-)
-
-var showVersion bool
-
-func init() {
-    RootCmd.AddCommand(ServeCmd)
-    RootCmd.AddCommand(GCCmd)
-    GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs")
-    RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit")
-}
-
-// RootCmd is the main command for the 'registry' binary.
-var RootCmd = &cobra.Command{
-    Use:   "registry",
-    Short: "`registry`",
-    Long:  "`registry`",
-    Run: func(cmd *cobra.Command, args []string) {
-        if showVersion {
-            version.PrintVersion()
-            return
-        }
-        cmd.Usage()
-    },
-}
-
-var dryRun bool
-
-// GCCmd is the cobra command that corresponds to the garbage-collect subcommand
-var GCCmd = &cobra.Command{
-    Use:   "garbage-collect <config>",
-    Short: "`garbage-collect` deletes layers not referenced by any manifests",
-    Long:  "`garbage-collect` deletes layers not referenced by any manifests",
-    Run: func(cmd *cobra.Command, args []string) {
-        config, err := resolveConfiguration(args)
-        if err != nil {
-            fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
-            cmd.Usage()
-            os.Exit(1)
-        }
-
-        driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters())
-        if err != nil {
-            fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err)
-            os.Exit(1)
-        }
-
-        ctx := context.Background()
-        ctx, err = configureLogging(ctx, config)
-        if err != nil {
-            fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err)
-            os.Exit(1)
-        }
-
-        k, err := libtrust.GenerateECP256PrivateKey()
-        if err != nil {
-            fmt.Fprint(os.Stderr, err)
-            os.Exit(1)
-        }
-
-        registry, err := storage.NewRegistry(ctx, driver, storage.Schema1SigningKey(k))
-        if err != nil {
-            fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err)
-            os.Exit(1)
-        }
-
-        err = storage.MarkAndSweep(ctx, driver, registry, dryRun)
-        if err != nil {
-            fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err)
-            os.Exit(1)
-        }
-    },
-}
diff --git a/docs/spec/api.md b/docs/spec/api.md
new file mode 100644
index 00000000..c4517c0b
--- /dev/null
+++ b/docs/spec/api.md
@@ -0,0 +1,5489 @@
+
+
+# Docker Registry HTTP API V2
+
+## Introduction
+
+The _Docker Registry HTTP API_ is the protocol to facilitate distribution of
+images to the docker engine. It interacts with instances of the docker
+registry, which is a service to manage information about docker images and
+enable their distribution. The specification covers the operation of version 2
+of this API, known as _Docker Registry HTTP API V2_.
+
+While the V1 registry protocol is usable, there are several problems with the
+architecture that have led to this new version. The main driver of this
+specification is a set of changes to the docker image format, covered in
+[docker/docker#8093](https://github.com/docker/docker/issues/8093).
+The new, self-contained image manifest simplifies image definition and improves
+security. This specification will build on that work, leveraging new properties
+of the manifest format to improve performance, reduce bandwidth usage and
+decrease the likelihood of backend corruption.
+
+For relevant details and history leading up to this specification, please see
+the following issues:
+
+- [docker/docker#8093](https://github.com/docker/docker/issues/8093)
+- [docker/docker#9015](https://github.com/docker/docker/issues/9015)
+- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612)
+
+### Scope
+
+This specification covers the URL layout and protocols of the interaction
+between docker registry and docker core. This will affect the docker core
+registry API and the rewrite of docker-registry. Docker registry
+implementations may implement other API endpoints, but they are not covered by
+this specification.
+
+This includes the following features:
+
+- Namespace-oriented URI Layout
+- PUSH/PULL registry server for V2 image manifest format
+- Resumable layer PUSH support
+- V2 Client library implementation
+
+While authentication and authorization support will influence this
+specification, details of the protocol will be left to a future specification.
+Relevant header definitions and error codes are present to provide an
+indication of what a client may encounter.
+
+#### Future
+
+There are features that have been discussed during the process of cutting this
+specification. The following is an incomplete list:
+
+- Immutable image references
+- Multiple architecture support
+- Migration from v2compatibility representation
+
+These may represent features that are either out of the scope of this
+specification, the purview of another specification, or have been deferred to a
+future version.
+
+### Use Cases
+
+For the most part, the use cases of the former registry API apply to the new
+version. Differentiating use cases are covered below.
+
+#### Image Verification
+
+A docker engine instance would like to run a verified image named
+"library/ubuntu", with the tag "latest". The engine contacts the registry,
+requesting the manifest for "library/ubuntu:latest". An untrusted registry
+returns a manifest. Before proceeding to download the individual layers, the
+engine verifies the manifest's signature, ensuring that the content was
+produced from a trusted source and no tampering has occurred. After each layer
+is downloaded, the engine verifies the digest of the layer, ensuring that the
+content matches that specified by the manifest.
+
+#### Resumable Push
+
+Company X's build servers lose connectivity to docker registry before
+completing an image layer transfer. After connectivity returns, the build
+server attempts to re-upload the image. The registry notifies the build server
+that the upload has already been partially attempted. The build server
+responds by only sending the remaining data to complete the image file.
+
+#### Resumable Pull
+
+Company X is having more connectivity problems, but this time in their
+deployment datacenter. When downloading an image, the connection is
+interrupted before completion. The client keeps the partial data and uses http
+`Range` requests to avoid downloading repeated data.
+
+#### Layer Upload De-duplication
+
+Company Y's build system creates two identical docker layers from build
+processes A and B. Build process A completes uploading the layer before B.
+When process B attempts to upload the layer, the registry indicates that it's
+not necessary because the layer is already known.
+
+If process A and B upload the same layer at the same time, both operations
+will proceed and the first to complete will be stored in the registry (Note:
+we may modify this to prevent dogpile with some locking mechanism).
+
+### Changes
+
+The V2 specification has been written to work as a living document, specifying
+only what is certain and leaving what is not specified open or to future
+changes. Only non-conflicting additions should be made to the API and accepted
+changes should avoid preventing future changes from happening.
+
+This section should be updated when changes are made to the specification,
+indicating what is different. Optionally, we may start marking parts of the
+specification to correspond with the versions enumerated here.
+
+Each set of changes is given a letter corresponding to a set of modifications
+that were applied to the baseline specification. These are merely for
+reference and shouldn't be used outside the specification other than to
+identify a set of modifications.
+
+- **l**
+  - Document TOOMANYREQUESTS error code.
+
+- **k**
+  - Document use of Accept and Content-Type headers in manifests endpoint.
+
+- **j**
+  - Add ability to mount blobs across repositories.
+
+- **i**
+  - Clarified expected behavior response to manifest HEAD request.
+
+- **h**
+  - All mention of tarsum removed.
+
+- **g**
+  - Clarify pagination behavior with unspecified parameters.
+
+- **f**
+  - Specify the delete API for layers and manifests.
+
+- **e**
+  - Added support for listing registry contents.
+  - Added pagination to tags API.
+  - Added common approach to support pagination.
+
+- **d**
+  - Allow repository name components to be one character.
+  - Clarified that single component names are allowed.
+
+- **c**
+  - Added section covering digest format.
+  - Added more clarification that manifest cannot be deleted by tag.
+
+- **b**
+  - Added capability of doing streaming upload to PATCH blob upload.
+  - Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.
+  - Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.
+
+- **a**
+  - Added support for immutable manifest references in manifest endpoints.
+  - Deleting a manifest by tag has been deprecated.
+  - Specified `Docker-Content-Digest` header for appropriate entities.
+  - Added error code for unsupported operations.
+
+## Overview
+
+This section covers client flows and details of the API endpoints. The URI
+layout of the new API is structured to support a rich authentication and
+authorization model by leveraging namespaces. All endpoints will be prefixed
+by the API version and the repository name:
+
+    /v2/<name>/
+
+For example, an API endpoint that will work with the `library/ubuntu`
+repository will have the URI prefix:
+
+    /v2/library/ubuntu/
+
+This scheme provides rich access control over various operations and methods
+using the URI prefix and http methods that can be controlled in a variety of
+ways.
+
+Classically, repository names have always been two path components where each
+path component is less than 30 characters. The V2 registry API does not
+enforce this. The rules for a repository name are as follows:
+
+1. A repository name is broken up into _path components_. A component of a
+   repository name must consist of at least one lowercase, alpha-numeric
+   character, optionally separated by periods, dashes or underscores. More
+   strictly, it must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`.
+2. If a repository name has two or more path components, they must be
+   separated by a forward slash ("/").
+3. The total length of a repository name, including slashes, must be less
+   than 256 characters.
+
+These name requirements _only_ apply to the registry API and should accept a
+superset of what is supported by other docker ecosystem components.
+
+All endpoints should support aggressive http caching, compression and range
+headers, where appropriate. The new API attempts to leverage HTTP semantics
+where possible but may break from standards to implement targeted features.
+
+For detail on individual endpoints, please see the [_Detail_](#detail)
+section.
+
+### Errors
+
+Actionable failure conditions, covered in detail in their relevant sections,
+are reported as part of 4xx responses, in a json response body. One or more
+errors will be returned in the following format:
+
+    {
+        "errors": [{
+                "code": <error identifier>,
+                "message": <message describing condition>,
+                "detail": <unstructured>
+            },
+            ...
+        ]
+    }
+
+The `code` field will be a unique identifier, all caps with underscores by
+convention. The `message` field will be a human readable string. The optional
+`detail` field may contain arbitrary json data providing information the
+client can use to resolve the issue.
+
+While the client can take action on certain error codes, the registry may add
+new error codes over time. All client implementations should treat unknown
+error codes as `UNKNOWN`, allowing future error codes to be added without
+breaking API compatibility. For the purposes of the specification error codes
+will only be added and never removed.
+
+For a complete account of all error codes, please see the [_Errors_](#errors-2)
+section.
+
+### API Version Check
+
+A minimal endpoint, mounted at `/v2/`, will provide version support information
+based on its response statuses. The request format is as follows:
+
+    GET /v2/
+
+If a `200 OK` response is returned, the registry implements the V2(.1)
+registry API and the client may proceed safely with other V2 operations.
+Optionally, the response may contain information about the supported paths in
+the response body. The client should be prepared to ignore this data.
+
+If a `401 Unauthorized` response is returned, the client should take action
+based on the contents of the "WWW-Authenticate" header and try the endpoint
+again.
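+
+For illustration, such a check could be scripted as follows; this is a
+minimal sketch, with the registry host as a placeholder:
+
+```
+# expect 200 or 401 from a V2 registry; a 404 suggests V2 is not implemented
+curl -s -o /dev/null -w '%{http_code}\n' https://registry.example.com/v2/
+```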
+Depending on access control setup, the client may still have to
+authenticate against different resources, even if this check succeeds.
+
+If a `404 Not Found` response status, or another unexpected status, is
+returned, the client should proceed with the assumption that the registry does
+not implement V2 of the API.
+
+When a `200 OK` or `401 Unauthorized` response is returned, the
+"Docker-Distribution-API-Version" header should be set to "registry/2.0".
+Clients may require this header value to determine if the endpoint serves this
+API. When this header is omitted, clients may fallback to an older API version.
+
+### Content Digests
+
+This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage).
+The core of this design is the concept of a content addressable identifier. It
+uniquely identifies content by taking a collision-resistant hash of the bytes.
+Such an identifier can be independently calculated and verified by selection
+of a common _algorithm_. If such an identifier can be communicated in a secure
+manner, one can retrieve the content from an insecure source, calculate it
+independently and be certain that the correct content was obtained. Put simply,
+the identifier is a property of the content.
+
+To disambiguate from other concepts, we call this identifier a _digest_. A
+_digest_ is a serialized hash result, consisting of an _algorithm_ and _hex_
+portion. The _algorithm_ identifies the methodology used to calculate the
+digest. The _hex_ portion is the hex-encoded result of the hash.
+
+We define a _digest_ string to match the following grammar:
+```
+digest    := algorithm ":" hex
+algorithm := /[A-Fa-f0-9_+.-]+/
+hex       := /[A-Fa-f0-9]+/
+```
+
+Some examples of _digests_ include the following:
+
+digest                                                                  | description
+------------------------------------------------------------------------|------------------------------------------------
+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest
+
+While the _algorithm_ does allow one to implement a wide variety of
+algorithms, compliant implementations should use sha256. Heavy processing of
+input before calculating a hash is discouraged to avoid degrading the
+uniqueness of the _digest_, but some canonicalization may be performed to
+ensure consistent identifiers.
+
+Let's use a simple example in pseudo-code to demonstrate a digest calculation:
+```
+let C = 'a small string'
+let B = sha256(C)
+let D = 'sha256:' + EncodeHex(B)
+let ID(C) = D
+```
+
+Above, we have the bytestring `C` passed into a function, `sha256`, that
+returns a bytestring `B`, which is the hash of `C`. `D` gets the algorithm
+concatenated with the hex encoding of `B`. We then define the identifier of
+`C`, `ID(C)`, as equal to `D`. A digest can be verified by independently
+calculating `D` and comparing it with the identifier `ID(C)`.
+
+#### Digest Header
+
+To provide verification of http content, any response may include a
+`Docker-Content-Digest` header. This will include the digest of the target
+entity returned in the response. For blobs, this is the entire blob content. For
+manifests, this is the manifest body without the signature content, also known
+as the JWS payload. Note that the commonly used canonicalization for digest
+calculation may be dependent on the mediatype of the content, such as with
+manifests.
+
+The client may choose to ignore the header or may verify it to ensure content
+integrity and transport security.
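+
+As an aside, the pseudo-code above can be reproduced with standard
+command-line tooling, and the same approach can be used to check a
+`Docker-Content-Digest` header against downloaded content. This is a minimal
+sketch, not part of the protocol:
+
+```
+# digest of the bytestring 'a small string', in algorithm:hex form
+printf '%s' 'a small string' | sha256sum | awk '{print "sha256:" $1}'
+```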
+Such verification is most important when fetching by a
+digest. To ensure security, the content should be verified against the digest
+used to fetch the content. At times, the returned digest may differ from that
+used to initiate a request. Such digests are considered to be from different
+_domains_, meaning they have different values for _algorithm_. In such a case,
+the client may choose to verify the digests in both domains or ignore the
+server's digest. To maintain security, the client _must_ always verify the
+content against the _digest_ used to fetch the content.
+
+> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use
+> the same digest used to fetch the content to verify it. The header
+> `Docker-Content-Digest` should not be trusted over the "local" digest.
+
+### Pulling An Image
+
+An "image" is a combination of a JSON manifest and individual layer files. The
+process of pulling an image centers around retrieving these two components.
+
+The first step in pulling an image is to retrieve the manifest. For reference,
+the relevant manifest fields for the registry are the following:
+
+ field     | description
+-----------|------------------------------------------------
+ name      | The name of the image.
+ tag       | The tag for this version of the image.
+ fsLayers  | A list of layer descriptors (including digest)
+ signature | A JWS used to verify the manifest content
+
+For more information about the manifest format, please see
+[docker/docker#8093](https://github.com/docker/docker/issues/8093).
+
+When the manifest is in hand, the client must verify the signature to ensure
+the names and layers are valid. Once confirmed, the client will then use the
+digests to download the individual layers. Layers are stored as blobs in
+the V2 registry API, keyed by their digest.
+
+#### Pulling an Image Manifest
+
+The image manifest can be fetched with the following url:
+
+```
+GET /v2/<name>/manifests/<reference>
+```
+
+The `name` and `reference` parameters identify the image and are required. The
+reference may include a tag or digest.
+
+The client should include an Accept header indicating which manifest content
+types it supports. For more details on the manifest formats and their content
+types, see [manifest-v2-1.md](manifest-v2-1.md) and
+[manifest-v2-2.md](manifest-v2-2.md). In a successful response, the Content-Type
+header will indicate which manifest type is being returned.
+
+A `404 Not Found` response will be returned if the image is unknown to the
+registry. If the image exists and the response is successful, the image
+manifest will be returned, with the following format (see
+[docker/docker#8093](https://github.com/docker/docker/issues/8093) for details):
+
+    {
+       "name": <name>,
+       "tag": <tag>,
+       "fsLayers": [
+          {
+             "blobSum": <digest>
+          },
+          ...
+       ],
+       "history": <v1 images>,
+       "signature": <JWS>
+    }
+
+The client should verify the returned manifest signature for authenticity
+before fetching layers.
+
+##### Existing Manifests
+
+The image manifest can be checked for existence with the following url:
+
+```
+HEAD /v2/<name>/manifests/<reference>
+```
+
+The `name` and `reference` parameters identify the image and are required. The
+reference may include a tag or digest.
+
+A `404 Not Found` response will be returned if the image is unknown to the
+registry. If the image exists and the response is successful, the response will
+be as follows:
+
+```
+200 OK
+Content-Length: <length of manifest>
+Docker-Content-Digest: <digest>
+```
+
+
+#### Pulling a Layer
+
+Layers are stored in the blob portion of the registry, keyed by digest.
+Pulling a layer is carried out by a standard http request. The URL is as
+follows:
+
+    GET /v2/<name>/blobs/<digest>
+
+Access to a layer will be gated by the `name` of the repository but is
+identified uniquely in the registry by `digest`.
+
+This endpoint may issue a 307 (302 for <1.7 docker clients) redirect to
+another service for downloading the layer and clients should be prepared to
+handle redirects.
+
+### Pushing An Image
+
+Pushing an image works in the opposite order as a pull. After assembling a
+list of layers, the client must first identify missing layers, then upload
+each layer, completed by uploading the manifest.
+
+#### Pushing a Layer
+
+All layer uploads use two steps to manage the upload process. The first step
+starts the upload in the registry service, returning an upload url. The
+second step uses the upload url to transfer the actual data. Uploads are
+started with a POST request which returns a url that can be used to push data
+and check upload status.
+
+The `Location` header will be used to communicate the upload location after
+each request. While it won't change in this specification, clients should
+use the most recent value returned by the API.
+
+##### Starting An Upload
+
+To begin the process, a POST request should be issued in the following format:
+
+```
+POST /v2/<name>/blobs/uploads/
+```
+
+The parameters of this request are the image namespace under which the layer
+will be linked. Responses to this request are covered below.
+
+##### Existing Layers
+
+The existence of a layer can be checked via a `HEAD` request to the blob store
+API. The request should be formatted as follows:
+
+```
+HEAD /v2/<name>/blobs/<digest>
+```
+
+If the layer with the digest specified in `digest` is available, a 200 OK
+response will be received, with no actual body content (this is according to
+http specification). The response will look as follows:
+
+```
+200 OK
+Content-Length: <length of blob>
+Docker-Content-Digest: <digest>
+```
+
+When this response is received, the client can assume that the layer is
+already available in the registry under the given name and should take no
+further action to upload the layer. Note that the binary digests may differ
+for the existing registry layer, but the digests will be guaranteed to match.
+
+##### Uploading the Layer
+
+If the POST request is successful, a `202 Accepted` response will be returned
+with the upload URL in the `Location` header:
+
+```
+202 Accepted
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: bytes=0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+The rest of the upload process can be carried out with the returned url,
+called the "Upload URL" from the `Location` header. All responses to the
+upload url, whether sending data or getting status, will be in this format.
+Though the URI format (`/v2/<name>/blobs/uploads/<uuid>`) for the `Location`
+header is specified, clients should treat it as an opaque url and should never
+try to assemble it. While the `uuid` parameter may be an actual UUID, this
+proposal imposes no constraints on the format and clients should never impose
+any.
+
+If clients need to correlate local upload state with remote upload state, the
+contents of the `Docker-Upload-UUID` header should be used. Such an id can be
+used to key the last used location header when implementing resumable uploads.
+
+##### Upload Progress
+
+The progress and chunk coordination of the upload process will be coordinated
+through the `Range` header. While this is a non-standard use of the `Range`
+header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use.
+For an upload that just started, for an example with a 1000 byte layer file,
+the `Range` header would be as follows:
+
+```
+Range: bytes=0-0
+```
+
+To get the status of an upload, issue a GET request to the upload URL:
+
+```
+GET /v2/<name>/blobs/uploads/<uuid>
+Host: <registry host>
+```
+
+The response will be similar to the above, except it will return a 204 status:
+
+```
+204 No Content
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: bytes=0-<offset>
+Docker-Upload-UUID: <uuid>
+```
+
+Note that the HTTP `Range` header byte ranges are inclusive and that will be
+honored, even in non-standard use cases.
+
+##### Monolithic Upload
+
+A monolithic upload is simply a chunked upload with a single chunk and may be
+favored by clients that would like to avoid the complexity of chunking.
+To carry out a "monolithic" upload, one can simply put the entire content blob
+to the provided URL:
+
+```
+PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
+Content-Length: <size of layer>
+Content-Type: application/octet-stream
+
+<Layer Binary Data>
+```
+
+The "digest" parameter must be included with the PUT request. Please see the
+[_Completed Upload_](#completed-upload) section for details on the parameters
+and expected responses.
+
+##### Chunked Upload
+
+To carry out an upload of a chunk, the client can specify a range header and
+only include that part of the layer file:
+
+```
+PATCH /v2/<name>/blobs/uploads/<uuid>
+Content-Length: <size of chunk>
+Content-Range: <start of range>-<end of range>
+Content-Type: application/octet-stream
+
+<Layer Chunk Binary Data>
+```
+
+There is no enforcement on layer chunk splits other than that the server must
+receive them in order. The server may enforce a minimum chunk size. If the
+server cannot accept the chunk, a `416 Requested Range Not Satisfiable`
+response will be returned and will include a `Range` header indicating the
+current status:
+
+```
+416 Requested Range Not Satisfiable
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: 0-<last valid range>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+If this response is received, the client should resume from the "last valid
+range" and upload the subsequent chunk. A 416 will be returned under the
+following conditions:
+
+- Invalid Content-Range header format
+- Out of order chunk: the range of the next chunk must start immediately after
+  the "last valid range" from the previous response.
+
+When a chunk is accepted as part of the upload, a `202 Accepted` response will
+be returned, including a `Range` header with the current upload status:
+
+```
+202 Accepted
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: bytes=0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+##### Completed Upload
+
+For an upload to be considered complete, the client must submit a `PUT`
+request on the upload endpoint with a digest parameter. If it is not provided,
+the upload will not be considered complete. The format for the final chunk
+will be as follows:
+
+```
+PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
+Content-Length: <size of chunk>
+Content-Range: <start of range>-<end of range>
+Content-Type: application/octet-stream
+
+<Last Layer Chunk Binary Data>
+```
+
+Optionally, if all chunks have already been uploaded, a `PUT` request with a
+`digest` parameter and zero-length body may be sent to complete and validate
+the upload. Multiple "digest" parameters may be provided with different
+digests. The server may verify none or all of them but _must_ notify the
+client if the content is rejected.
+
+When the last chunk is received and the layer has been validated, the client
+will receive a `201 Created` response:
+
+```
+201 Created
+Location: /v2/<name>/blobs/<digest>
+Content-Length: 0
+Docker-Content-Digest: <digest>
+```
+
+The `Location` header will contain the registry URL to access the accepted
+layer file. The `Docker-Content-Digest` header returns the canonical digest of
+the uploaded blob which may differ from the provided digest. Most clients may
+ignore the value but if it is used, the client should verify the value against
+the uploaded blob data.
+
+###### Digest Parameter
+
+The "digest" parameter is designed as an opaque parameter to support
+verification of a successful transfer. For example, an HTTP URI parameter
+might be as follows:
+
+```
+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
+```
+
+Given this parameter, the registry will verify that the provided content does
+match this digest.
+
+##### Canceling an Upload
+
+An upload can be cancelled by issuing a DELETE request to the upload endpoint.
+The format will be as follows:
+
+```
+DELETE /v2/<name>/blobs/uploads/<uuid>
+```
+
+After this request is issued, the upload uuid will no longer be valid and the
+registry server will dump all intermediate data. While uploads will time out
+if not completed, clients should issue this request if they encounter a fatal
+error but still have the ability to issue an http request.
+
+##### Cross Repository Blob Mount
+
+A blob may be mounted from another repository that the client has read access
+to, removing the need to upload a blob already known to the registry. To issue
+a blob mount instead of an upload, a POST request should be issued in the
+following format:
+
+```
+POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
+Content-Length: 0
+```
+
+If the blob is successfully mounted, the client will receive a `201 Created`
+response:
+
+```
+201 Created
+Location: /v2/<name>/blobs/<digest>
+Content-Length: 0
+Docker-Content-Digest: <digest>
+```
+
+The `Location` header will contain the registry URL to access the accepted
+layer file. The `Docker-Content-Digest` header returns the canonical digest of
+the uploaded blob which may differ from the provided digest. Most clients may
+ignore the value but if it is used, the client should verify the value against
+the uploaded blob data.
+
+If a mount fails due to invalid repository or digest arguments, the registry
+will fall back to the standard upload behavior and return a `202 Accepted` with
+the upload URL in the `Location` header:
+
+```
+202 Accepted
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: bytes=0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+This behavior is consistent with older versions of the registry, which do not
+recognize the repository mount query parameters.
+
+Note: a client may issue a HEAD request to check existence of a blob in a source
+repository to distinguish between the registry not supporting blob mounts and
+the blob not existing in the expected repository.
+
+##### Errors
+
+If a 502, 503 or 504 error is received, the client should assume that the
+download can proceed due to a temporary condition, honoring the appropriate
+retry mechanism. Other 5xx errors should be treated as terminal.
+
+If there is a problem with the upload, a 4xx error will be returned indicating
+the problem. After receiving a 4xx response (except 416, as called out above),
+the upload will be considered failed and the client should take appropriate
+action.
+
+Note that the upload url will not be available forever. If the upload uuid is
+unknown to the registry, a `404 Not Found` response will be returned and the
+client must restart the upload process.
+
+### Deleting a Layer
+
+A layer may be deleted from the registry via its `name` and `digest`. A
+delete may be issued with the following request format:
+
+    DELETE /v2/<name>/blobs/<digest>
+
+If the blob exists and has been successfully deleted, the following response
+will be issued:
+
+    202 Accepted
+    Content-Length: None
+
+If the blob had already been deleted or did not exist, a `404 Not Found`
+response will be issued instead.
+
+If a layer is deleted which is referenced by a manifest in the registry,
+then the complete images will not be resolvable.
+
+#### Pushing an Image Manifest
+
+Once all of the layers for an image are uploaded, the client can upload the
+image manifest. An image can be pushed using the following request format:
+
+    PUT /v2/<name>/manifests/<reference>
+    Content-Type: <media type of manifest>
+
+    {
+       "name": <name>,
+       "tag": <tag>,
+       "fsLayers": [
+          {
+             "blobSum": <digest>
+          },
+          ...
+       ],
+       "history": <v1 images>,
+       "signature": <JWS>,
+       ...
+    }
+
+The `name` and `reference` fields of the request body must match those
+specified in the URL. The `reference` field may be a "tag" or a "digest". The
+content type should match the type of the manifest being uploaded, as specified
+in [manifest-v2-1.md](manifest-v2-1.md) and [manifest-v2-2.md](manifest-v2-2.md).
+
+If there is a problem with pushing the manifest, a relevant 4xx response will
+be returned with a JSON error message. Please see the
+[_PUT Manifest_](#put-manifest) section for details on possible error codes that
+may be returned.
+
+If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are
+returned. The `detail` field of the error response will have a `digest` field
+identifying the missing blob. An error is returned for each unknown blob. The
+response format is as follows:
+
+    {
+        "errors": [{
+                "code": "BLOB_UNKNOWN",
+                "message": "blob unknown to registry",
+                "detail": {
+                    "digest": <digest>
+                }
+            },
+            ...
+        ]
+    }
+
+### Listing Repositories
+
+Images are stored in collections, known as a _repository_, which is keyed by a
+`name`, as seen throughout the API specification. A registry instance may
+contain several repositories. The list of available repositories is made
+available through the _catalog_.
+
+The catalog for a given registry can be retrieved with the following request:
+
+```
+GET /v2/_catalog
+```
+
+The response will be in the following format:
+
+```
+200 OK
+Content-Type: application/json
+
+{
+  "repositories": [
+    <name>,
+    ...
+  ]
+}
+```
+
+Note that the contents of the response are specific to the registry
+implementation. Some registries may opt to provide a full catalog output,
+limit it based on the user's access level or omit upstream results, if
+providing mirroring functionality. Subsequently, the presence of a repository
+in the catalog listing only means that the registry *may* provide access to
+the repository at the time of the request. Conversely, a missing entry does
+*not* mean that the registry does not have the repository. In short, the
+catalog reflects what the registry chooses to expose at the time of the
+request, not its complete contents.
+
+For registries with a large number of repositories, this response may be quite
+large. If such a response is expected, one should use pagination. A registry
+may also limit the amount of responses returned even if pagination was not
+explicitly requested. In this case the `Link` header will be returned along
+with the results, and subsequent results can be obtained by following the link
+as if pagination had been initially requested.
+
+For details of the `Link` header, please see the [_Pagination_](#pagination)
+section.
+
+#### Pagination
+
+Paginated catalog results can be retrieved by adding an `n` parameter to the
+request URL, declaring that the response should be limited to `n` results.
+Starting a paginated flow begins as follows:
+
+```
+GET /v2/_catalog?n=<integer>
+```
+
+The above specifies that a catalog response should be returned, from the start of
+the result set, ordered lexically, limiting the number of results to `n`. The
+response to such a request would look as follows:
+
+```
+200 OK
+Content-Type: application/json
+Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next"
+
+{
+  "repositories": [
+    <name>,
+    ...
+  ]
+}
+```
+
+The above includes the _first_ `n` entries from the result set. To get the
+_next_ `n` entries, one can create a URL where the argument `last` has the
+value from `repositories[len(repositories)-1]`.
+### Listing Repositories
+
+Images are stored in collections known as _repositories_, each keyed by a
+`name`, as seen throughout the API specification. A registry instance may
+contain several repositories. The list of available repositories is made
+available through the _catalog_.
+
+The catalog for a given registry can be retrieved with the following request:
+
+```
+GET /v2/_catalog
+```
+
+The response will be in the following format:
+
+```
+200 OK
+Content-Type: application/json
+
+{
+  "repositories": [
+    <name>,
+    ...
+  ]
+}
+```
+
+Note that the contents of the response are specific to the registry
+implementation. Some registries may opt to provide a full catalog output,
+limit it based on the user's access level, or omit upstream results if
+providing mirroring functionality. Consequently, the presence of a repository
+in the catalog listing only means that the registry *may* provide access to
+the repository at the time of the request. Conversely, a missing entry does
+*not* mean that the registry does not have the repository. More succinctly,
+inclusion in the catalog guarantees only that the repository _may_ be
+available, and omission does not guarantee that it is unavailable.
+
+For registries with a large number of repositories, this response may be quite
+large. If such a response is expected, one should use pagination. A registry
+may also limit the number of results returned even if pagination was not
+explicitly requested. In this case, the `Link` header will be returned along
+with the results, and subsequent results can be obtained by following the link
+as if pagination had been initially requested.
+
+For details of the `Link` header, please see the [_Pagination_](#pagination)
+section.
+
+#### Pagination
+
+Paginated catalog results can be retrieved by adding an `n` parameter to the
+request URL, declaring that the response should be limited to `n` results. A
+paginated flow begins as follows:
+
+```
+GET /v2/_catalog?n=<integer>
+```
+
+The above specifies that a catalog response should be returned, from the start of
+the result set, ordered lexically, limiting the number of results to `n`. The
+response to such a request would look as follows:
+
+```
+200 OK
+Content-Type: application/json
+Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next"
+
+{
+  "repositories": [
+    <name>,
+    ...
+  ]
+}
+```
+
+The above includes the _first_ `n` entries from the result set. To get the
+_next_ `n` entries, one can create a URL where the argument `last` has the
+value from `repositories[len(repositories)-1]`. If there are indeed more
+results, the URL for the next block is encoded in an
+[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next"
+relation. The presence of the `Link` header communicates to the client that
+the entire result set has not been returned and another request must be
+issued. If the header is not present, the client can assume that all results
+have been received.
+
+> __NOTE:__ In the request template above, note that the brackets
+> are required. For example, if the URL is
+> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would
+> be `<http://example.com/v2/_catalog?n=20&last=b>; rel="next"`. Please see
+> [RFC5988](https://tools.ietf.org/html/rfc5988) for details.
+
+Compliant client implementations should always use the `Link` header
+value when proceeding through results linearly. The client may construct URLs
+to skip forward in the catalog.
+
+To get the next result set, a client would issue the request as follows, using
+the URL encoded in the described `Link` header:
+
+```
+GET /v2/_catalog?n=<n from the request>&last=<last repository value from previous response>
+```
+
+The above process should then be repeated until the `Link` header is no longer
+set.
+
+The catalog result set is represented abstractly as a lexically sorted list,
+where the position in that list can be specified by the query term `last`. The
+entries in the response start _after_ the term specified by `last`, up to `n`
+entries.
+
+The behavior of `last` is quite simple when demonstrated with an example. Let
+us say the registry has the following repositories:
+
+```
+a
+b
+c
+d
+```
+
+If the value of `n` is 2, _a_ and _b_ will be returned on the first response.
+The `Link` header returned on the response will have `n` set to 2 and `last`
+set to _b_:
+
+```
+Link: <?n=2&last=b>; rel="next"
+```
+
+The client can then issue the request with the above value from the `Link`
+header, receiving the values _c_ and _d_. Note that `n` may change on the second
+to last response or be fully omitted, depending on the server implementation.
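+A hedged sketch of the full walk follows, assuming the simple
+`<url>; rel="next"` form shown above (a production client should use a real
+RFC5988 parser); the helper name `listAllRepositories` is hypothetical.
+
+```go
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+// listAllRepositories pages through /v2/_catalog, following the Link
+// header until the registry stops returning one.
+func listAllRepositories(c *http.Client, registry string, n int) ([]string, error) {
+	var all []string
+	next := fmt.Sprintf("%s/v2/_catalog?n=%d", registry, n)
+	for next != "" {
+		resp, err := c.Get(next)
+		if err != nil {
+			return nil, err
+		}
+		var page struct {
+			Repositories []string `json:"repositories"`
+		}
+		err = json.NewDecoder(resp.Body).Decode(&page)
+		resp.Body.Close()
+		if err != nil {
+			return nil, err
+		}
+		all = append(all, page.Repositories...)
+
+		// Naive Link parsing: take whatever sits between '<' and '>'.
+		next = ""
+		if link := resp.Header.Get("Link"); link != "" {
+			if i, j := strings.Index(link, "<"), strings.Index(link, ">"); i >= 0 && j > i {
+				u := link[i+1 : j]
+				if !strings.HasPrefix(u, "http") {
+					// Relative "?n=...&last=..." form from the example above.
+					u = registry + "/v2/_catalog" + u
+				}
+				next = u
+			}
+		}
+	}
+	return all, nil
+}
+```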
+### Listing Image Tags
+
+It may be necessary to list all of the tags under a given repository. The tags
+for an image repository can be retrieved with the following request:
+
+    GET /v2/<name>/tags/list
+
+The response will be in the following format:
+
+    200 OK
+    Content-Type: application/json
+
+    {
+        "name": <name>,
+        "tags": [
+            <tag>,
+            ...
+        ]
+    }
+
+For repositories with a large number of tags, this response may be quite
+large. If such a response is expected, one should use pagination.
+
+#### Pagination
+
+Paginated tag results can be retrieved by adding the appropriate parameters to
+the request URL described above. The behavior of tag pagination is identical
+to that specified for catalog pagination. We cover a simple flow to highlight
+any differences.
+
+A paginated flow may begin as follows:
+
+```
+GET /v2/<name>/tags/list?n=<integer>
+```
+
+The above specifies that a tags response should be returned, from the start of
+the result set, ordered lexically, limiting the number of results to `n`. The
+response to such a request would look as follows:
+
+```
+200 OK
+Content-Type: application/json
+Link: <<url>?n=<n from the request>&last=<last tag value from previous response>>; rel="next"
+
+{
+  "name": <name>,
+  "tags": [
+    <tag>,
+    ...
+  ]
+}
+```
+
+To get the next result set, a client would issue the request as follows, using
+the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link`
+header:
+
+```
+GET /v2/<name>/tags/list?n=<n from the request>&last=<last tag value from previous response>
+```
+
+The above process should then be repeated until the `Link` header is no longer
+set in the response. The behavior of the `last` parameter, the provided
+response result, lexical ordering and encoding of the `Link` header are
+identical to that of catalog pagination.
+
+### Deleting an Image
+
+An image may be deleted from the registry via its `name` and `reference`. A
+delete may be issued with the following request format:
+
+    DELETE /v2/<name>/manifests/<reference>
+
+For deletes, `reference` *must* be a digest or the delete will fail. If the
+image exists and has been successfully deleted, the following response will be
+issued:
+
+    202 Accepted
+    Content-Length: None
+
+If the image had already been deleted or did not exist, a `404 Not Found`
+response will be issued instead.
+
+> **Note**: When deleting a manifest from a registry version 2.3 or later, the
+> following header must be used when `HEAD` or `GET`-ing the manifest to obtain
+> the correct digest to delete:
+>
+>     Accept: application/vnd.docker.distribution.manifest.v2+json
+>
+> For more details, see [compatibility.md](../compatibility.md#content-addressable-storage-cas).
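+Tying the note above together with the delete flow, a minimal sketch follows
+that first resolves a tag to its canonical digest with a `HEAD` request
+carrying the required `Accept` header, then deletes by that digest. The
+function name `deleteImage` is hypothetical.
+
+```go
+package client
+
+import (
+	"fmt"
+	"net/http"
+)
+
+const manifestV2 = "application/vnd.docker.distribution.manifest.v2+json"
+
+// deleteImage resolves name:tag to a digest via HEAD, then issues the
+// DELETE by digest, since deletes by tag are rejected.
+func deleteImage(c *http.Client, registry, name, tag string) error {
+	head, err := http.NewRequest(http.MethodHead,
+		fmt.Sprintf("%s/v2/%s/manifests/%s", registry, name, tag), nil)
+	if err != nil {
+		return err
+	}
+	head.Header.Set("Accept", manifestV2)
+	resp, err := c.Do(head)
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	digest := resp.Header.Get("Docker-Content-Digest")
+	if digest == "" {
+		return fmt.Errorf("no digest returned for %s:%s", name, tag)
+	}
+
+	del, err := http.NewRequest(http.MethodDelete,
+		fmt.Sprintf("%s/v2/%s/manifests/%s", registry, name, digest), nil)
+	if err != nil {
+		return err
+	}
+	if resp, err = c.Do(del); err != nil {
+		return err
+	}
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusAccepted {
+		return fmt.Errorf("delete failed: %s", resp.Status)
+	}
+	return nil
+}
+```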
+## Detail
+
+> **Note**: This section is still under construction. For the purposes of
+> implementation, if any details below differ from the described request flows
+> above, the section below should be corrected. When they match, this note
+> should be removed.
+
+The behavior of the endpoints is covered in detail in this section, organized
+by route and entity. All aspects of the requests and responses are covered,
+including headers, parameters and body formats. Examples of requests and their
+corresponding responses, with success and failure, are enumerated.
+
+> **Note**: The sections on endpoint detail are arranged with an example
+> request, a description of the request, followed by information about that
+> request.
+
+A list of methods and URIs is covered in the table below:
+
+|Method|Path|Entity|Description|
+|------|----|------|-----------|
+| GET | `/v2/` | Base | Check that the endpoint implements Docker Registry API V2. |
+| GET | `/v2/<name>/tags/list` | Tags | Fetch the tags under the repository identified by `name`. |
+| GET | `/v2/<name>/manifests/<reference>` | Manifest | Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. |
+| PUT | `/v2/<name>/manifests/<reference>` | Manifest | Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. |
+| DELETE | `/v2/<name>/manifests/<reference>` | Manifest | Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. |
+| GET | `/v2/<name>/blobs/<digest>` | Blob | Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. |
+| DELETE | `/v2/<name>/blobs/<digest>` | Blob | Delete the blob identified by `name` and `digest`. |
+| POST | `/v2/<name>/blobs/uploads/` | Initiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. |
+| GET | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. |
+| PATCH | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Upload a chunk of data for the specified upload. |
+| PUT | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Complete the upload specified by `uuid`, optionally appending the body as the final chunk. |
+| DELETE | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually time out. |
+| GET | `/v2/_catalog` | Catalog | Retrieve a sorted, JSON list of repositories available in the registry. |
+
+The detail for each endpoint is covered in the following sections.
+
+### Errors
+
+The error codes encountered via the API are enumerated in the following table:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
+| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
+| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `MANIFEST_BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a manifest blob is unknown to the registry. |
+| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information about the failed validation. |
+| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag, is unknown to the repository. |
+| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+| `SIZE_INVALID` | provided length did not match content length | When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned. |
+| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. |
+| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. |
+| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |
+| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. |
+
+### Base
+
+Base V2 API route. Typically, this can be used for lightweight version checks
+and to validate registry authentication.
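+As a small sketch of such a version check, assuming a plain `net/http`
+client: a `200` means the V2 API is implemented, a `401` means it is
+implemented but requires authentication, and a `404` means it is not
+supported. The helper name `checkV2` is illustrative.
+
+```go
+package client
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// checkV2 probes the base endpoint and reports whether the registry
+// implements Docker Registry API V2.
+func checkV2(c *http.Client, registry string) (bool, error) {
+	resp, err := c.Get(registry + "/v2/")
+	if err != nil {
+		return false, err
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusOK, http.StatusUnauthorized:
+		// 401 still indicates a V2 endpoint; the WWW-Authenticate
+		// header describes how to obtain credentials.
+		return true, nil
+	case http.StatusNotFound:
+		return false, nil
+	default:
+		return false, fmt.Errorf("unexpected status: %s", resp.Status)
+	}
+}
+```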
+ + + +#### GET Base + +Check that the endpoint implements Docker Registry API V2. + + + +``` +GET /v2/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| + + + + +###### On Success: OK + +``` +200 OK +``` + +The API implements V2 protocol and is accessible. + + + + +###### On Failure: Not Found + +``` +404 Not Found +``` + +The registry does not implement the V2 API. + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Tags + +Retrieve information about tags. + + + +#### GET Tags + +Fetch the tags under the repository identified by `name`. + + +##### Tags + +``` +GET /v2//tags/list +Host: +Authorization: +``` + +Return all tags for the repository + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tags": [ + , + ... + ] +} +``` + +A list of tags for the named repository. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Tags Paginated + +``` +GET /v2//tags/list?n=&last= +``` + +Return a portion of the tags for the specified repository. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`name`|path|Name of the target repository.| +|`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| +|`last`|query|Result set will include values lexically after last.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Link: <?n=&last=>; rel="next" +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tags": [ + , + ... + ], +} +``` + +A list of tags for the named repository. 
+ +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| +|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| + + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Manifest + +Create, update, delete and retrieve manifests. + + + +#### GET Manifest + +Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. + + + +``` +GET /v2//manifests/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: OK + +``` +200 OK +Docker-Content-Digest: +Content-Type: + +{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +} +``` + +The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The name or reference was invalid. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. 
| + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### PUT Manifest + +Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. + + + +``` +PUT /v2//manifests/ +Host: +Authorization: +Content-Type: + +{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +} +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: Created + +``` +201 Created +Location: +Content-Length: 0 +Docker-Content-Digest: +``` + +The manifest has been accepted by the registry and is stored under the specified `name` and `tag`. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The canonical location url of the uploaded manifest.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Invalid Manifest + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. 
| +| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. | +| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +###### On Failure: Missing Layer(s) + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": "" + } + }, + ... + ] +} +``` + +One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + + +#### DELETE Manifest + +Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. + + + +``` +DELETE /v2//manifests/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: Accepted + +``` +202 Accepted +``` + + + + + + +###### On Failure: Invalid Name or Reference + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The specified `name` or `reference` were invalid and the delete was unable to proceed. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +###### On Failure: Unknown Manifest + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. 
| + + + +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + + + +### Blob + +Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest. + + + +#### GET Blob + +Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. + + +##### Fetch Blob + +``` +GET /v2//blobs/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`digest`|path|Digest of desired blob.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Docker-Content-Digest: +Content-Type: application/octet-stream + + +``` + +The blob identified by `digest` is available. The blob content will be present in the body of the request. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|The length of the requested blob content.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + +###### On Success: Temporary Redirect + +``` +307 Temporary Redirect +Location: +Docker-Content-Digest: +``` + +The blob identified by `digest` is available at the provided location. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The location where the layer should be accessible.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The blob, identified by `name` and `digest`, is unknown to the registry. 
+ + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Fetch Blob Part + +``` +GET /v2//blobs/ +Host: +Authorization: +Range: bytes=- +``` + +This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Range`|header|HTTP Range header specifying blob chunk.| +|`name`|path|Name of the target repository.| +|`digest`|path|Digest of desired blob.| + + + + +###### On Success: Partial Content + +``` +206 Partial Content +Content-Length: +Content-Range: bytes -/ +Content-Type: application/octet-stream + + +``` + +The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|The length of the requested blob chunk.| +|`Content-Range`|Content range of blob chunk.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Requested Range Not Satisfiable + +``` +416 Requested Range Not Satisfiable +``` + +The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content. + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### DELETE Blob + +Delete the blob identified by `name` and `digest` + + + +``` +DELETE /v2//blobs/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. 
Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`digest`|path|Digest of desired blob.| + + + + +###### On Success: Accepted + +``` +202 Accepted +Content-Length: 0 +Docker-Content-Digest: +``` + + + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|0| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The blob, identified by `name` and `digest`, is unknown to the registry. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Method Not Allowed + +``` +405 Method Not Allowed +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. 
| + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Initiate Blob Upload + +Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads. + + + +#### POST Initiate Blob Upload + +Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. + + +##### Initiate Monolithic Blob Upload + +``` +POST /v2//blobs/uploads/?digest= +Host: +Authorization: +Content-Length: +Content-Type: application/octect-stream + + +``` + +Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|| +|`name`|path|Name of the target repository.| +|`digest`|query|Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.| + + + + +###### On Success: Created + +``` +201 Created +Location: +Content-Length: 0 +Docker-Upload-UUID: +``` + +The blob has been created in the registry and is available at the provided location. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | + + + +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. 
| + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Initiate Resumable Blob Upload + +``` +POST /v2//blobs/uploads/ +Host: +Authorization: +Content-Length: 0 +``` + +Initiate a resumable blob upload with an empty request body. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| +|`name`|path|Name of the target repository.| + + + + +###### On Success: Accepted + +``` +202 Accepted +Content-Length: 0 +Location: /v2//blobs/uploads/ +Range: 0-0 +Docker-Upload-UUID: +``` + +The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Location`|The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.| +|`Range`|Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. 
| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Mount Blob + +``` +POST /v2//blobs/uploads/?mount=&from= +Host: +Authorization: +Content-Length: 0 +``` + +Mount a blob identified by the `mount` parameter from another repository. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. 
Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| +|`name`|path|Name of the target repository.| +|`mount`|query|Digest of blob to mount from the source repository.| +|`from`|query|Name of the source repository.| + + + + +###### On Success: Created + +``` +201 Created +Location: +Content-Length: 0 +Docker-Upload-UUID: +``` + +The blob has been mounted in the repository and is available at the provided location. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | + + + +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. 
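+
+A mount can fail in this way when the source repository is unknown or unreadable. Since a client often cannot know this in advance, a practical pattern is to attempt the mount and fall back to a regular upload. The following non-normative sketch in Go illustrates the idea; the package name and the `registryBase`, `targetRepo`, `sourceRepo`, and `dgst` values are hypothetical, and the sketch assumes a registry that answers a declined mount by opening an ordinary resumable upload with `202 Accepted`:
+
+```
+// Non-normative sketch of a cross-repository blob mount with a fallback
+// to a regular upload. Authorization headers are omitted for brevity.
+package registryclient
+
+import (
+    "fmt"
+    "net/http"
+    "net/url"
+)
+
+// mountOrBeginUpload attempts to mount dgst from sourceRepo into
+// targetRepo. It reports the returned Location and whether the blob was
+// mounted (201 Created) or a plain upload was started instead (202).
+func mountOrBeginUpload(c *http.Client, registryBase, targetRepo, sourceRepo, dgst string) (string, bool, error) {
+    u := fmt.Sprintf("%s/v2/%s/blobs/uploads/?mount=%s&from=%s",
+        registryBase, targetRepo, url.QueryEscape(dgst), url.QueryEscape(sourceRepo))
+    req, err := http.NewRequest(http.MethodPost, u, nil) // empty body, Content-Length: 0
+    if err != nil {
+        return "", false, err
+    }
+    resp, err := c.Do(req)
+    if err != nil {
+        return "", false, err
+    }
+    defer resp.Body.Close()
+    switch resp.StatusCode {
+    case http.StatusCreated: // blob mounted; no upload necessary
+        return resp.Header.Get("Location"), true, nil
+    case http.StatusAccepted: // mount declined; a resumable upload was opened
+        return resp.Header.Get("Location"), false, nil
+    }
+    return "", false, fmt.Errorf("unexpected status: %s", resp.Status)
+}
+```
+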
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Blob Upload + +Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls. + + + +#### GET Blob Upload + +Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. + + + +``` +GET /v2//blobs/uploads/ +Host: +Authorization: +``` + +Retrieve the progress of the current upload, as reported by the `Range` header. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Upload Progress + +``` +204 No Content +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The upload is known and in progress. The last received offset is available in the `Range` header. 
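+
+For example, a client resuming an interrupted upload can poll this endpoint and derive the next byte to send from that header. A non-normative sketch, continuing the hypothetical package from the mount example (`uploadURL` stands for a previously returned `Location` value):
+
+```
+// nextOffset asks the registry how much of an upload it has received and
+// returns the offset at which the client should resume writing.
+func nextOffset(c *http.Client, uploadURL string) (int64, error) {
+    resp, err := c.Get(uploadURL)
+    if err != nil {
+        return 0, err
+    }
+    defer resp.Body.Close()
+    if resp.StatusCode != http.StatusNoContent {
+        return 0, fmt.Errorf("unexpected status: %s", resp.Status)
+    }
+    // The Range header has the form "0-<offset>", where <offset> is the
+    // last byte received; the next chunk starts one byte past it.
+    var start, end int64
+    if _, err := fmt.Sscanf(resp.Header.Get("Range"), "%d-%d", &start, &end); err != nil {
+        return 0, err
+    }
+    return end + 1, nil
+}
+```
+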
+ +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Range`|Range indicating the current progress of the upload.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was an error processing the upload and it must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The upload must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. 
| + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### PATCH Blob Upload + +Upload a chunk of data for the specified upload. + + +##### Stream upload + +``` +PATCH /v2//blobs/uploads/ +Host: +Authorization: +Content-Type: application/octet-stream + + +``` + +Upload a stream of data to upload without completing the upload. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Data Accepted + +``` +204 No Content +Location: /v2//blobs/uploads/ +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| +|`Range`|Range indicating the current progress of the upload.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was an error processing the upload and it must be restarted. 
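+
+Because a failed stream cannot be resumed mid-flight, the usual recovery is simply to re-issue the request. For reference, a non-normative sketch of the streamed `PATCH` itself, continuing the hypothetical package from the earlier examples (`io` added to its imports):
+
+```
+// streamUpload sends the entire blob body in a single PATCH without
+// completing the upload. The returned Location must be used for the next
+// request, since it may change after every call.
+func streamUpload(c *http.Client, uploadURL string, body io.Reader) (string, error) {
+    req, err := http.NewRequest(http.MethodPatch, uploadURL, body)
+    if err != nil {
+        return "", err
+    }
+    req.Header.Set("Content-Type", "application/octet-stream")
+    resp, err := c.Do(req)
+    if err != nil {
+        return "", err
+    }
+    defer resp.Body.Close()
+    if resp.StatusCode != http.StatusNoContent {
+        return "", fmt.Errorf("stream not accepted: %s", resp.Status)
+    }
+    return resp.Header.Get("Location"), nil
+}
+```
+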
+ + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The upload must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. 
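+
+Whichever failure occurs, the body carries the same JSON error envelope used throughout this API, so a single decoder suffices. A non-normative sketch (`encoding/json` added to the hypothetical imports); note that clients should treat unrecognized `code` values as `UNKNOWN`:
+
+```
+// apiError mirrors one entry of the "errors" array returned with 4xx
+// responses anywhere in this API.
+type apiError struct {
+    Code    string          `json:"code"`
+    Message string          `json:"message"`
+    Detail  json.RawMessage `json:"detail,omitempty"`
+}
+
+// decodeErrors parses the standard {"errors": [...]} failure body.
+func decodeErrors(r io.Reader) ([]apiError, error) {
+    var envelope struct {
+        Errors []apiError `json:"errors"`
+    }
+    if err := json.NewDecoder(r).Decode(&envelope); err != nil {
+        return nil, err
+    }
+    return envelope.Errors, nil
+}
+```
+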
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Chunked upload + +``` +PATCH /v2//blobs/uploads/ +Host: +Authorization: +Content-Range: - +Content-Length: +Content-Type: application/octet-stream + + +``` + +Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Range`|header|Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.| +|`Content-Length`|header|Length of the chunk being uploaded, corresponding the length of the request body.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Chunk Accepted + +``` +204 No Content +Location: /v2//blobs/uploads/ +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| +|`Range`|Range indicating the current progress of the upload.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was an error processing the upload and it must be restarted. 
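+
+The `Content-Range` bookkeeping is the error-prone part of chunked uploads: each chunk must begin exactly one byte past the end offset most recently reported by the registry. A non-normative sketch, continuing the hypothetical package (`bytes` added to its imports):
+
+```
+// putChunk uploads one chunk whose first byte sits at offset. It returns
+// the new upload Location and the offset of the next chunk.
+func putChunk(c *http.Client, uploadURL string, offset int64, chunk []byte) (string, int64, error) {
+    req, err := http.NewRequest(http.MethodPatch, uploadURL, bytes.NewReader(chunk))
+    if err != nil {
+        return "", 0, err
+    }
+    end := offset + int64(len(chunk)) - 1
+    req.Header.Set("Content-Type", "application/octet-stream")
+    req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", offset, end))
+    // Content-Length is derived automatically from the bytes.Reader.
+    resp, err := c.Do(req)
+    if err != nil {
+        return "", 0, err
+    }
+    defer resp.Body.Close()
+    if resp.StatusCode != http.StatusNoContent {
+        return "", 0, fmt.Errorf("chunk rejected: %s", resp.Status)
+    }
+    return resp.Header.Get("Location"), end + 1, nil
+}
+```
+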
+ + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The upload must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Requested Range Not Satisfiable + +``` +416 Requested Range Not Satisfiable +``` + +The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. 
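+
+Of these failures, the `416` case is recoverable: instead of guessing at the offset, the client can re-read the upload status and continue from whatever the registry actually recorded. A non-normative sketch combining the `putChunk` and `nextOffset` helpers above; a production client would bound the retries rather than loop indefinitely:
+
+```
+// uploadChunked drives a chunked upload of data, re-synchronizing the
+// offset whenever the registry rejects a chunk (for example with a 416).
+func uploadChunked(c *http.Client, uploadURL string, data []byte, chunkSize int64) (string, error) {
+    var offset int64
+    for offset < int64(len(data)) {
+        end := offset + chunkSize
+        if end > int64(len(data)) {
+            end = int64(len(data))
+        }
+        loc, next, err := putChunk(c, uploadURL, offset, data[offset:end])
+        if err != nil {
+            // Disagreement about progress: ask the registry for the
+            // authoritative offset and retry from there (unbounded here
+            // only to keep the sketch short).
+            if offset, err = nextOffset(c, uploadURL); err != nil {
+                return "", err
+            }
+            continue
+        }
+        uploadURL, offset = loc, next
+    }
+    return uploadURL, nil
+}
+```
+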
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### PUT Blob Upload + +Complete the upload specified by `uuid`, optionally appending the body as the final chunk. + + + +``` +PUT /v2//blobs/uploads/?digest= +Host: +Authorization: +Content-Length: +Content-Type: application/octet-stream + + +``` + +Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| +|`digest`|query|Digest of uploaded blob.| + + + + +###### On Success: Upload Complete + +``` +204 No Content +Location: +Content-Range: - +Content-Length: 0 +Docker-Content-Digest: +``` + +The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The canonical location of the blob for retrieval| +|`Content-Range`|Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was an error processing the upload and it must be restarted. 
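+
+Once the data is in place, completion is a small request: an empty-bodied `PUT` against the most recent upload `Location`, with the computed digest appended as a query parameter and any parameters already present on that URL preserved. A non-normative sketch, continuing the hypothetical package:
+
+```
+// completeUpload finishes an upload whose data has already been sent,
+// returning the canonical blob location reported by the registry.
+func completeUpload(c *http.Client, uploadURL, dgst string) (string, error) {
+    u, err := url.Parse(uploadURL)
+    if err != nil {
+        return "", err
+    }
+    q := u.Query() // keep any parameters the registry put on the Location
+    q.Set("digest", dgst)
+    u.RawQuery = q.Encode()
+    req, err := http.NewRequest(http.MethodPut, u.String(), nil)
+    if err != nil {
+        return "", err
+    }
+    resp, err := c.Do(req)
+    if err != nil {
+        return "", err
+    }
+    defer resp.Body.Close()
+    if resp.StatusCode != http.StatusNoContent {
+        return "", fmt.Errorf("completion rejected: %s", resp.Status)
+    }
+    return resp.Header.Get("Location"), nil
+}
+```
+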
+ + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The upload must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. 
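+
+The digest supplied at completion is a property of the blob bytes alone and can be computed while the data streams past. A non-normative sketch using sha256, the algorithm compliant implementations are expected to use (`crypto/sha256` and `encoding/hex` added to the hypothetical imports):
+
+```
+// blobDigest computes a digest string of the form "sha256:<hex>" by
+// streaming the content through the hash, so large layers need not be
+// held in memory.
+func blobDigest(r io.Reader) (string, error) {
+    h := sha256.New()
+    if _, err := io.Copy(h, r); err != nil {
+        return "", err
+    }
+    return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
+```
+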
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### DELETE Blob Upload + +Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. + + + +``` +DELETE /v2//blobs/uploads/ +Host: +Authorization: +Content-Length: 0 +``` + +Cancel the upload specified by `uuid`. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Upload Deleted + +``` +204 No Content +Content-Length: 0 +``` + +The upload has been successfully deleted. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +An error was encountered processing the delete. The client may ignore this error. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted. 
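+
+A cancellation helper can therefore treat `404 Not Found` the same as success, since both outcomes leave the client in the same state. A non-normative sketch, continuing the hypothetical package:
+
+```
+// cancelUpload aborts an outstanding upload. A 404 means the upload is
+// already gone, which is indistinguishable from a successful cancel.
+func cancelUpload(c *http.Client, uploadURL string) error {
+    req, err := http.NewRequest(http.MethodDelete, uploadURL, nil)
+    if err != nil {
+        return err
+    }
+    resp, err := c.Do(req)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    if resp.StatusCode == http.StatusNoContent || resp.StatusCode == http.StatusNotFound {
+        return nil
+    }
+    return fmt.Errorf("cancel failed: %s", resp.Status)
+}
+```
+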
+ + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Catalog + +List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. 
Applications can only determine that a repository is available; they cannot
+determine that a missing repository is unavailable upstream.
+
+
+
+#### GET Catalog
+
+Retrieve a sorted, JSON list of repositories available in the registry.
+
+
+##### Catalog Fetch
+
+```
+GET /v2/_catalog
+```
+
+Request an unabridged list of repositories available. The implementation may
+impose a maximum limit and return a partial set with pagination links.
+
+
+
+
+
+###### On Success: OK
+
+```
+200 OK
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+  "repositories": [
+    <name>,
+    ...
+  ]
+}
+```
+
+Returns the unabridged list of repositories as a JSON response.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+
+
+##### Catalog Fetch Paginated
+
+```
+GET /v2/_catalog?n=<integer>&last=<integer>
+```
+
+Return the specified portion of repositories.
+
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`n`|query|Limit the number of entries in each response. If not present, all entries will be returned.|
+|`last`|query|The result set will include values lexically after `last`.|
+
+
+
+
+###### On Success: OK
+
+```
+200 OK
+Content-Length: <length>
+Link: <<url>?n=<last n value>&last=<last entry from response>>; rel="next"
+Content-Type: application/json; charset=utf-8
+
+{
+  "repositories": [
+    <name>,
+    ...
+  ],
+  "next": "<url>?last=<name>&n=<last value of n>"
+}
+```
+
+
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+|`Link`|RFC5988 compliant `rel="next"` link to the next result set, if available.|
+
+
+
+
+
diff --git a/docs/spec/api.md.tmpl b/docs/spec/api.md.tmpl
new file mode 100644
index 00000000..eeafec1e
--- /dev/null
+++ b/docs/spec/api.md.tmpl
@@ -0,0 +1,1219 @@
+
+
+# Docker Registry HTTP API V2
+
+## Introduction
+
+The _Docker Registry HTTP API_ is the protocol to facilitate distribution of
+images to the docker engine. It interacts with instances of the docker
+registry, which is a service to manage information about docker images and
+enable their distribution. The specification covers the operation of version 2
+of this API, known as _Docker Registry HTTP API V2_.
+
+While the V1 registry protocol is usable, there are several problems with the
+architecture that have led to this new version. The main driver of this
+specification is a set of changes to the docker image format, covered in
+[docker/docker#8093](https://github.com/docker/docker/issues/8093).
+The new, self-contained image manifest simplifies image definition and improves
+security. This specification will build on that work, leveraging new properties
+of the manifest format to improve performance, reduce bandwidth usage and
+decrease the likelihood of backend corruption.
+
+For relevant details and history leading up to this specification, please see
+the following issues:
+
+- [docker/docker#8093](https://github.com/docker/docker/issues/8093)
+- [docker/docker#9015](https://github.com/docker/docker/issues/9015)
+- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612)
+
+### Scope
+
+This specification covers the URL layout and protocols of the interaction
+between docker registry and docker core. This will affect the docker core
+registry API and the rewrite of docker-registry. Docker registry
+implementations may implement other API endpoints, but they are not covered by
+this specification.
+ +This includes the following features: + +- Namespace-oriented URI Layout +- PUSH/PULL registry server for V2 image manifest format +- Resumable layer PUSH support +- V2 Client library implementation + +While authentication and authorization support will influence this +specification, details of the protocol will be left to a future specification. +Relevant header definitions and error codes are present to provide an +indication of what a client may encounter. + +#### Future + +There are features that have been discussed during the process of cutting this +specification. The following is an incomplete list: + +- Immutable image references +- Multiple architecture support +- Migration from v2compatibility representation + +These may represent features that are either out of the scope of this +specification, the purview of another specification or have been deferred to a +future version. + +### Use Cases + +For the most part, the use cases of the former registry API apply to the new +version. Differentiating use cases are covered below. + +#### Image Verification + +A docker engine instance would like to run verified image named +"library/ubuntu", with the tag "latest". The engine contacts the registry, +requesting the manifest for "library/ubuntu:latest". An untrusted registry +returns a manifest. Before proceeding to download the individual layers, the +engine verifies the manifest's signature, ensuring that the content was +produced from a trusted source and no tampering has occurred. After each layer +is downloaded, the engine verifies the digest of the layer, ensuring that the +content matches that specified by the manifest. + +#### Resumable Push + +Company X's build servers lose connectivity to docker registry before +completing an image layer transfer. After connectivity returns, the build +server attempts to re-upload the image. The registry notifies the build server +that the upload has already been partially attempted. The build server +responds by only sending the remaining data to complete the image file. + +#### Resumable Pull + +Company X is having more connectivity problems but this time in their +deployment datacenter. When downloading an image, the connection is +interrupted before completion. The client keeps the partial data and uses http +`Range` requests to avoid downloading repeated data. + +#### Layer Upload De-duplication + +Company Y's build system creates two identical docker layers from build +processes A and B. Build process A completes uploading the layer before B. +When process B attempts to upload the layer, the registry indicates that its +not necessary because the layer is already known. + +If process A and B upload the same layer at the same time, both operations +will proceed and the first to complete will be stored in the registry (Note: +we may modify this to prevent dogpile with some locking mechanism). + +### Changes + +The V2 specification has been written to work as a living document, specifying +only what is certain and leaving what is not specified open or to future +changes. Only non-conflicting additions should be made to the API and accepted +changes should avoid preventing future changes from happening. + +This section should be updated when changes are made to the specification, +indicating what is different. Optionally, we may start marking parts of the +specification to correspond with the versions enumerated here. 
+ +Each set of changes is given a letter corresponding to a set of modifications +that were applied to the baseline specification. These are merely for +reference and shouldn't be used outside the specification other than to +identify a set of modifications. + +
+
+- `l`: Document the `TOOMANYREQUESTS` error code.
+- `k`: Document use of `Accept` and `Content-Type` headers in manifests endpoint.
+- `j`: Add ability to mount blobs across repositories.
+- `i`: Clarified the expected response behavior for manifest HEAD requests.
+- `h`: All mention of tarsum removed.
+- `g`: Clarified pagination behavior when parameters are unspecified.
+- `f`: Specify the delete API for layers and manifests.
+- `e`:
+  - Added support for listing registry contents.
+  - Added pagination to tags API.
+  - Added common approach to support pagination.
+- `d`:
+  - Allow repository name components to be one character.
+  - Clarified that single component names are allowed.
+- `c`:
+  - Added section covering digest format.
+  - Added more clarification that manifest cannot be deleted by tag.
+- `b`:
+  - Added capability of doing streaming upload to PATCH blob upload.
+  - Updated PUT blob upload to no longer take a final chunk; it now requires the entire data or no data.
+  - Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.
+- `a`:
+  - Added support for immutable manifest references in manifest endpoints.
+  - Deprecated deleting a manifest by tag.
+  - Specified `Docker-Content-Digest` header for appropriate entities.
+  - Added error code for unsupported operations.
+
+ +## Overview + +This section covers client flows and details of the API endpoints. The URI +layout of the new API is structured to support a rich authentication and +authorization model by leveraging namespaces. All endpoints will be prefixed +by the API version and the repository name: + + /v2// + +For example, an API endpoint that will work with the `library/ubuntu` +repository, the URI prefix will be: + + /v2/library/ubuntu/ + +This scheme provides rich access control over various operations and methods +using the URI prefix and http methods that can be controlled in variety of +ways. + +Classically, repository names have always been two path components where each +path component is less than 30 characters. The V2 registry API does not +enforce this. The rules for a repository name are as follows: + +1. A repository name is broken up into _path components_. A component of a + repository name must be at least one lowercase, alpha-numeric characters, + optionally separated by periods, dashes or underscores. More strictly, it + must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. +2. If a repository name has two or more path components, they must be + separated by a forward slash ("/"). +3. The total length of a repository name, including slashes, must be less the + 256 characters. + +These name requirements _only_ apply to the registry API and should accept a +superset of what is supported by other docker ecosystem components. + +All endpoints should support aggressive http caching, compression and range +headers, where appropriate. The new API attempts to leverage HTTP semantics +where possible but may break from standards to implement targeted features. + +For detail on individual endpoints, please see the [_Detail_](#detail) +section. + +### Errors + +Actionable failure conditions, covered in detail in their relevant sections, +are reported as part of 4xx responses, in a json response body. One or more +errors will be returned in the following format: + + { + "errors:" [{ + "code": , + "message": , + "detail": + }, + ... + ] + } + +The `code` field will be a unique identifier, all caps with underscores by +convention. The `message` field will be a human readable string. The optional +`detail` field may contain arbitrary json data providing information the +client can use to resolve the issue. + +While the client can take action on certain error codes, the registry may add +new error codes over time. All client implementations should treat unknown +error codes as `UNKNOWN`, allowing future error codes to be added without +breaking API compatibility. For the purposes of the specification error codes +will only be added and never removed. + +For a complete account of all error codes, please see the [_Errors_](#errors-2) +section. + +### API Version Check + +A minimal endpoint, mounted at `/v2/` will provide version support information +based on its response statuses. The request format is as follows: + + GET /v2/ + +If a `200 OK` response is returned, the registry implements the V2(.1) +registry API and the client may proceed safely with other V2 operations. +Optionally, the response may contain information about the supported paths in +the response body. The client should be prepared to ignore this data. + +If a `401 Unauthorized` response is returned, the client should take action +based on the contents of the "WWW-Authenticate" header and try the endpoint +again. 
### API Version Check

A minimal endpoint, mounted at `/v2/`, will provide version support
information based on its response statuses. The request format is as follows:

    GET /v2/

If a `200 OK` response is returned, the registry implements the V2(.1)
registry API and the client may proceed safely with other V2 operations.
Optionally, the response may contain information about the supported paths in
the response body. The client should be prepared to ignore this data.

If a `401 Unauthorized` response is returned, the client should take action
based on the contents of the `WWW-Authenticate` header and try the endpoint
again. Depending on access control setup, the client may still have to
authenticate against different resources, even if this check succeeds.

If a `404 Not Found` response status, or another unexpected status, is
returned, the client should proceed with the assumption that the registry
does not implement V2 of the API.

When a `200 OK` or `401 Unauthorized` response is returned, the
`Docker-Distribution-API-Version` header should be set to `registry/2.0`.
Clients may require this header value to determine if the endpoint serves this
API. When this header is omitted, clients may fall back to an older API
version.

### Content Digests

This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage).
The core of this design is the concept of a content addressable identifier. It
uniquely identifies content by taking a collision-resistant hash of the bytes.
Such an identifier can be independently calculated and verified by selection
of a common _algorithm_. If such an identifier can be communicated in a secure
manner, one can retrieve the content from an insecure source, calculate it
independently and be certain that the correct content was obtained. Put
simply, the identifier is a property of the content.

To disambiguate from other concepts, we call this identifier a _digest_. A
_digest_ is a serialized hash result, consisting of an _algorithm_ and _hex_
portion. The _algorithm_ identifies the methodology used to calculate the
digest. The _hex_ portion is the hex-encoded result of the hash.

We define a _digest_ string to match the following grammar:

```
digest    := algorithm ":" hex
algorithm := /[A-Fa-f0-9_+.-]+/
hex       := /[A-Fa-f0-9]+/
```

Some examples of _digests_ include the following:

digest                                                                    | description
--------------------------------------------------------------------------|------------------------------
`sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b` | Common sha256-based digest

While the _algorithm_ does allow one to implement a wide variety of
algorithms, compliant implementations should use sha256. Heavy processing of
input before calculating a hash is discouraged to avoid degrading the
uniqueness of the _digest_, but some canonicalization may be performed to
ensure consistent identifiers.

Let's use a simple example in pseudo-code to demonstrate a digest calculation:

```
let C = 'a small string'
let B = sha256(C)
let D = 'sha256:' + EncodeHex(B)
let ID(C) = D
```

Above, we have bytestring `C` passed into a function, `sha256`, that returns a
bytestring `B`, which is the hash of `C`. `D` gets the algorithm concatenated
with the hex encoding of `B`. We then define the identifier `ID(C)` to be
equal to `D`. A digest can be verified by independently calculating `D` and
comparing it with the identifier `ID(C)`.

#### Digest Header

To provide verification of HTTP content, any response may include a
`Docker-Content-Digest` header. This will include the digest of the target
entity returned in the response. For blobs, this is the entire blob content.
For manifests, this is the manifest body without the signature content, also
known as the JWS payload. Note that the commonly used canonicalization for
digest calculation may depend on the mediatype of the content, such as with
manifests.

The client may choose to ignore the header or may verify it to ensure content
integrity and transport security. This is most important when fetching by a
digest. At times, the returned digest may differ from that used to initiate a
request. Such digests are considered to be from different _domains_, meaning
they have different values for _algorithm_. In such a case, the client may
choose to verify the digests in both domains or ignore the server's digest. To
maintain security, the client _must_ always verify the content against the
_digest_ used to fetch the content.

> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use
> the same digest used to fetch the content to verify it. The header
> `Docker-Content-Digest` should not be trusted over the "local" digest.
### Pulling An Image

An "image" is a combination of a JSON manifest and individual layer files. The
process of pulling an image centers around retrieving these two components.

The first step in pulling an image is to retrieve the manifest. For reference,
the relevant manifest fields for the registry are the following:

field     | description
----------|------------------------------------------------
name      | The name of the image.
tag       | The tag for this version of the image.
fsLayers  | A list of layer descriptors (including digest).
signature | A JWS used to verify the manifest content.

For more information about the manifest format, please see
[docker/docker#8093](https://github.com/docker/docker/issues/8093).

When the manifest is in hand, the client must verify the signature to ensure
the names and layers are valid. Once confirmed, the client will then use the
digests to download the individual layers. Layers are stored as blobs in the
V2 registry API, keyed by their digest.

#### Pulling an Image Manifest

The image manifest can be fetched with the following url:

```
GET /v2/<name>/manifests/<reference>
```

The `name` and `reference` parameters identify the image and are required. The
reference may include a tag or digest.

The client should include an Accept header indicating which manifest content
types it supports. For more details on the manifest formats and their content
types, see [manifest-v2-1.md](manifest-v2-1.md) and
[manifest-v2-2.md](manifest-v2-2.md). In a successful response, the
Content-Type header will indicate which manifest type is being returned.

A `404 Not Found` response will be returned if the image is unknown to the
registry. If the image exists and the response is successful, the image
manifest will be returned, with the following format (see
[docker/docker#8093](https://github.com/docker/docker/issues/8093) for
details):

    {
        "name": <name>,
        "tag": <tag>,
        "fsLayers": [
            {
                "blobSum": <digest>
            },
            ...
        ],
        "history": <v1 images>,
        "signature": <JWS>
    }

The client should verify the returned manifest signature for authenticity
before fetching layers.

##### Existing Manifests

The image manifest can be checked for existence with the following url:

```
HEAD /v2/<name>/manifests/<reference>
```

The `name` and `reference` parameters identify the image and are required. The
reference may include a tag or digest.

A `404 Not Found` response will be returned if the image is unknown to the
registry. If the image exists and the response is successful, the response
will be as follows:

```
200 OK
Content-Length: <length of manifest>
Docker-Content-Digest: <digest>
```
#### Pulling a Layer

Layers are stored in the blob portion of the registry, keyed by digest.
Pulling a layer is carried out by a standard HTTP request. The URL is as
follows:

    GET /v2/<name>/blobs/<digest>

Access to a layer will be gated by the `name` of the repository but is
identified uniquely in the registry by `digest`.

This endpoint may issue a 307 (302 for <1.7 clients) redirect to another
service for downloading the layer, and clients should be prepared to handle
redirects.

### Pushing An Image

Pushing an image works in the opposite order as a pull: the client pushes the
individual layers first and, once all layers are in place, pushes the image
manifest.

#### Pushing a Layer

All layer uploads use two steps to manage the upload process. The first step
starts the upload in the registry service; the second step uses the returned
upload URL to transfer the actual data. Uploads are started with a POST
request in the following format:

```
POST /v2/<name>/blobs/uploads/
```

The parameters of this request are the image namespace under which the layer
will be linked. Responses to this request are covered below.

##### Existing Layers

The existence of a layer can be checked via a `HEAD` request to the blob store
API. The request should be formatted as follows:

```
HEAD /v2/<name>/blobs/<digest>
```

If the layer with the digest specified in `digest` is available, a `200 OK`
response will be received, with no actual body content (this is in accordance
with the HTTP specification). The response will look as follows:

```
200 OK
Content-Length: <length of blob>
Docker-Content-Digest: <digest>
```

When this response is received, the client can assume that the layer is
already available in the registry under the given name and should take no
further action to upload the layer. Note that the binary digests may differ
for the existing registry layer, but the digests will be guaranteed to match.

##### Uploading the Layer

If the POST request is successful, a `202 Accepted` response will be returned
with the upload URL in the `Location` header:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

The rest of the upload process can be carried out with the returned URL,
called the "Upload URL" from the `Location` header. All responses to the
upload URL, whether sending data or getting status, will be in this format.
Though the URI format (`/v2/<name>/blobs/uploads/<uuid>`) for the `Location`
header is specified, clients should treat it as an opaque url and should never
try to assemble it. While the `uuid` parameter may be an actual UUID, this
proposal imposes no constraints on the format and clients should never impose
any.

If clients need to correlate local upload state with remote upload state, the
contents of the `Docker-Upload-UUID` header should be used. Such an id can be
used to key the last used location header when implementing resumable uploads.

##### Upload Progress

The progress and chunk coordination of the upload process will be coordinated
through the `Range` header. While this is a non-standard use of the `Range`
header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use.
For an upload that just started, for an example with a 1000 byte layer file,
the `Range` header would be as follows:

```
Range: bytes=0-0
```

To get the status of an upload, issue a GET request to the upload URL:

```
GET /v2/<name>/blobs/uploads/<uuid>
Host: <registry host>
```

The response will be similar to the above, except it will return a 204 status:

```
204 No Content
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Docker-Upload-UUID: <uuid>
```

Note that the HTTP `Range` header byte ranges are inclusive and that will be
honored, even in non-standard use cases.
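As a sketch of the two-step flow above, the following Go snippet starts an
upload and polls its status. The upload URL from `Location` is carried around
as an opaque string (note it may be host-relative; resolving it against the
registry host is omitted here), and `Docker-Upload-UUID` is kept only for
correlating state:

```go
package main

import (
	"fmt"
	"net/http"
)

// startUpload POSTs to the uploads endpoint and returns the opaque upload URL
// and the Docker-Upload-UUID for correlating local and remote state.
func startUpload(base, name string) (string, string, error) {
	resp, err := http.Post(base+"/v2/"+name+"/blobs/uploads/", "", nil)
	if err != nil {
		return "", "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return "", "", fmt.Errorf("unexpected status %s", resp.Status)
	}
	return resp.Header.Get("Location"), resp.Header.Get("Docker-Upload-UUID"), nil
}

// uploadStatus issues a GET to the upload URL; the Range header reports the
// inclusive byte range received so far.
func uploadStatus(uploadURL string) (string, error) {
	resp, err := http.Get(uploadURL)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return "", fmt.Errorf("unexpected status %s", resp.Status)
	}
	return resp.Header.Get("Range"), nil
}
```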
##### Monolithic Upload

A monolithic upload is simply a chunked upload with a single chunk and may be
favored by clients that would like to avoid the complexity of chunking. To
carry out a "monolithic" upload, one can simply put the entire content blob to
the provided URL:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
Content-Length: <size of layer>
Content-Type: application/octet-stream

<Layer Binary Data>
```

The "digest" parameter must be included with the PUT request. Please see the
[_Completed Upload_](#completed-upload) section for details on the parameters
and expected responses.

##### Chunked Upload

To carry out an upload of a chunk, the client can specify a range header and
only include that part of the layer file:

```
PATCH /v2/<name>/blobs/uploads/<uuid>
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream

<Layer Chunk Binary Data>
```

There is no enforcement on layer chunk splits other than that the server must
receive them in order. The server may enforce a minimum chunk size. If the
server cannot accept the chunk, a `416 Requested Range Not Satisfiable`
response will be returned and will include a `Range` header indicating the
current status:

```
416 Requested Range Not Satisfiable
Location: /v2/<name>/blobs/uploads/<uuid>
Range: 0-<last valid range>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

If this response is received, the client should resume from the "last valid
range" and upload the subsequent chunk. A 416 will be returned under the
following conditions:

- Invalid Content-Range header format
- Out of order chunk: the range of the next chunk must start immediately after
  the "last valid range" from the previous response.

When a chunk is accepted as part of the upload, a `202 Accepted` response will
be returned, including a `Range` header with the current upload status:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

##### Completed Upload

For an upload to be considered complete, the client must submit a `PUT`
request on the upload endpoint with a digest parameter. If it is not provided,
the upload will not be considered complete. The format for the final chunk
will be as follows:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream

<Last Layer Chunk Binary Data>
```

Optionally, if all chunks have already been uploaded, a `PUT` request with a
`digest` parameter and zero-length body may be sent to complete and validate
the upload. Multiple "digest" parameters may be provided with different
digests. The server may verify none or all of them but _must_ notify the
client if the content is rejected.

When the last chunk is received and the layer has been validated, the client
will receive a `201 Created` response:

```
201 Created
Location: /v2/<name>/blobs/<digest>
Content-Length: 0
Docker-Content-Digest: <digest>
```

The `Location` header will contain the registry URL to access the accepted
layer file. The `Docker-Content-Digest` header returns the canonical digest of
the uploaded blob, which may differ from the provided digest. Most clients may
ignore the value, but if it is used, the client should verify the value
against the uploaded blob data.

###### Digest Parameter

The "digest" parameter is designed as an opaque parameter to support
verification of a successful transfer. For example, an HTTP URI parameter
might be as follows:

```
sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
```

Given this parameter, the registry will verify that the provided content does
match this digest.
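The chunk loop and the completing `PUT` can be sketched as follows in Go. The
chunk size, digest handling and the 416 recovery path are simplified
assumptions, and `loc` follows whatever upload URL each response's `Location`
header reports (Content-Length is set automatically from the request body):

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// uploadChunked PATCHes the blob in order, then completes the upload with a
// zero-length PUT carrying the digest parameter.
func uploadChunked(loc string, blob []byte, chunkSize int) error {
	client := &http.Client{}
	for offset := 0; offset < len(blob); {
		end := offset + chunkSize
		if end > len(blob) {
			end = len(blob)
		}
		req, err := http.NewRequest("PATCH", loc, bytes.NewReader(blob[offset:end]))
		if err != nil {
			return err
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", offset, end-1)) // inclusive
		resp, err := client.Do(req)
		if err != nil {
			return err
		}
		resp.Body.Close()
		if resp.StatusCode != http.StatusAccepted {
			// A 416 means the chunk was out of order; a real client would
			// resume from the server's reported "last valid range".
			return fmt.Errorf("chunk rejected: %s", resp.Status)
		}
		if l := resp.Header.Get("Location"); l != "" {
			loc = l // always treat the upload URL as opaque
		}
		offset = end
	}
	dgst := fmt.Sprintf("sha256:%x", sha256.Sum256(blob))
	sep := "?"
	if strings.Contains(loc, "?") {
		sep = "&"
	}
	req, err := http.NewRequest("PUT", loc+sep+"digest="+url.QueryEscape(dgst), nil)
	if err != nil {
		return err
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("upload not completed: %s", resp.Status)
	}
	return nil
}
```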
##### Canceling an Upload

An upload can be cancelled by issuing a DELETE request to the upload endpoint.
The format will be as follows:

```
DELETE /v2/<name>/blobs/uploads/<uuid>
```

After this request is issued, the upload uuid will no longer be valid and the
registry server will discard all intermediate data. While uploads will time
out if not completed, clients should issue this request if they encounter a
fatal error but still have the ability to issue an HTTP request.

##### Cross Repository Blob Mount

A blob may be mounted from another repository that the client has read access
to, removing the need to upload a blob already known to the registry. To issue
a blob mount instead of an upload, a POST request should be issued in the
following format:

```
POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
Content-Length: 0
```

If the blob is successfully mounted, the client will receive a `201 Created`
response:

```
201 Created
Location: /v2/<name>/blobs/<digest>
Content-Length: 0
Docker-Content-Digest: <digest>
```

The `Location` header will contain the registry URL to access the mounted
blob. The `Docker-Content-Digest` header returns the canonical digest of the
blob, which may differ from the provided digest. Most clients may ignore the
value, but if it is used, the client should verify the value against the blob
data.

If a mount fails due to invalid repository or digest arguments, the registry
will fall back to the standard upload behavior and return a `202 Accepted`
with the upload URL in the `Location` header:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

This behavior is consistent with older versions of the registry, which do not
recognize the repository mount query parameters.

Note: a client may issue a HEAD request to check the existence of a blob in a
source repository to distinguish between the registry not supporting blob
mounts and the blob not existing in the expected repository.

##### Errors

If a 502, 503 or 504 error is received, the client should assume the failure
is due to a temporary condition and may retry, honoring the appropriate retry
mechanism. Other 5xx errors should be treated as terminal.

If there is a problem with the upload, a 4xx error will be returned indicating
the problem. After receiving a 4xx response (except 416, as called out above),
the upload will be considered failed and the client should take appropriate
action.

Note that the upload URL will not be available forever. If the upload uuid is
unknown to the registry, a `404 Not Found` response will be returned and the
client must restart the upload process.

### Deleting a Layer

A layer may be deleted from the registry via its `name` and `digest`. A
delete may be issued with the following request format:

    DELETE /v2/<name>/blobs/<digest>

If the blob exists and has been successfully deleted, the following response
will be issued:

    202 Accepted
    Content-Length: None

If the blob had already been deleted or did not exist, a `404 Not Found`
response will be issued instead.

If a layer is deleted which is referenced by a manifest in the registry, the
complete image will not be resolvable.
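A minimal Go sketch of the delete request, distinguishing the two documented
outcomes (the helper name and base URL handling are illustrative):

```go
package main

import (
	"fmt"
	"net/http"
)

// deleteBlob removes a layer by name and digest; 202 signals success, 404
// that the blob was already gone or never existed.
func deleteBlob(base, name, dgst string) error {
	req, err := http.NewRequest("DELETE", base+"/v2/"+name+"/blobs/"+dgst, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusAccepted:
		return nil
	case http.StatusNotFound:
		return fmt.Errorf("blob %s already deleted or unknown", dgst)
	default:
		return fmt.Errorf("unexpected status %s", resp.Status)
	}
}
```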
#### Pushing an Image Manifest

Once all of the layers for an image are uploaded, the client can upload the
image manifest. An image can be pushed using the following request format:

    PUT /v2/<name>/manifests/<reference>
    Content-Type: <manifest media type>

    {
        "name": <name>,
        "tag": <tag>,
        "fsLayers": [
            {
                "blobSum": <digest>
            },
            ...
        ],
        "history": <v1 images>,
        "signature": <JWS>,
        ...
    }

The `name` and `reference` fields of the response body must match those
specified in the URL. The `reference` field may be a "tag" or a "digest". The
content type should match the type of the manifest being uploaded, as
specified in [manifest-v2-1.md](manifest-v2-1.md) and
[manifest-v2-2.md](manifest-v2-2.md).

If there is a problem with pushing the manifest, a relevant 4xx response will
be returned with a JSON error message. Please see the
[_PUT Manifest_](#put-manifest) section for details on possible error codes
that may be returned.

If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are
returned. The `detail` field of the error response will have a `digest` field
identifying the missing blob. An error is returned for each unknown blob. The
response format is as follows:

    {
        "errors": [{
            "code": "BLOB_UNKNOWN",
            "message": "blob unknown to registry",
            "detail": {
                "digest": <missing digest>
            }
        },
        ...
    ]
    }
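A corresponding Go sketch of the manifest push, surfacing the digests named in
`BLOB_UNKNOWN` errors. Success is assumed to be signalled by `201 Created`,
and the minimal error decoding is illustrative:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// pushManifest PUTs a manifest under a tag or digest reference and reports
// any missing layers named in BLOB_UNKNOWN errors.
func pushManifest(base, name, reference, mediaType string, manifest []byte) error {
	req, err := http.NewRequest("PUT", base+"/v2/"+name+"/manifests/"+reference, bytes.NewReader(manifest))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", mediaType)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusCreated {
		return nil
	}
	var envelope struct {
		Errors []struct {
			Code   string `json:"code"`
			Detail struct {
				Digest string `json:"digest"`
			} `json:"detail"`
		} `json:"errors"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&envelope); err == nil {
		for _, e := range envelope.Errors {
			if e.Code == "BLOB_UNKNOWN" {
				fmt.Printf("must upload missing blob %s first\n", e.Detail.Digest)
			}
		}
	}
	return fmt.Errorf("manifest push failed: %s", resp.Status)
}
```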
### Listing Repositories

Images are stored in collections known as _repositories_, each keyed by a
`name`, as seen throughout the API specification. A registry instance may
contain several repositories. The list of available repositories is made
available through the _catalog_.

The catalog for a given registry can be retrieved with the following request:

```
GET /v2/_catalog
```

The response will be in the following format:

```
200 OK
Content-Type: application/json

{
  "repositories": [
    <name>,
    ...
  ]
}
```

Note that the contents of the response are specific to the registry
implementation. Some registries may opt to provide a full catalog output,
limit it based on the user's access level or omit upstream results, if
providing mirroring functionality. Subsequently, the presence of a repository
in the catalog listing only means that the registry *may* provide access to
the repository at the time of the request. Conversely, a missing entry does
*not* mean that the registry does not have the repository. More succinctly,
the catalog makes no guarantees about repositories that are absent from the
listing.

For registries with a large number of repositories, this response may be quite
large. If such a response is expected, one should use pagination. A registry
may also limit the number of responses returned even if pagination was not
explicitly requested. In this case the `Link` header will be returned along
with the results, and subsequent results can be obtained by following the link
as if pagination had been initially requested.

For details of the `Link` header, please see the [_Pagination_](#pagination)
section.

#### Pagination

Paginated catalog results can be retrieved by adding an `n` parameter to the
request URL, declaring that the response should be limited to `n` results.
Starting a paginated flow begins as follows:

```
GET /v2/_catalog?n=<integer>
```

The above specifies that a catalog response should be returned, from the start
of the result set, ordered lexically, limiting the number of results to `n`.
The response to such a request would look as follows:

```
200 OK
Content-Type: application/json
Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next"

{
  "repositories": [
    <name>,
    ...
  ]
}
```

The above includes the _first_ `n` entries from the result set. To get the
_next_ `n` entries, one can create a URL where the argument `last` has the
value from `repositories[len(repositories)-1]`. If there are indeed more
results, the URL for the next block is encoded in an
[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next"
relation. The presence of the `Link` header communicates to the client that
the entire result set has not been returned and another request must be
issued. If the header is not present, the client can assume that all results
have been received.

> __NOTE:__ In the request template above, note that the brackets
> are required. For example, if the url is
> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would
> be `<http://example.com/v2/_catalog?n=20&last=b>; rel="next"`. Please see
> [RFC5988](https://tools.ietf.org/html/rfc5988) for details.

Compliant client implementations should always use the `Link` header
value when proceeding through results linearly. The client may construct URLs
to skip forward in the catalog.

To get the next result set, a client would issue the request as follows, using
the URL encoded in the described `Link` header:

```
GET /v2/_catalog?n=<n from the request>&last=<last repository in response>
```

The above process should then be repeated until the `Link` header is no longer
set.

The catalog result set is represented abstractly as a lexically sorted list,
where the position in that list can be specified by the query term `last`. The
entries in the response start _after_ the term specified by `last`, up to `n`
entries.

The behavior of `last` is quite simple when demonstrated with an example. Let
us say the registry has the following repositories:

```
a
b
c
d
```

If the value of `n` is 2, _a_ and _b_ will be returned on the first response.
The `Link` header returned on the response will have `n` set to 2 and `last`
set to _b_:

```
Link: <<url>?n=2&last=b>; rel="next"
```

The client can then issue the request with the above value from the `Link`
header, receiving the values _c_ and _d_. Note that `n` may change on the
second-to-last response or be fully omitted, depending on the server
implementation.

### Listing Image Tags

It may be necessary to list all of the tags under a given repository. The tags
for an image repository can be retrieved with the following request:

    GET /v2/<name>/tags/list

The response will be in the following format:

    200 OK
    Content-Type: application/json

    {
        "name": <name>,
        "tags": [
            <tag>,
            ...
        ]
    }

For repositories with a large number of tags, this response may be quite
large. If such a response is expected, one should use pagination.

#### Pagination

Paginated tag results can be retrieved by adding the appropriate parameters to
the request URL described above. The behavior of tag pagination is identical
to that specified for catalog pagination. We cover a simple flow to highlight
any differences.

Starting a paginated flow may begin as follows:

```
GET /v2/<name>/tags/list?n=<integer>
```

The above specifies that a tags response should be returned, from the start of
the result set, ordered lexically, limiting the number of results to `n`. The
response to such a request would look as follows:

```
200 OK
Content-Type: application/json
Link: <<url>?n=<n from the request>&last=<last tag value from previous response>>; rel="next"

{
  "name": <name>,
  "tags": [
    <tag>,
    ...
  ]
}
```

To get the next result set, a client would issue the request as follows, using
the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link`
header:

```
GET /v2/<name>/tags/list?n=<n from the request>&last=<last tag value from previous response>
```

The above process should then be repeated until the `Link` header is no longer
set in the response. The behavior of the `last` parameter, the provided
response result, lexical ordering and encoding of the `Link` header are
identical to that of catalog pagination.
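The Link-following loop described above might look like this in Go. The
`nextLink` parsing is deliberately naive, assumes a single RFC5988 relation
per header, and resolves a leading "/" against an assumed base URL:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

// catalog pages through /v2/_catalog, following rel="next" Link headers until
// none is returned.
func catalog(base string, n int) ([]string, error) {
	var repos []string
	u := fmt.Sprintf("%s/v2/_catalog?n=%d", base, n)
	for u != "" {
		resp, err := http.Get(u)
		if err != nil {
			return nil, err
		}
		var page struct {
			Repositories []string `json:"repositories"`
		}
		err = json.NewDecoder(resp.Body).Decode(&page)
		link := resp.Header.Get("Link")
		resp.Body.Close()
		if err != nil {
			return nil, err
		}
		repos = append(repos, page.Repositories...)
		u = nextLink(link, base)
	}
	return repos, nil
}

// nextLink extracts the target of a `<url>; rel="next"` Link header, treating
// the URL as opaque apart from resolving a leading "/" against the base.
func nextLink(header, base string) string {
	if header == "" || !strings.Contains(header, `rel="next"`) {
		return ""
	}
	start, end := strings.Index(header, "<"), strings.Index(header, ">")
	if start < 0 || end <= start {
		return ""
	}
	u := header[start+1 : end]
	if strings.HasPrefix(u, "/") {
		u = base + u
	}
	return u
}
```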
### Deleting an Image

An image may be deleted from the registry via its `name` and `reference`. A
delete may be issued with the following request format:

    DELETE /v2/<name>/manifests/<reference>

For deletes, `reference` *must* be a digest or the delete will fail. If the
image exists and has been successfully deleted, the following response will be
issued:

    202 Accepted
    Content-Length: None

If the image had already been deleted or did not exist, a `404 Not Found`
response will be issued instead.

> **Note** When deleting a manifest from a registry version 2.3 or later, the
> following header must be used when `HEAD` or `GET`-ing the manifest to obtain
> the correct digest to delete:

    Accept: application/vnd.docker.distribution.manifest.v2+json

> For more details, see [compatibility.md](../compatibility.md#content-addressable-storage-cas).

## Detail

> **Note**: This section is still under construction. For the purposes of
> implementation, if any details below differ from the described request flows
> above, the section below should be corrected. When they match, this note
> should be removed.

The behavior of the endpoints is covered in detail in this section, organized
by route and entity. All aspects of the requests and responses are covered,
including headers, parameters and body formats. Examples of requests and their
corresponding responses, with success and failure, are enumerated.

> **Note**: The sections on endpoint detail are arranged with an example
> request, a description of the request, followed by information about that
> request.

A list of methods and URIs is covered in the table below:

|Method|Path|Entity|Description|
|------|----|------|-----------|
{{range $route := .RouteDescriptors}}{{range $method := .Methods}}| {{$method.Method}} | `{{$route.Path|prettygorilla}}` | {{$route.Entity}} | {{$method.Description}} |
{{end}}{{end}}

The detail for each endpoint is covered in the following sections.
### Errors

The error codes encountered via the API are enumerated in the following table:

|Code|Message|Description|
|----|-------|-----------|
{{range $err := .ErrorDescriptors}} `{{$err.Value}}` | {{$err.Message}} | {{$err.Description|removenewlines}}
{{end}}

{{range $route := .RouteDescriptors}}
### {{.Entity}}

{{.Description}}

{{range $method := $route.Methods}}

#### {{.Method}} {{$route.Entity}}

{{.Description}}

{{if .Requests}}{{range .Requests}}{{if .Name}}
##### {{.Name}}{{end}}

```
{{$method.Method}} {{$route.Path|prettygorilla}}{{range $i, $param := .QueryParameters}}{{if eq $i 0}}?{{else}}&{{end}}{{$param.Name}}={{$param.Format}}{{end}}{{range .Headers}}
{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}}
Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}}

{{.Body.Format}}{{end}}
```

{{.Description}}

{{if or .Headers .PathParameters .QueryParameters}}
The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
{{range .Headers}}|`{{.Name}}`|header|{{.Description}}|
{{end}}{{range .PathParameters}}|`{{.Name}}`|path|{{.Description}}|
{{end}}{{range .QueryParameters}}|`{{.Name}}`|query|{{.Description}}|
{{end}}{{end}}

{{if .Successes}}
{{range .Successes}}
###### On Success: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}}

```
{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}}
{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}}
Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}}

{{.Body.Format}}{{end}}
```

{{.Description}}
{{if .Fields}}The following fields may be returned in the response body:

|Name|Description|
|----|-----------|
{{range .Fields}}|`{{.Name}}`|{{.Description}}|
{{end}}{{end}}{{if .Headers}}
The following headers will be returned with the response:

|Name|Description|
|----|-----------|
{{range .Headers}}|`{{.Name}}`|{{.Description}}|
{{end}}{{end}}{{end}}{{end}}

{{if .Failures}}
{{range .Failures}}
###### On Failure: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}}

```
{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}}
{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}}
Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}}

{{.Body.Format}}{{end}}
```

{{.Description}}
{{if .Headers}}
The following headers will be returned on the response:

|Name|Description|
|----|-----------|
{{range .Headers}}|`{{.Name}}`|{{.Description}}|
{{end}}{{end}}

{{if .ErrorCodes}}
The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
{{range $err := .ErrorCodes}}| `{{$err.Descriptor.Value}}` | {{$err.Descriptor.Message}} | {{$err.Descriptor.Description|removenewlines}} |
{{end}}

{{end}}{{end}}{{end}}{{end}}{{end}}{{end}}

{{end}}

diff --git a/docs/spec/auth/index.md b/docs/spec/auth/index.md
new file mode 100644
index 00000000..f6ee8e1f
--- /dev/null
+++ b/docs/spec/auth/index.md
@@ -0,0 +1,17 @@

# Docker Registry v2 authentication

See the [Token Authentication Specification](token.md),
[Token Authentication Implementation](jwt.md),
[Token Scope Documentation](scope.md), and
[OAuth2 Token Authentication](oauth.md) for more information.
diff --git a/docs/spec/auth/jwt.md b/docs/spec/auth/jwt.md
new file mode 100644
index 00000000..c90bd6e8
--- /dev/null
+++ b/docs/spec/auth/jwt.md
@@ -0,0 +1,334 @@

# Docker Registry v2 Bearer token specification

This specification covers the `docker/distribution` implementation of the
v2 Registry's authentication schema. Specifically, it describes the JSON
Web Token schema that `docker/distribution` has adopted to implement the
client-opaque Bearer token issued by an authentication service and
understood by the registry.

This document borrows heavily from the
[JSON Web Token Draft Spec](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).

## Getting a Bearer Token

For this example, the client makes an HTTP GET request to the following URL:

```
https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push
```

The token server should first attempt to authenticate the client using any
authentication credentials provided with the request. As of Docker 1.8, the
registry client in the Docker Engine only supports Basic Authentication to
these token servers. If an attempt to authenticate to the token server fails,
the token server should return a `401 Unauthorized` response indicating that
the provided credentials are invalid.

Whether the token server requires authentication is up to the policy of that
access control provider. Some requests may require authentication to determine
access (such as pushing or pulling a private repository) while others may not
(such as pulling from a public repository).

After authenticating the client (which may simply be an anonymous client if
no attempt was made to authenticate), the token server must next query its
access control list to determine whether the client has the requested scope. In
this example request, if I have authenticated as user `jlhawn`, the token
server will determine what access I have to the repository `samalba/my-app`
hosted by the entity `registry.docker.io`.

Once the token server has determined what access the client has to the
resources requested in the `scope` parameter, it will take the intersection of
the set of requested actions on each resource and the set of actions that the
client has in fact been granted. If the client only has a subset of the
requested access **it must not be considered an error** as it is not the
responsibility of the token server to indicate authorization errors as part of
this workflow.

Continuing with the example request, the token server will find that the
client's set of granted access to the repository is `[pull, push]`, which when
intersected with the requested access `[pull, push]` yields an equal set. If
the granted access set was found only to be `[pull]` then the intersected set
would only be `[pull]`. If the client has no access to the repository then the
intersected set would be empty, `[]`.

It is this intersected set of access which is placed in the returned token.

The server will now construct a JSON Web Token to sign and return. A JSON Web
Token has 3 main parts:

1. Headers

   The header of a JSON Web Token is a standard JOSE header. The "typ" field
   will be "JWT" and it will also contain the "alg" field, which identifies
   the signing algorithm used to produce the signature. It also must have a
   "kid" field, representing the ID of the key which was used to sign the
   token.

   The "kid" field has to be in a libtrust fingerprint compatible format.
   Such a format can be generated by the following steps (a sketch of this
   derivation appears at the end of this document):

   1. Take the DER-encoded public key which the JWT token was signed against.

   2. Create a SHA256 hash out of it and truncate to 240 bits.

   3. Split the result into 12 base32-encoded groups with `:` as delimiter.

   Here is an example JOSE Header for a JSON Web Token (formatted with
   whitespace for readability):

   ```
   {
       "typ": "JWT",
       "alg": "ES256",
       "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"
   }
   ```

   It specifies that this object is a JSON Web Token signed using the key
   with the given ID, using the Elliptic Curve signature algorithm with a
   SHA256 hash.

2. Claim Set

   The Claim Set is a JSON struct containing these standard registered claim
   name fields:
   - `iss` (Issuer): The issuer of the token, typically the fqdn of the
     authorization server.
   - `sub` (Subject): The subject of the token; the name or id of the client
     which requested it. This should be empty (`""`) if the client did not
     authenticate.
   - `aud` (Audience): The intended audience of the token; the name or id of
     the service which will verify the token to authorize the client/subject.
   - `exp` (Expiration): The token should only be considered valid up to this
     specified date and time.
   - `nbf` (Not Before): The token should not be considered valid before this
     specified date and time.
   - `iat` (Issued At): Specifies the date and time which the Authorization
     server generated this token.
   - `jti` (JWT ID): A unique identifier for this token. Can be used by the
     intended audience to prevent replays of the token.
   The Claim Set will also contain a private claim name unique to this
   authorization server specification:
   - `access`: An array of access entry objects with the following fields:
     - `type`: The type of resource hosted by the service.
     - `name`: The name of the resource of the given type hosted by the
       service.
     - `actions`: An array of strings which give the actions authorized on
       this resource.
   Here is an example of such a JWT Claim Set (formatted with whitespace for
   readability):

   ```
   {
       "iss": "auth.docker.com",
       "sub": "jlhawn",
       "aud": "registry.docker.com",
       "exp": 1415387315,
       "nbf": 1415387015,
       "iat": 1415387015,
       "jti": "tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws",
       "access": [
           {
               "type": "repository",
               "name": "samalba/my-app",
               "actions": [
                   "pull",
                   "push"
               ]
           }
       ]
   }
   ```

3. Signature

   The authorization server will produce a JOSE header and Claim Set with no
   extraneous whitespace, i.e., the JOSE Header from above would be

   ```
   {"typ":"JWT","alg":"ES256","kid":"PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"}
   ```

   and the Claim Set from above would be

   ```
   {"iss":"auth.docker.com","sub":"jlhawn","aud":"registry.docker.com","exp":1415387315,"nbf":1415387015,"iat":1415387015,"jti":"tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws","access":[{"type":"repository","name":"samalba/my-app","actions":["push","pull"]}]}
   ```

   The utf-8 representation of this JOSE header and Claim Set are then
   url-safe base64 encoded (sans trailing '=' padding), producing:

   ```
   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0
   ```

   for the JOSE Header and

   ```
   eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
   ```

   for the Claim Set. These two are concatenated using a '.' character,
   yielding the string:

   ```
   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
   ```

   This is then used as the payload to the `ES256` signature algorithm
   specified in the JOSE header and specified fully in
   [Section 3.4 of the JSON Web Algorithms (JWA) draft specification](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-38#section-3.4).

   This example signature will use the following ECDSA key for the server:

   ```
   {
       "kty": "EC",
       "crv": "P-256",
       "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6",
       "d": "R7OnbfMaD5J2jl7GeE8ESo7CnHSBm_1N2k9IXYFrKJA",
       "x": "m7zUpx3b-zmVE5cymSs64POG9QcyEpJaYCD82-549_Q",
       "y": "dU3biz8sZ_8GPB-odm8Wxz3lNDr1xcAQQPQaOcr1fmc"
   }
   ```

   A resulting signature of the above payload using this key is:

   ```
   QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w
   ```

   Concatenating all of these together with a `.` character gives the
   resulting JWT:

   ```
   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w
   ```

This can now be placed in an HTTP response and returned to the client, which
uses it to authenticate to the audience service:

```
HTTP/1.1 200 OK
Content-Type: application/json

{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w"}
```

## Using the signed token

Once the client has a token, it will try the registry request again with the
token placed in the HTTP `Authorization` header like so:

```
Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw
```

This is also described in
[Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1).

## Verifying the token

The registry must now verify the token presented by the user by inspecting the
claim set within. The registry will:

- Ensure that the issuer (`iss` claim) is an authority it trusts.
- Ensure that the registry identifies as the audience (`aud` claim).
- Check that the current time is between the `nbf` and `exp` claim times.
- If enforcing single-use tokens, check that the JWT ID (`jti` claim) value has
  not been seen before.
  - To enforce this, the registry may keep a record of `jti`s it has seen for
    up to the `exp` time of the token to prevent token replays.
- Check the `access` claim value and use the identified resources and the list
  of actions authorized to determine whether the token grants the required
  level of access for the operation the client is attempting to perform.
- Verify that the signature of the token is valid.

If any of these requirements are not met, the registry will return a
`403 Forbidden` response to indicate that the token is invalid.

**Note**: it is only at this point in the workflow that an authorization error
may occur. The token server should *not* return errors when the user does not
have the requested authorization. Instead, the returned token should indicate
whatever subset of the requested scope the client does have (the intersection
of requested and granted access). If the token does not supply proper
authorization then the registry will return the appropriate error.

At no point in this process should the registry need to call back to the
authorization server. The registry only needs to be supplied with the trusted
public keys to verify the token signatures.
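The key ID ("kid") derivation described in the Headers section above can be
sketched in Go as follows. This is a minimal reading of the three steps (DER
encode, SHA256 truncated to 240 bits, 12 colon-separated base32 groups) and
may differ in detail from the actual libtrust implementation:

```go
package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/base32"
	"strings"
)

// keyID derives a libtrust-style fingerprint from a public key.
func keyID(pub interface{}) (string, error) {
	der, err := x509.MarshalPKIXPublicKey(pub) // step 1: DER-encoded public key
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(der)
	// step 2: truncate the hash to 240 bits (30 bytes); 30 bytes encode to
	// exactly 48 base32 characters, so no '=' padding is produced.
	b32 := base32.StdEncoding.EncodeToString(sum[:30])
	// step 3: split into 12 groups of 4 with ':' as delimiter.
	groups := make([]string, 0, 12)
	for i := 0; i < len(b32); i += 4 {
		groups = append(groups, b32[i:i+4])
	}
	return strings.Join(groups, ":"), nil
}
```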
diff --git a/docs/spec/auth/oauth.md b/docs/spec/auth/oauth.md
new file mode 100644
index 00000000..3d1ae0aa
--- /dev/null
+++ b/docs/spec/auth/oauth.md
@@ -0,0 +1,191 @@

# Docker Registry v2 authentication using OAuth2

This document describes support for the OAuth2 protocol within the
authorization server. [RFC6749](https://tools.ietf.org/html/rfc6749) should be
used as a reference for the protocol and HTTP endpoints described here.

## Refresh token format

The format of the refresh token is completely opaque to the client and should
be determined by the authorization server. The authorization server should
ensure the token is sufficiently long and is responsible for storing any
information about long-lived tokens which may be needed for revoking. Any
information stored inside the token will not be extracted and presented by
clients.

## Getting a token

POST /token

#### Headers

Content-Type: application/x-www-form-urlencoded

#### Post parameters
- `grant_type` (REQUIRED): Type of grant used to get the token. When getting a
  refresh token using credentials, this type should be set to "password" and
  have the accompanying username and password parameters. Type
  "authorization_code" is reserved for future use for authenticating to an
  authorization server without having to send credentials directly from the
  client. When requesting an access token with a refresh token, this should be
  set to "refresh_token".
- `service` (REQUIRED): The name of the service which hosts the resource to
  get access for. Refresh tokens will only be good for getting tokens for this
  service.
- `client_id` (REQUIRED): String identifying the client. This client_id does
  not need to be registered with the authorization server but should be set to
  a meaningful value in order to allow auditing keys created by unregistered
  clients. Accepted syntax is defined in
  [RFC6749 Appendix A.1](https://tools.ietf.org/html/rfc6749#appendix-A.1).
- `access_type` (OPTIONAL): Access which is being requested. If "offline" is
  provided then a refresh token will be returned. The default is "online",
  returning only a short-lived access token. If the grant type is
  "refresh_token" this will only return the same refresh token and not a new
  one.
- `scope` (OPTIONAL): The resource in question, formatted as one of the
  space-delimited entries from the scope parameters from the WWW-Authenticate
  header shown above. This query parameter should only be specified once but
  may contain multiple scopes using the scope list format defined in the scope
  grammar. If multiple scopes are provided in the WWW-Authenticate header, the
  scopes should first be converted to a scope list before requesting the
  token. The above example would be specified as:
  `scope=repository:samalba/my-app:push`. When requesting a refresh token the
  scopes may be empty, since the refresh token will not be limited by this
  scope; only the provided short-lived access token will have the scope
  limitation.
- `refresh_token` (OPTIONAL): The refresh token to use for authentication when
  grant type "refresh_token" is used.
- `username` (OPTIONAL): The username to use for authentication when grant
  type "password" is used.
- `password` (OPTIONAL): The password to use for authentication when grant
  type "password" is used.
#### Response fields
- `access_token` (REQUIRED): An opaque Bearer token that clients should supply
  to subsequent requests in the Authorization header. The client should not
  attempt to parse or understand this token; it must be treated as an opaque
  string.
- `scope` (REQUIRED): The scope granted inside the access token. This may be
  the same scope as requested or a subset. This requirement is stronger than
  specified in
  [RFC6749 Section 4.2.2](https://tools.ietf.org/html/rfc6749#section-4.2.2)
  by strictly requiring the scope in the return value.
- `expires_in` (REQUIRED): The duration in seconds since the token was issued
  that it will remain valid. When omitted, this defaults to 60 seconds. For
  compatibility with older clients, a token should never be returned with less
  than 60 seconds to live.
- `issued_at` (Optional): The RFC3339-serialized UTC standard time at which a
  given token was issued. If issued_at is omitted, the expiration is measured
  from when the token exchange completed.
- `refresh_token` (Optional): Token which can be used to get additional access
  tokens for the same subject with different scopes. This token should be kept
  secure by the client and only sent to the authorization server which issues
  bearer tokens. This field will only be set when `access_type=offline` is
  provided in the request.
#### Example getting refresh token

```
POST /token HTTP/1.1
Host: auth.docker.io
Content-Type: application/x-www-form-urlencoded

grant_type=password&username=johndoe&password=A3ddj3w&service=hub.docker.io&client_id=dockerengine&access_type=offline

HTTP/1.1 200 OK
Content-Type: application/json

{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5","expires_in":900,"scope":""}
```

#### Example refreshing an Access Token

```
POST /token HTTP/1.1
Host: auth.docker.io
Content-Type: application/x-www-form-urlencoded

grant_type=refresh_token&refresh_token=kas9Da81Dfa8&service=registry-1.docker.io&client_id=dockerengine&scope=repository:samalba/my-app:pull,push

HTTP/1.1 200 OK
Content-Type: application/json

{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5","expires_in":900,"scope":"repository:samalba/my-app:pull,repository:samalba/my-app:push"}
```

diff --git a/docs/spec/auth/scope.md b/docs/spec/auth/scope.md
new file mode 100644
index 00000000..a8f6c062
--- /dev/null
+++ b/docs/spec/auth/scope.md
@@ -0,0 +1,143 @@

# Docker Registry Token Scope and Access

Tokens used by the registry are always restricted in what resources they may
be used to access, where those resources may be accessed, and what actions
may be performed on those resources. Tokens always have the context of the
user for which the token was originally created. This document describes how
these restrictions are represented and enforced by the authorization server
and resource providers.

## Scope Components

### Subject (Authenticated User)

The subject represents the user for which a token is valid. Any actions
performed using an access token should be considered on behalf of the subject.
This is included in the `sub` field of the access token JWT. A refresh token
should be limited to a single subject and only be able to give out access
tokens for that subject.

### Audience (Resource Provider)

The audience represents a resource provider which is intended to be able to
perform the actions specified in the access token. Any resource provider which
does not match the audience should not use that access token. The audience is
included in the `aud` field of the access token JWT. A refresh token should be
limited to a single audience and only be able to give out access tokens for
that audience.

### Resource Type

The resource type represents the type of resource which the resource name is
intended to represent. This type may be specific to a resource provider but
must be understood by the authorization server in order to validate that the
subject is authorized for a specific resource.

#### Example Resource Types

- `repository` - represents a single repository within a registry. A
  repository may represent many manifest or content blobs, but the resource
  type is considered the collection of those items. Actions which may be
  performed on a `repository` are `pull` for accessing the collection and
  `push` for adding to it.

### Resource Name

The resource name represents the name which identifies a resource for a
resource provider. A resource is identified by this name and the provided
resource type. An example of a resource name would be the name component of an
image tag, such as "samalba/myapp" or "hostname/samalba/myapp".
### Resource Actions

The resource actions define the actions which the access token allows to be
performed on the identified resource. These actions are type-specific but will
normally have actions identifying read and write access on the resource.
Examples for the `repository` type are `pull` for read access and `push` for
write access.

## Authorization Server Use

Each access token request may include a scope and an audience. The subject is
always derived from the passed in credentials or refresh token. When using
a refresh token the passed in audience must match the audience defined for
the refresh token. The audience (resource provider) is provided using the
`service` field. Multiple resource scopes may be provided using multiple
`scope` fields on the `GET` request. The `POST` request only takes in a single
`scope` field but may use a space to separate a list of multiple resource
scopes.

### Resource Scope Grammar

```
scope          := resourcescope [ ' ' resourcescope ]*
resourcescope  := resourcetype ":" resourcename ":" action [ ',' action ]*
resourcetype   := /[a-z]*/
resourcename   := [ hostname '/' ] component [ '/' component ]*
hostname       := hostcomponent ['.' hostcomponent]* [':' port-number]
hostcomponent  := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
port-number    := /[0-9]+/
action         := /[a-z]*/
component      := alpha-numeric [ separator alpha-numeric ]*
alpha-numeric  := /[a-z0-9]+/
separator      := /[_.]|__|[-]*/
```

The full reference grammar is defined
[here](https://godoc.org/github.com/docker/distribution/reference). Currently
the scope name grammar is a subset of the reference grammar.

> **NOTE:** The `resourcename` may contain one `:` due to a possible port
> number in the hostname component of the `resourcename`, so a naive
> implementation that interprets the first three `:`-delimited tokens of a
> `scope` to be the `resourcetype`, `resourcename`, and a list of `action`
> would be insufficient. A sketch of a parser that accounts for this appears
> at the end of this document.

## Resource Provider Use

Once a resource provider has verified the authenticity of the scope through
JWT access token verification, the resource provider must ensure that the
scope satisfies the request. The resource provider should match the given
audience according to the name or URI the resource provider uses to identify
itself. Any denial based on subject is not defined here and is up to the
resource provider; the subject is mainly provided for audit logs and any other
user-specific rules which may need to be provided but are not defined by the
authorization server.

The resource provider must ensure that ANY resource being accessed as the
result of a request has the appropriate access scope. Both the resource type
and resource name must match the accessed resource and an appropriate action
scope must be included.

When appropriate authorization is not provided, either due to lack of scope
or a missing token, the resource provider must return a `WWW-Authenticate`
HTTP header with the `realm` as the authorization server, the `service` as the
expected audience identifying string, and a `scope` field for each required
resource scope to complete the request.

## JWT Access Tokens

Each JWT access token may only have a single subject and audience but multiple
resource scopes. The subject and audience are put into standard JWT fields
`sub` and `aud`. The resource scope is put into the `access` field. The
structure of the access field can be seen in the
[jwt documentation](jwt.md).
## Refresh Tokens

A refresh token must be defined for a single subject and audience. Further
restricting scope to specific type, name, and actions combinations should be
done by fetching an access token using the refresh token. Since the refresh
token is not scoped to specific resources for an audience, extra care should
be taken to only use the refresh token to negotiate new access tokens directly
with the authorization server, and never with a resource provider.
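As a closing illustration, here is a small Go parser for a single
`resourcescope`, written to honor the NOTE above: the resource name may
contain one `:` for a hostname port, so the type and actions are split off
from the two ends rather than on every colon. The helper name is illustrative
and error handling is minimal:

```go
package main

import (
	"fmt"
	"strings"
)

// parseScope splits "resourcetype:resourcename:action[,action...]" while
// allowing one extra ':' inside resourcename for a hostname port.
func parseScope(s string) (resType, name string, actions []string, err error) {
	first := strings.Index(s, ":")
	last := strings.LastIndex(s, ":")
	if first < 0 || first == last {
		return "", "", nil, fmt.Errorf("invalid resource scope %q", s)
	}
	return s[:first], s[first+1 : last], strings.Split(s[last+1:], ","), nil
}

func main() {
	t, n, a, _ := parseScope("repository:registry.example.com:5000/samalba/my-app:pull,push")
	fmt.Println(t, n, a) // repository registry.example.com:5000/samalba/my-app [pull push]
}
```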
+
+## Requesting a Token
+
+This section defines how to get a bearer token, and optionally a refresh
+token, from the token endpoint.
+
+#### Query Parameters
+
+- **`service`**: The name of the service which hosts the resource.
+
+- **`offline_token`**: Whether to return a refresh token along with the bearer
+  token. A refresh token is capable of getting additional bearer tokens for
+  the same subject with different scopes. The refresh token does not have an
+  expiration and should be considered completely opaque to the client.
+
+- **`client_id`**: String identifying the client. This `client_id` does not
+  need to be registered with the authorization server but should be set to a
+  meaningful value in order to allow auditing keys created by unregistered
+  clients. Accepted syntax is defined in
+  [RFC6749 Appendix A.1](https://tools.ietf.org/html/rfc6749#appendix-A.1).
+
+- **`scope`**: The resource in question, formatted as one of the
+  space-delimited entries from the `scope` parameters from the
+  `WWW-Authenticate` header shown above. This query parameter should be
+  specified multiple times if there is more than one `scope` entry from the
+  `WWW-Authenticate` header. The above example would be specified as:
+  `scope=repository:samalba/my-app:push`. The `scope` field may be empty to
+  request a refresh token without providing any resource permissions to the
+  returned bearer token.
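+
+As a rough illustration (the `client_id` value here is hypothetical), the
+query string for a token request can be assembled with Go's `net/url`
+package, repeating the `scope` parameter once per resource scope:
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/url"
+)
+
+func main() {
+	// Values taken from the WWW-Authenticate challenge shown earlier.
+	v := url.Values{}
+	v.Set("service", "registry.docker.io")
+	v.Set("client_id", "example-client") // hypothetical client identifier
+	v.Set("offline_token", "true")       // also request a refresh token
+	// Multiple scopes are passed as repeated "scope" parameters.
+	v.Add("scope", "repository:samalba/my-app:pull,push")
+
+	u := url.URL{
+		Scheme:   "https",
+		Host:     "auth.docker.io",
+		Path:     "/token",
+		RawQuery: v.Encode(),
+	}
+	fmt.Println(u.String())
+}
+```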
+
+#### Token Response Fields
+
+- **`token`**: An opaque Bearer token that clients should supply to subsequent
+  requests in the `Authorization` header.
+
+- **`access_token`**: For compatibility with OAuth 2.0, `token` is also
+  accepted under the name `access_token`. At least one of these fields must
+  be specified, but both may also appear (for compatibility with older
+  clients). When both are specified, they should be equivalent; if they
+  differ, the client's choice is undefined.
+
+- **`expires_in`**: (Optional) The duration in seconds since the token was
+  issued that it will remain valid. When omitted, this defaults to 60 seconds.
+  For compatibility with older clients, a token should never be returned with
+  less than 60 seconds to live.
+
+- **`issued_at`**: (Optional) The
+  [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)-serialized UTC standard time
+  at which a given token was issued. If `issued_at` is omitted, the expiration
+  is measured from when the token exchange completed.
+
+- **`refresh_token`**: (Optional) Token which can be used to get additional
+  access tokens for the same subject with different scopes. This token should
+  be kept secure by the client and only sent to the authorization server which
+  issues bearer tokens. This field will only be set when `offline_token=true`
+  is provided in the request.
+
+#### Example
+
+For this example, the client makes an HTTP GET request to the following URL:
+
+```
+https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push
+```
+
+The token server should first attempt to authenticate the client using any
+authentication credentials provided with the request. As of Docker 1.11, the
+Docker Engine supports both Basic Authentication and [OAuth2](oauth.md) for
+getting tokens. In Docker 1.10 and earlier, the registry client in the Docker
+Engine supports only Basic Authentication. If an attempt to authenticate to
+the token server fails, the token server should return a `401 Unauthorized`
+response indicating that the provided credentials are invalid.
+
+Whether the token server requires authentication is up to the policy of that
+access control provider. Some requests may require authentication to determine
+access (such as pushing or pulling a private repository) while others may not
+(such as pulling from a public repository).
+
+After authenticating the client (which may simply be an anonymous client if
+no attempt was made to authenticate), the token server must next query its
+access control list to determine whether the client has the requested scope. In
+this example request, if I have authenticated as user `jlhawn`, the token
+server will determine what access I have to the repository `samalba/my-app`
+hosted by the entity `registry.docker.io`.
+
+Once the token server has determined what access the client has to the
+resources requested in the `scope` parameter, it will take the intersection of
+the set of requested actions on each resource and the set of actions that the
+client has in fact been granted. If the client only has a subset of the
+requested access **it must not be considered an error** as it is not the
+responsibility of the token server to indicate authorization errors as part of
+this workflow.
+
+Continuing with the example request, the token server will find that the
+client's set of granted access to the repository is `[pull, push]` which when
+intersected with the requested access `[pull, push]` yields an equal set. If
+the granted access set was found only to be `[pull]` then the intersected set
+would only be `[pull]`. If the client has no access to the repository then the
+intersected set would be empty, `[]`.
+
+It is this intersected set of access which is placed in the returned token.
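+
+The intersection itself is straightforward set logic. A minimal sketch (not
+taken from any token server implementation) might look like this:
+
+```go
+package main
+
+import "fmt"
+
+// intersectActions returns the actions that appear in both the requested and
+// granted sets, preserving the order of the requested actions.
+func intersectActions(requested, granted []string) []string {
+	grantedSet := make(map[string]bool, len(granted))
+	for _, a := range granted {
+		grantedSet[a] = true
+	}
+	var result []string
+	for _, a := range requested {
+		if grantedSet[a] {
+			result = append(result, a)
+		}
+	}
+	return result
+}
+
+func main() {
+	fmt.Println(intersectActions([]string{"pull", "push"}, []string{"pull", "push"})) // [pull push]
+	fmt.Println(intersectActions([]string{"pull", "push"}, []string{"pull"}))         // [pull]
+	fmt.Println(intersectActions([]string{"pull", "push"}, nil))                      // []
+}
+```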
+
+The server then constructs an implementation-specific token with this
+intersected set of access, and returns it to the Docker client to use to
+authenticate to the audience service (within the indicated window of time):
+
+```
+HTTP/1.1 200 OK
+Content-Type: application/json
+
+{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w", "expires_in": 3600, "issued_at": "2009-11-10T23:00:00Z"}
+```
+
+
+## Using the Bearer token
+
+Once the client has a token, it will try the registry request again with the
+token placed in the HTTP `Authorization` header like so:
+
+```
+Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw
+```
+
+This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1).
diff --git a/docs/spec/implementations.md b/docs/spec/implementations.md
new file mode 100644
index 00000000..ec937b64
--- /dev/null
+++ b/docs/spec/implementations.md
@@ -0,0 +1,32 @@
+
+
+# Distribution API Implementations
+
+This is a list of known implementations of the Distribution API spec.
+
+## [Docker Distribution Registry](https://github.com/docker/distribution)
+
+Docker Distribution is the reference implementation of the distribution API
+specification. It aims to fully implement the entire specification.
+
+### Releases
+#### 2.0.1 (_in development_)
+Implements API 2.0.1
+
+_Known Issues_
+ - No resumable push support
+ - Content ranges ignored
+ - Blob upload status will always return a starting range of 0
+
+#### 2.0.0
+Implements API 2.0.0
+
+_Known Issues_
+ - No resumable push support
+ - No PATCH implementation for blob upload
+ - Content ranges ignored
+
diff --git a/docs/spec/index.md b/docs/spec/index.md
new file mode 100644
index 00000000..474bd455
--- /dev/null
+++ b/docs/spec/index.md
@@ -0,0 +1,17 @@
+
+
+# Docker Registry Reference
+
+* [HTTP API V2](api.md)
+* [Storage Driver](../storage-drivers/index.md)
+* [Token Authentication Specification](auth/token.md)
+* [Token Authentication Implementation](auth/jwt.md)
diff --git a/docs/spec/json.md b/docs/spec/json.md
new file mode 100644
index 00000000..a8916dcc
--- /dev/null
+++ b/docs/spec/json.md
@@ -0,0 +1,94 @@
+
+
+
+# Docker Distribution JSON Canonicalization
+
+To provide consistent content hashing of JSON objects throughout Docker
+Distribution APIs, the specification defines a canonical JSON format. Adopting
+such a canonicalization also aids in caching JSON responses.
+
+Note that protocols should not be designed to depend on identical JSON being
+generated across different versions or clients. The canonicalization rules are
+merely useful for caching and consistency.
+
+## Rules
+
+Compliant JSON should conform to the following rules:
+
+1. All generated JSON should comply with [RFC
+   7159](http://www.ietf.org/rfc/rfc7159.txt).
+2. Resulting "JSON text" shall always be encoded in UTF-8.
+3. Unless a canonical key order is defined for a particular schema, object
+   keys shall always appear in lexically sorted order.
+4. All whitespace between tokens should be removed.
+5. No "trailing commas" are allowed in object or array definitions.
+6. The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e".
+   Ampersand "&" is escaped to "\u0026".
+
+## Examples
+
+The following is a simple example of a canonicalized JSON string:
+
+```json
+{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]}
+```
+
+## Reference
+
+### Other Canonicalizations
+
+The OLPC project specifies [Canonical
+JSON](http://wiki.laptop.org/go/Canonical_JSON). While this is used in
+[TUF](http://theupdateframework.com/), which may be used with other
+distribution-related protocols, this alternative format has been proposed in
+case the original source changes. Specifications complying with either this
+specification or an alternative should explicitly call out the
+canonicalization format. Except for key ordering, this specification is mostly
+compatible.
+
+### Go
+
+In Go, the [`encoding/json`](http://golang.org/pkg/encoding/json/) library
+will emit canonical JSON by default. Simply using `json.Marshal` will suffice
+in most cases:
+
+```go
+incoming := map[string]interface{}{
+    "asdf": 1,
+    "qwer": []interface{}{},
+    "zxcv": []interface{}{
+        map[string]interface{}{},
+        true,
+        int(1e9),
+        "tyui",
+    },
+}
+
+canonical, err := json.Marshal(incoming)
+if err != nil {
+    // ... handle error
+}
+```
+
+To apply canonical JSON formatting to an existing serialized JSON buffer, one
+can use
+[`json.Indent`](http://golang.org/src/encoding/json/indent.go?s=1918:1989#L65)
+with the following arguments:
+
+```go
+incoming := getBytes() // placeholder for obtaining the raw JSON buffer
+var canonical bytes.Buffer
+if err := json.Indent(&canonical, incoming, "", ""); err != nil {
+    // ... handle error
+}
+```
diff --git a/docs/spec/manifest-v2-1.md b/docs/spec/manifest-v2-1.md
new file mode 100644
index 00000000..056f4bc6
--- /dev/null
+++ b/docs/spec/manifest-v2-1.md
@@ -0,0 +1,167 @@
+
+
+# Image Manifest Version 2, Schema 1
+
+This document outlines the format of the V2 image manifest. The image
+manifest described herein was introduced in the Docker daemon in the [v1.3.0
+release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453).
+It is a provisional manifest to provide compatibility with the [V1 Image
+format](https://github.com/docker/docker/blob/master/image/spec/v1.md), as the
+requirements are defined for the [V2 Schema 2
+image](https://github.com/docker/distribution/pull/62).
+
+
+Image manifests describe the various constituents of a docker image.
Image
+manifests can be serialized to JSON format with the following media types:
+
+Manifest Type | Media Type
+------------- | -------------
+manifest | "application/vnd.docker.distribution.manifest.v1+json"
+signed manifest | "application/vnd.docker.distribution.manifest.v1+prettyjws"
+
+*Note that "application/json" will also be accepted for schema 1.*
+
+References:
+
+ - [Proposal: JSON Registry API V2.1](https://github.com/docker/docker/issues/9015)
+ - [Proposal: Provenance step 1 - Transform images for validation and verification](https://github.com/docker/docker/issues/8093)
+
+## *Manifest* Field Descriptions
+
+The manifest provides the base accessible fields for working with the V2 image
+format in the registry.
+
+- **`name`** *string*
+
+   name is the name of the image's repository.
+
+- **`tag`** *string*
+
+   tag is the tag of the image.
+
+- **`architecture`** *string*
+
+   architecture is the host architecture on which this image is intended to
+   run. This is for informational purposes and is not currently used by the
+   engine.
+
+- **`fsLayers`** *array*
+
+   fsLayers is a list of filesystem layer blob sums contained in this image.
+
+   An fsLayer is a struct consisting of the following fields:
+
+   - **`blobSum`** *digest.Digest*
+
+      blobSum is the digest of the referenced filesystem image layer. A
+      digest must be a sha256 hash.
+
+- **`history`** *array*
+
+   history is a list of unstructured historical data for v1 compatibility. It
+   contains the ID of the image layer and the IDs of the layer's parent layers.
+
+   history is a struct consisting of the following fields:
+
+   - **`v1Compatibility`** *string*
+
+      V1Compatibility is the raw V1 compatibility information. This will
+      contain the JSON object describing the V1 image.
+
+- **`schemaVersion`** *int*
+
+   SchemaVersion is the image manifest schema that this image follows.
+
+> **Note**: the length of `history` must be equal to the length of `fsLayers`,
+> and entries in each are correlated by index.
+
+## Signed Manifests
+
+Signed manifests provide an envelope for a signed image manifest. A signed
+manifest consists of an image manifest along with an additional field
+containing the signature of the manifest.
+
+The docker client can verify signed manifests and display a message to the
+user.
+
+### Signing Manifests
+
+Image manifests can be signed in two different ways: with a *libtrust* private
+key or an x509 certificate chain. When signing with an x509 certificate chain,
+the public key of the first element in the chain must be the public key
+corresponding to the signing key.
+
+### Signed Manifest Field Description
+
+Signed manifests include an image manifest and a list of signatures generated
+by *libtrust*.
A signature consists of the following fields: + + +- **`header`** *[JOSE](http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2)* + + A [JSON Web Signature](http://self-issued.info/docs/draft-ietf-jose-json-web-signature.html) + +- **`signature`** *string* + + A signature for the image manifest, signed by a *libtrust* private key + +- **`protected`** *string* + + The signed protected header + +## Example Manifest + +*Example showing the official 'hello-world' image manifest.* + +``` +{ + "name": "hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + }, + { + "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + }, + { + "blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11" + }, + { + "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + ], + "schemaVersion": 1, + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4", + "kty": "EC", + "x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A", + "y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010" + }, + "alg": "ES256" + }, + "signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg", + "protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ" + } + ] +} + +``` diff --git a/docs/spec/manifest-v2-2.md b/docs/spec/manifest-v2-2.md new file mode 100644 index 00000000..fc705639 --- /dev/null +++ b/docs/spec/manifest-v2-2.md @@ -0,0 +1,296 @@ + + +# Image Manifest Version 2, Schema 2 + +This document outlines the format of of the V2 image manifest, schema version 2. +The original (and provisional) image manifest for V2 (schema 1), was introduced +in the Docker daemon in the [v1.3.0 +release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453) +and is specified in the [schema 1 manifest definition](manifest-v2-1.md) + +This second schema version has two primary goals. The first is to allow +multi-architecture images, through a "fat manifest" which references image +manifests for platform-specific versions of an image. The second is to +move the Docker engine towards content-addressable images, by supporting +an image model where the image's configuration can be hashed to generate +an ID for the image. + +# Media Types + +The following media types are used by the manifest formats described here, and +the resources they reference: + +- `application/vnd.docker.distribution.manifest.v1+json`: schema1 (existing manifest format) +- `application/vnd.docker.distribution.manifest.v2+json`: New image manifest format (schemaVersion = 2) +- `application/vnd.docker.distribution.manifest.list.v2+json`: Manifest list, aka "fat manifest" +- `application/vnd.docker.image.rootfs.diff.tar.gzip`: "Layer", as a gzipped tar +- `application/vnd.docker.container.image.v1+json`: Container config JSON + +## Manifest List + +The manifest list is the "fat manifest" which points to specific image manifests +for one or more platforms. Its use is optional, and relatively few images will +use one of these manifests. A client will distinguish a manifest list from an +image manifest based on the Content-Type returned in the HTTP response. 
+
+## *Manifest List* Field Descriptions
+
+- **`schemaVersion`** *int*
+
+    This field specifies the image manifest schema version as an integer. This
+    schema uses version `2`.
+
+- **`mediaType`** *string*
+
+    The MIME type of the manifest list. This should be set to
+    `application/vnd.docker.distribution.manifest.list.v2+json`.
+
+- **`manifests`** *array*
+
+    The manifests field contains a list of manifests for specific platforms.
+
+    Fields of an object in the manifests list are:
+
+    - **`mediaType`** *string*
+
+        The MIME type of the referenced object. This will generally be
+        `application/vnd.docker.distribution.manifest.v2+json`, but it could
+        also be `application/vnd.docker.distribution.manifest.v1+json` if the
+        manifest list references a legacy schema-1 manifest.
+
+    - **`size`** *int*
+
+        The size in bytes of the object. This field exists so that a client
+        will have an expected size for the content before validating. If the
+        length of the retrieved content does not match the specified length,
+        the content should not be trusted.
+
+    - **`digest`** *string*
+
+        The digest of the content, as defined by the
+        [Registry V2 HTTP API Specification](api.md#digest-parameter).
+
+    - **`platform`** *object*
+
+        The platform object describes the platform on which the image in the
+        manifest runs. A full list of valid operating system and architecture
+        values is listed in the [Go language documentation for `$GOOS` and
+        `$GOARCH`](https://golang.org/doc/install/source#environment).
+
+        - **`architecture`** *string*
+
+            The architecture field specifies the CPU architecture, for example
+            `amd64` or `ppc64le`.
+
+        - **`os`** *string*
+
+            The os field specifies the operating system, for example
+            `linux` or `windows`.
+
+        - **`os.version`** *string*
+
+            The optional os.version field specifies the operating system version,
+            for example `10.0.10586`.
+
+        - **`os.features`** *array*
+
+            The optional os.features field specifies an array of strings,
+            each listing a required OS feature (for example on Windows
+            `win32k`).
+
+        - **`variant`** *string*
+
+            The optional variant field specifies a variant of the CPU, for
+            example `armv6l` to specify a particular CPU variant of the ARM CPU.
+
+        - **`features`** *array*
+
+            The optional features field specifies an array of strings, each
+            listing a required CPU feature (for example `sse4` or `aes`).
+
+## Example Manifest List
+
+*Example showing a simple manifest list pointing to image manifests for two platforms:*
+```json
+{
+  "schemaVersion": 2,
+  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
+  "manifests": [
+    {
+      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+      "size": 7143,
+      "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+      "platform": {
+        "architecture": "ppc64le",
+        "os": "linux"
+      }
+    },
+    {
+      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+      "size": 7682,
+      "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
+      "platform": {
+        "architecture": "amd64",
+        "os": "linux",
+        "features": [
+          "sse4"
+        ]
+      }
+    }
+  ]
+}
+```
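+
+To illustrate how a client might consume a manifest list, the following sketch
+(hypothetical types, not the Docker Engine's implementation) selects the entry
+matching the local platform:
+
+```go
+package main
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// platform mirrors the "platform" object of a manifest list entry.
+type platform struct {
+	Architecture string `json:"architecture"`
+	OS           string `json:"os"`
+}
+
+// manifestDescriptor mirrors an entry of the "manifests" array.
+type manifestDescriptor struct {
+	MediaType string   `json:"mediaType"`
+	Size      int64    `json:"size"`
+	Digest    string   `json:"digest"`
+	Platform  platform `json:"platform"`
+}
+
+// pickManifest returns the digest of the first entry matching the local
+// operating system and CPU architecture.
+func pickManifest(manifests []manifestDescriptor) (string, bool) {
+	for _, m := range manifests {
+		if m.Platform.Architecture == runtime.GOARCH && m.Platform.OS == runtime.GOOS {
+			return m.Digest, true
+		}
+	}
+	return "", false
+}
+
+func main() {
+	// The two entries from the example manifest list above.
+	manifests := []manifestDescriptor{
+		{Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", Platform: platform{Architecture: "ppc64le", OS: "linux"}},
+		{Digest: "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270", Platform: platform{Architecture: "amd64", OS: "linux"}},
+	}
+	if d, ok := pickManifest(manifests); ok {
+		fmt.Println("pull image manifest", d)
+	}
+}
+```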
+
+# Image Manifest
+
+The image manifest provides a configuration and a set of layers for a container
+image. It's the direct replacement for the schema-1 manifest.
+
+## *Image Manifest* Field Descriptions
+
+- **`schemaVersion`** *int*
+
+    This field specifies the image manifest schema version as an integer. This
+    schema uses version `2`.
+
+- **`mediaType`** *string*
+
+    The MIME type of the manifest. This should be set to
+    `application/vnd.docker.distribution.manifest.v2+json`.
+
+- **`config`** *object*
+
+    The config field references a configuration object for a container, by
+    digest. This configuration item is a JSON blob that the runtime uses
+    to set up the container. This new schema uses a tweaked version
+    of this configuration to allow image content-addressability on the
+    daemon side.
+
+    Fields of a config object are:
+
+    - **`mediaType`** *string*
+
+        The MIME type of the referenced object. This should generally be
+        `application/vnd.docker.container.image.v1+json`.
+
+    - **`size`** *int*
+
+        The size in bytes of the object. This field exists so that a client
+        will have an expected size for the content before validating. If the
+        length of the retrieved content does not match the specified length,
+        the content should not be trusted.
+
+    - **`digest`** *string*
+
+        The digest of the content, as defined by the
+        [Registry V2 HTTP API Specification](api.md#digest-parameter).
+
+- **`layers`** *array*
+
+    The layer list is ordered starting from the base image (opposite order of schema1).
+
+    Fields of an item in the layers list are:
+
+    - **`mediaType`** *string*
+
+        The MIME type of the referenced object. This should
+        generally be `application/vnd.docker.image.rootfs.diff.tar.gzip`.
+
+    - **`size`** *int*
+
+        The size in bytes of the object. This field exists so that a client
+        will have an expected size for the content before validating. If the
+        length of the retrieved content does not match the specified length,
+        the content should not be trusted.
+
+    - **`digest`** *string*
+
+        The digest of the content, as defined by the
+        [Registry V2 HTTP API Specification](api.md#digest-parameter).
+
+    - **`urls`** *array*
+
+        For an ordinary layer, this is empty, and the layer contents can be
+        retrieved directly from the registry. For a layer with *`mediaType`* of
+        `application/vnd.docker.image.rootfs.foreign.diff.tar.gzip`, this
+        contains a non-empty list of URLs from which this object can be
+        downloaded.
+
+## Example Image Manifest
+
+*Example showing an image manifest:*
+```json
+{
+  "schemaVersion": 2,
+  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+  "config": {
+    "mediaType": "application/vnd.docker.container.image.v1+json",
+    "size": 7023,
+    "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+  },
+  "layers": [
+    {
+      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+      "size": 32654,
+      "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+    },
+    {
+      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+      "size": 16724,
+      "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
+    },
+    {
+      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+      "size": 73109,
+      "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
+    }
+  ]
+}
+```
+
+# Backward compatibility
+
+The registry will continue to accept uploads of manifests in both the old and
+new formats.
+
+When pushing images, clients which support the new manifest format should first
+construct a manifest in the new format. If uploading this manifest fails,
+presumably because the registry only supports the old format, the client may
+fall back to uploading a manifest in the old format.
+
+When pulling images, clients indicate support for this new version of the
+manifest format by sending the
+`application/vnd.docker.distribution.manifest.v2+json` and
+`application/vnd.docker.distribution.manifest.list.v2+json` media types in an
+`Accept` header when making a request to the `manifests` endpoint. Updated
+clients should check the `Content-Type` header to see whether the manifest
+returned from the endpoint is in the old format, or is an image manifest or
+manifest list in the new format.
+
+If the manifest being requested uses the new format, and the appropriate media
+type is not present in an `Accept` header, the registry will assume that the
+client cannot handle the manifest as-is, and rewrite it on the fly into the old
+format. If the object that would otherwise be returned is a manifest list, the
+registry will look up the appropriate manifest for the amd64 platform and
+linux OS, rewrite that manifest into the old format if necessary, and return
+the result to the client. If no suitable manifest is found in the manifest
+list, the registry will return a 404 error.
+
+One of the challenges in rewriting manifests to the old format is that the old
+format involves an image configuration for each layer in the manifest, but the
+new format only provides one image configuration. To work around this, the
+registry will create synthetic image configurations for all layers except the
+top layer. These image configurations will not result in runnable images on
+their own, but only serve to fill in the parent chain in a compatible way.
+The IDs in these synthetic configurations will be derived from hashes of their
+respective blobs. The registry will create these configurations and their IDs
+using the same scheme as Docker 1.10 when it creates a legacy manifest to push
+to a registry which doesn't support the new format.
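+
+As an illustration of the content negotiation described above, a client might
+request a manifest like this (the registry host and repository here are
+hypothetical):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	req, err := http.NewRequest("GET", "https://registry.example.com/v2/samalba/my-app/manifests/latest", nil)
+	if err != nil {
+		panic(err)
+	}
+	// Advertise support for the new manifest formats.
+	req.Header.Add("Accept", "application/vnd.docker.distribution.manifest.v2+json")
+	req.Header.Add("Accept", "application/vnd.docker.distribution.manifest.list.v2+json")
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// The Content-Type header tells the client which format came back.
+	fmt.Println(resp.Header.Get("Content-Type"))
+}
+```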
diff --git a/docs/spec/menu.md b/docs/spec/menu.md
new file mode 100644
index 00000000..ebc52327
--- /dev/null
+++ b/docs/spec/menu.md
@@ -0,0 +1,13 @@
+
+
diff --git a/docs/storage-drivers/azure.md b/docs/storage-drivers/azure.md
new file mode 100644
index 00000000..a84888de
--- /dev/null
+++ b/docs/storage-drivers/azure.md
@@ -0,0 +1,78 @@
+
+
+
+# Microsoft Azure storage driver
+
+An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/) for object storage.
+
+## Parameters
+
+| Parameter | Required | Description |
+|-----------|----------|-------------|
+| `accountname` | yes | Name of the Azure Storage Account. |
+| `accountkey` | yes | Primary or Secondary Key for the Storage Account. |
+| `container` | yes | Name of the Azure root storage container in which all registry data will be stored. Must comply with the storage container name [requirements][create-container-api]. |
+| `realm` | no | Domain name suffix for the Storage Service API endpoint. For example, the realm for "Azure in China" would be `core.chinacloudapi.cn` and the realm for "Azure Government" would be `core.usgovcloudapi.net`. By default, this is `core.windows.net`. |
+
+
+## Related Information
+
+* To get information about
+[azure-blob-storage](http://azure.microsoft.com/en-us/services/storage/) visit
+the Microsoft website.
+* You can use Microsoft's [Blob Service REST API](https://msdn.microsoft.com/en-us/library/azure/dd135733.aspx) to [create a container](https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx).
diff --git a/docs/storage-drivers/filesystem.md b/docs/storage-drivers/filesystem.md
new file mode 100644
index 00000000..8e269cdb
--- /dev/null
+++ b/docs/storage-drivers/filesystem.md
@@ -0,0 +1,24 @@
+
+
+
+# Filesystem storage driver
+
+An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem.
+
+## Parameters
+
+`rootdirectory`: (optional) The absolute path to a root directory tree in which
+to store all registry files. The registry stores all its data here, so make sure
+there is adequate space available. Defaults to `/var/lib/registry`.
+
+`maxthreads`: (optional) The maximum number of simultaneous blocking filesystem
+operations permitted within the registry. Each operation spawns a new thread and
+may cause thread exhaustion issues if many are done in parallel. Defaults to
+`100`, and can be no lower than `25`.
diff --git a/docs/storage-drivers/gcs.md b/docs/storage-drivers/gcs.md
new file mode 100644
index 00000000..1bc67f9e
--- /dev/null
+++ b/docs/storage-drivers/gcs.md
@@ -0,0 +1,78 @@
+
+
+
+# Google Cloud Storage driver
+
+An implementation of the `storagedriver.StorageDriver` interface which uses Google Cloud for object storage.
+
+## Parameters
+
+| Parameter | Required | Description |
+|-----------|----------|-------------|
+| `bucket` | yes | Storage bucket name. |
+| `keyfile` | no | A private service account key file in JSON format. Instead of a key file, Google Application Default Credentials can be used. |
+| `rootdirectory` | no | This is a prefix that will be applied to all Google Cloud Storage keys to allow you to segment data in your bucket if necessary. |
+| `chunksize` | no (default 5242880) | This is the chunk size used for uploading large blobs; it must be a multiple of 256*1024. |
+
+
+`bucket`: The name of your Google Cloud Storage bucket where you wish to store objects (needs to already be created prior to driver initialization).
+
+`keyfile`: (optional) A private key file in JSON format, used for [Service Account Authentication](https://cloud.google.com/storage/docs/authentication#service_accounts).
+
+**Note** Instead of a key file you can use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials).
+
+`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).
diff --git a/docs/storage-drivers/index.md b/docs/storage-drivers/index.md
new file mode 100644
index 00000000..89635bd3
--- /dev/null
+++ b/docs/storage-drivers/index.md
@@ -0,0 +1,66 @@
+
+
+
+# Docker Registry Storage Driver
+
+This document describes the registry storage driver model and implementation,
+and explains how to contribute new storage drivers.
+
+## Provided Drivers
+
+This storage driver package comes bundled with several drivers:
+
+- [inmemory](inmemory.md): A temporary storage driver using a local inmemory map. This exists solely for reference and testing.
+- [filesystem](filesystem.md): A local storage driver configured to use a directory tree in the local filesystem.
+- [s3](s3.md): A driver storing objects in an Amazon Simple Storage Service (S3) bucket.
+- [azure](azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/).
+- [swift](swift.md): A driver storing objects in [Openstack Swift](http://docs.openstack.org/developer/swift/).
+- [oss](oss.md): A driver storing objects in [Aliyun OSS](http://www.aliyun.com/product/oss).
+- [gcs](gcs.md): A driver storing objects in a [Google Cloud Storage](https://cloud.google.com/storage/) bucket.
+
+## Storage Driver API
+
+The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems.
+
+Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key.
+
+Storage drivers are intended to be written in Go, providing compile-time
+validation of the `storagedriver.StorageDriver` interface.
+
+## Driver Selection and Configuration
+
+The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based on the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package.
+
+Storage driver factories may be registered by name using the
+`factory.Register` method, and then later invoked by calling `factory.Create`
+with a driver name and parameters map. If no such storage driver can be found,
+`factory.Create` will return an `InvalidStorageDriverError`.
+
+## Driver Contribution
+
+### Writing new storage drivers
+
+To create a valid storage driver, one must implement the
+`storagedriver.StorageDriver` interface and make sure to expose this driver
+via the factory system.
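+
+As a concrete sketch of the factory system (assuming the interfaces as they
+appear in the docker/distribution codebase, and using a hypothetical driver
+name), a driver package might register itself like this:
+
+```go
+package mydriver
+
+import (
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/factory"
+	"github.com/docker/distribution/registry/storage/driver/inmemory"
+)
+
+const driverName = "mydriver" // hypothetical driver name
+
+// myDriverFactory implements factory.StorageDriverFactory.
+type myDriverFactory struct{}
+
+// Create constructs a driver from its parameters map. A real driver would
+// read its configuration from parameters; this sketch simply returns the
+// bundled in-memory driver instead.
+func (f *myDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+	return inmemory.New(), nil
+}
+
+func init() {
+	// Registering in init allows callers to construct this driver by name.
+	factory.Register(driverName, &myDriverFactory{})
+}
+```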
+
+#### Registering
+
+Storage drivers should call `factory.Register` with their driver name in an
+`init` function, allowing callers of `factory.Create` to construct instances
+of this driver without requiring modification of imports throughout the
+codebase.
+
+## Testing
+
+Storage driver test suites are provided in
+`storagedriver/testsuites/testsuites.go` and may be used for any storage
+driver written in Go. Tests can be registered using the `RegisterSuite`
+function, which runs the same set of tests for any registered drivers.
diff --git a/docs/storage-drivers/inmemory.md b/docs/storage-drivers/inmemory.md
new file mode 100644
index 00000000..1a14e77a
--- /dev/null
+++ b/docs/storage-drivers/inmemory.md
@@ -0,0 +1,23 @@
+
+
+
+# In-memory storage driver (Testing Only)
+
+For testing purposes only, you can use the `inmemory` storage driver. This
+driver is an implementation of the `storagedriver.StorageDriver` interface which
+uses local memory for object storage. If you would like to run a registry from
+volatile memory, use the [`filesystem` driver](filesystem.md) on a ramdisk.
+
+**IMPORTANT**: This storage driver *does not* persist data across runs. This is why it is only suitable for testing. *Never* use this driver in production.
+
+## Parameters
+
+None
diff --git a/docs/storage-drivers/menu.md b/docs/storage-drivers/menu.md
new file mode 100644
index 00000000..3638649f
--- /dev/null
+++ b/docs/storage-drivers/menu.md
@@ -0,0 +1,13 @@
+
+
diff --git a/docs/storage-drivers/oss.md b/docs/storage-drivers/oss.md
new file mode 100644
index 00000000..a85e315e
--- /dev/null
+++ b/docs/storage-drivers/oss.md
@@ -0,0 +1,126 @@
+
+
+# Aliyun OSS storage driver
+
+An implementation of the `storagedriver.StorageDriver` interface which uses [Aliyun OSS](http://www.aliyun.com/product/oss) for object storage.
+
+## Parameters
+
+| Parameter | Required | Description |
+|-----------|----------|-------------|
+| `accesskeyid` | yes | Your access key ID. |
+| `accesskeysecret` | yes | Your access key secret. |
+| `region` | yes | The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, see the Aliyun OSS documentation. |
+| `endpoint` | no | An endpoint which defaults to `<bucket>.<region>.aliyuncs.com` or `<bucket>.<region>-internal.aliyuncs.com` (when `internal=true`). You can change the default endpoint by changing this value. |
+| `internal` | no | An internal endpoint or the public endpoint for OSS access. The default is false. |
+| `bucket` | yes | The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization). |
+| `encrypt` | no | Specifies whether you would like your data encrypted on the server side. Defaults to false if not specified. |
+| `secure` | no | Specifies whether to transfer data to the bucket over SSL or not. If you omit this value, `true` is used. |
+| `chunksize` | no | The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS. |
+| `rootdirectory` | no | The root directory tree in which to store all registry files. Defaults to an empty string (bucket root). |
diff --git a/docs/storage-drivers/s3.md b/docs/storage-drivers/s3.md new file mode 100644 index 00000000..97cfbfc1 --- /dev/null +++ b/docs/storage-drivers/s3.md @@ -0,0 +1,268 @@ + + + +# S3 storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 or S3 compatible services for object storage. + +## Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Parameter | Required | Description |
+|-----------|----------|-------------|
+| `accesskey` | yes | Your AWS Access Key. |
+| `secretkey` | yes | Your AWS Secret Key. |
+| `region` | yes | The AWS region in which your bucket exists. For the moment, the Go AWS library in use does not use the newer DNS based bucket routing. |
+| `regionendpoint` | no | Endpoint for S3 compatible storage services (Minio, etc). |
+| `bucket` | yes | The bucket name in which you want to store the registry's data. |
+| `encrypt` | no | Specifies whether the registry stores the image in encrypted format or not. A boolean value. The default is false. |
+| `keyid` | no | Optional KMS key ID to use for encryption (encrypt must be true, or this parameter will be ignored). The default is none. |
+| `secure` | no | Indicates whether to use HTTPS instead of HTTP. A boolean value. The default is true. |
+| `v4auth` | no | Indicates whether the registry uses Version 4 of AWS's authentication. Generally, you should set this to `true`. By default, this is `false`. |
+| `chunksize` | no | The S3 API requires multipart upload chunks to be at least 5MB. This value should be a number that is larger than 5*1024*1024. |
+| `rootdirectory` | no | This is a prefix that will be applied to all S3 keys to allow you to segment data in your bucket if necessary. |
+| `storageclass` | no | The S3 storage class applied to each registry file. The default value is STANDARD. |
+
+
+`accesskey`: Your AWS access key.
+
+`secretkey`: Your AWS secret key.
+
+**Note** You can provide empty strings for your access and secret keys if you plan on running the driver on an EC2 instance and will handle authentication with the instance's credentials.
+
+`region`: The name of the AWS region in which you would like to store objects (for example `us-east-1`). For a list of regions, you can look at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html
+
+`regionendpoint`: (optional) Endpoint URL for S3 compatible APIs. This should not be provided when using Amazon S3.
+
+`bucket`: The name of your S3 bucket where you wish to store objects. The bucket must exist prior to the driver initialization.
+
+`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified).
+
+`keyid`: (optional) Whether you would like your data encrypted with this KMS key ID (defaults to none if not specified, will be ignored if encrypt is not true).
+
+`secure`: (optional) Whether you would like to transfer data to the bucket over SSL or not. Defaults to true (meaning transferring over SSL) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns.
+
+`v4auth`: (optional) Whether you would like to use AWS signature version 4 with your requests. This defaults to false if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false).
+
+`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to S3. The default is 10 MB. Keep in mind that the minimum part size for S3 is 5MB. Depending on the speed of your connection to S3, a larger chunk size may result in better performance; faster connections will benefit from larger chunk sizes.
+
+`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).
+
+`storageclass`: (optional) The storage class applied to each registry file. Defaults to STANDARD. Valid options are STANDARD and REDUCED_REDUNDANCY.
+
+## S3 permission scopes
+
+The following IAM permissions are required by the registry for push and pull. See [the S3 policy documentation](http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) for more details.
+
+```
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:ListBucket",
+        "s3:GetBucketLocation",
+        "s3:ListBucketMultipartUploads"
+      ],
+      "Resource": "arn:aws:s3:::mybucket"
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:PutObject",
+        "s3:GetObject",
+        "s3:DeleteObject",
+        "s3:ListMultipartUploadParts",
+        "s3:AbortMultipartUpload"
+      ],
+      "Resource": "arn:aws:s3:::mybucket/*"
+    }
+]
+```
+
+# CloudFront as Middleware with S3 backend
+
+## Use Case
+
+Adding CloudFront as a middleware for your S3-backed registry can dramatically improve pull times. Your registry will have the ability to retrieve your images from edge servers, rather than the geographically limited location of your S3 bucket. The farther your registry is from your bucket, the more improvements you will see. See [Amazon CloudFront](https://aws.amazon.com/cloudfront/details/).
+ +## Configuring CloudFront for Distribution + +If you are unfamiliar with creating a CloudFront distribution, see [Getting Started with Cloudfront](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/GettingStarted.html). + +Defaults can be kept in most areas except: + +### Origin: + +The CloudFront distribution must be created such that the `Origin Path` is set to the directory level of the root "docker" key in S3. If your registry exists on the root of the bucket, this path should be left blank. + +### Behaviors: + + - Viewer Protocol Policy: HTTPS Only + - Allowed HTTP Methods: GET, HEAD, OPTIONS, PUT, POST, PATCH, DELETE + - Cached HTTP Methods: OPTIONS (checked) + - Restrict Viewer Access (Use Signed URLs or Signed Cookies): Yes + - Trusted Signers: Self (Can add other accounts as long as you have access to CloudFront Key Pairs for those additional accounts) + +## Registry configuration + +Here the `middleware` option is used. It is still important to keep the `storage` option as CloudFront will only handle `pull` actions; `push` actions are still directly written to S3. + +The following example shows what you will need at minimum: +``` +... +storage: + s3: + region: us-east-1 + bucket: docker.myregistry.com +middleware: + storage: + - name: cloudfront + options: + baseurl: https://abcdefghijklmn.cloudfront.net/ + privatekey: /etc/docker/cloudfront/pk-ABCEDFGHIJKLMNOPQRST.pem + keypairid: ABCEDFGHIJKLMNOPQRST +... +``` + +## CloudFront Key-Pair + +A CloudFront key-pair is required for all AWS accounts needing access to your CloudFront distribution. For information, please see [Creating CloudFront Key Pairs](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-trusted-signers.html#private-content-creating-cloudfront-key-pairs). diff --git a/docs/storage-drivers/swift.md b/docs/storage-drivers/swift.md new file mode 100644 index 00000000..b1a0c932 --- /dev/null +++ b/docs/storage-drivers/swift.md @@ -0,0 +1,246 @@ + + + +# OpenStack Swift storage driver + +An implementation of the `storagedriver.StorageDriver` interface that uses [OpenStack Swift](http://docs.openstack.org/developer/swift/) for object storage. + +## Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Parameter | Required | Description |
+|-----------|----------|-------------|
+| `authurl` | yes | URL for obtaining an auth token. https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth |
+| `username` | yes | Your OpenStack user name. |
+| `password` | yes | Your OpenStack password. |
+| `region` | no | The OpenStack region in which your container exists. |
+| `container` | yes | The name of your Swift container where you wish to store the registry's data. The driver creates the named container during its initialization. |
+| `tenant` | no | Your OpenStack tenant name. You can either use `tenant` or `tenantid`. |
+| `tenantid` | no | Your OpenStack tenant id. You can either use `tenant` or `tenantid`. |
+| `domain` | no | Your OpenStack domain name for Identity v3 API. You can either use `domain` or `domainid`. |
+| `domainid` | no | Your OpenStack domain id for Identity v3 API. You can either use `domain` or `domainid`. |
+| `trustid` | no | Your OpenStack trust id for Identity v3 API. |
+| `insecureskipverify` | no | Set to `true` to skip TLS verification; `false` by default. |
+| `chunksize` | no | Size of the data segments for the Swift Dynamic Large Objects. This value should be a number (defaults to 5M). |
+| `prefix` | no | This is a prefix that will be applied to all Swift keys to allow you to segment data in your container if necessary. Defaults to the empty string, which is the container's root. |
+| `secretkey` | no | The secret key used to generate temporary URLs. |
+| `accesskey` | no | The access key to generate temporary URLs. It is used by HP Cloud Object Storage in addition to the `secretkey` parameter. |
+| `authversion` | no | Specify the OpenStack auth version, for example `3`. By default the driver will autodetect the auth version from the AuthURL. |
+| `endpointtype` | no | The endpoint type used when connecting to swift. Possible values are `public`, `internal` and `admin`. The default is `public`. |
+
+The features supported by the Swift server are queried by requesting the `/info`
+URL on the server. If the administrator has disabled that endpoint, the
+configuration file can specify the following optional parameters:
+
+| Parameter | Description |
+|-----------|-------------|
+| `tempurlcontainerkey` | Specify whether to use the container secret key to generate temporary URLs when set to true, or the account secret key otherwise. |
+| `tempurlmethods` | Array of HTTP methods that are supported by the TempURL middleware of the Swift server. Example below. |
+
+```
+- tempurlmethods:
+   - GET
+   - PUT
+   - HEAD
+   - POST
+   - DELETE
+```
diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go deleted file mode 100644 index 767526bb..00000000 --- a/docs/storage/blob_test.go +++ /dev/null @@ -1,614 +0,0 @@ -package storage - -import ( - "bytes" - "crypto/sha256" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "reflect" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver/testdriver" - "github.com/docker/distribution/testutil" -) - -// TestWriteSeek tests that the current file size can be -// obtained using Seek -func TestWriteSeek(t *testing.T) { - ctx := context.Background() - imageName, _ := reference.ParseNamed("foo/bar") - driver := testdriver.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - bs := repository.Blobs(ctx) - - blobUpload, err := bs.Create(ctx) - - if err != nil { - t.Fatalf("unexpected error starting layer upload: %s", err) - } - contents := []byte{1, 2, 3} - blobUpload.Write(contents) - blobUpload.Close() - offset := blobUpload.Size() - if offset != int64(len(contents)) { - t.Fatalf("unexpected value for blobUpload offset: %v != %v", offset, len(contents)) - } - -} - -// TestSimpleBlobUpload covers the blob upload process, exercising common -// error paths that might be seen during an upload. -func TestSimpleBlobUpload(t *testing.T) { - randomDataReader, dgst, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random reader: %v", err) - } - - ctx := context.Background() - imageName, _ := reference.ParseNamed("foo/bar") - driver := testdriver.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - bs := repository.Blobs(ctx) - - h := sha256.New() - rd := io.TeeReader(randomDataReader, h) - - blobUpload, err := bs.Create(ctx) - - if err != nil { - t.Fatalf("unexpected error starting layer upload: %s", err) - } - - // Cancel the upload then restart it - if err := blobUpload.Cancel(ctx); err != nil { - t.Fatalf("unexpected error during upload cancellation: %v", err) - } - - // get the enclosing directory - uploadPath := path.Dir(blobUpload.(*blobWriter).path) - - // ensure state was cleaned up - _, err = driver.List(ctx, uploadPath) - if err == nil { - t.Fatal("files in upload path after cleanup") - } - - // Do a resume, get unknown upload - blobUpload, err = bs.Resume(ctx, blobUpload.ID()) - if err != distribution.ErrBlobUploadUnknown { - t.Fatalf("unexpected error resuming upload, should be unknown: %v", err) - } - - // Restart! 
- blobUpload, err = bs.Create(ctx) - if err != nil { - t.Fatalf("unexpected error starting layer upload: %s", err) - } - - // Get the size of our random tarfile - randomDataSize, err := seekerSize(randomDataReader) - if err != nil { - t.Fatalf("error getting seeker size of random data: %v", err) - } - - nn, err := io.Copy(blobUpload, rd) - if err != nil { - t.Fatalf("unexpected error uploading layer data: %v", err) - } - - if nn != randomDataSize { - t.Fatalf("layer data write incomplete") - } - - blobUpload.Close() - - offset := blobUpload.Size() - if offset != nn { - t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn) - } - - // Do a resume, for good fun - blobUpload, err = bs.Resume(ctx, blobUpload.ID()) - if err != nil { - t.Fatalf("unexpected error resuming upload: %v", err) - } - - sha256Digest := digest.NewDigest("sha256", h) - desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) - if err != nil { - t.Fatalf("unexpected error finishing layer upload: %v", err) - } - - // ensure state was cleaned up - uploadPath = path.Dir(blobUpload.(*blobWriter).path) - _, err = driver.List(ctx, uploadPath) - if err == nil { - t.Fatal("files in upload path after commit") - } - - // After finishing an upload, it should no longer exist. - if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown { - t.Fatalf("expected layer upload to be unknown, got %v", err) - } - - // Test for existence. - statDesc, err := bs.Stat(ctx, desc.Digest) - if err != nil { - t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) - } - - if !reflect.DeepEqual(statDesc, desc) { - t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) - } - - rc, err := bs.Open(ctx, desc.Digest) - if err != nil { - t.Fatalf("unexpected error opening blob for read: %v", err) - } - defer rc.Close() - - h.Reset() - nn, err = io.Copy(h, rc) - if err != nil { - t.Fatalf("error reading layer: %v", err) - } - - if nn != randomDataSize { - t.Fatalf("incorrect read length") - } - - if digest.NewDigest("sha256", h) != sha256Digest { - t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest) - } - - // Delete a blob - err = bs.Delete(ctx, desc.Digest) - if err != nil { - t.Fatalf("Unexpected error deleting blob") - } - - d, err := bs.Stat(ctx, desc.Digest) - if err == nil { - t.Fatalf("unexpected non-error stating deleted blob: %v", d) - } - - switch err { - case distribution.ErrBlobUnknown: - break - default: - t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) - } - - _, err = bs.Open(ctx, desc.Digest) - if err == nil { - t.Fatalf("unexpected success opening deleted blob for read") - } - - switch err { - case distribution.ErrBlobUnknown: - break - default: - t.Errorf("Unexpected error type getting deleted manifest: %#v", err) - } - - // Re-upload the blob - randomBlob, err := ioutil.ReadAll(randomDataReader) - if err != nil { - t.Fatalf("Error reading all of blob %s", err.Error()) - } - expectedDigest := digest.FromBytes(randomBlob) - simpleUpload(t, bs, randomBlob, expectedDigest) - - d, err = bs.Stat(ctx, expectedDigest) - if err != nil { - t.Errorf("unexpected error stat-ing blob") - } - if d.Digest != expectedDigest { - t.Errorf("Mismatching digest with restored blob") - } - - _, err = bs.Open(ctx, expectedDigest) - if err != nil { - t.Errorf("Unexpected error opening blob") - } - - // Reuse state to test delete with a delete-disabled registry - registry, err = NewRegistry(ctx, driver, 
BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - repository, err = registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - bs = repository.Blobs(ctx) - err = bs.Delete(ctx, desc.Digest) - if err == nil { - t.Errorf("Unexpected success deleting while disabled") - } -} - -// TestSimpleBlobRead just creates a simple blob file and ensures that basic -// open, read, seek, read works. More specific edge cases should be covered in -// other tests. -func TestSimpleBlobRead(t *testing.T) { - ctx := context.Background() - imageName, _ := reference.ParseNamed("foo/bar") - driver := testdriver.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - bs := repository.Blobs(ctx) - - randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string. - if err != nil { - t.Fatalf("error creating random data: %v", err) - } - - // Test for existence. - desc, err := bs.Stat(ctx, dgst) - if err != distribution.ErrBlobUnknown { - t.Fatalf("expected not found error when testing for existence: %v", err) - } - - rc, err := bs.Open(ctx, dgst) - if err != distribution.ErrBlobUnknown { - t.Fatalf("expected not found error when opening non-existent blob: %v", err) - } - - randomLayerSize, err := seekerSize(randomLayerReader) - if err != nil { - t.Fatalf("error getting seeker size for random layer: %v", err) - } - - descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize} - t.Logf("desc: %v", descBefore) - - desc, err = addBlob(ctx, bs, descBefore, randomLayerReader) - if err != nil { - t.Fatalf("error adding blob to blobservice: %v", err) - } - - if desc.Size != randomLayerSize { - t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize) - } - - rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest. 
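The open/hash/compare sequence this test performs inline condenses into a reusable helper. A sketch against the same interfaces (the helper name is ours):

// verifyBlob re-reads a stored blob and checks the content against its
// digest, mirroring the read-back verification in these tests.
func verifyBlob(ctx context.Context, bs distribution.BlobProvider, dgst digest.Digest) error {
	rc, err := bs.Open(ctx, dgst)
	if err != nil {
		return err
	}
	defer rc.Close()

	h := sha256.New()
	if _, err := io.Copy(h, rc); err != nil {
		return err
	}

	if computed := digest.NewDigest("sha256", h); computed != dgst {
		return fmt.Errorf("digest mismatch: %q != %q", computed, dgst)
	}
	return nil
}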
- if err != nil { - t.Fatalf("error opening blob with %v: %v", dgst, err) - } - defer rc.Close() - - // Now check the sha digest and ensure its the same - h := sha256.New() - nn, err := io.Copy(h, rc) - if err != nil { - t.Fatalf("unexpected error copying to hash: %v", err) - } - - if nn != randomLayerSize { - t.Fatalf("stored incorrect number of bytes in blob: %d != %d", nn, randomLayerSize) - } - - sha256Digest := digest.NewDigest("sha256", h) - if sha256Digest != desc.Digest { - t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, desc.Digest) - } - - // Now seek back the blob, read the whole thing and check against randomLayerData - offset, err := rc.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatalf("error seeking blob: %v", err) - } - - if offset != 0 { - t.Fatalf("seek failed: expected 0 offset, got %d", offset) - } - - p, err := ioutil.ReadAll(rc) - if err != nil { - t.Fatalf("error reading all of blob: %v", err) - } - - if len(p) != int(randomLayerSize) { - t.Fatalf("blob data read has different length: %v != %v", len(p), randomLayerSize) - } - - // Reset the randomLayerReader and read back the buffer - _, err = randomLayerReader.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatalf("error resetting layer reader: %v", err) - } - - randomLayerData, err := ioutil.ReadAll(randomLayerReader) - if err != nil { - t.Fatalf("random layer read failed: %v", err) - } - - if !bytes.Equal(p, randomLayerData) { - t.Fatalf("layer data not equal") - } -} - -// TestBlobMount covers the blob mount process, exercising common -// error paths that might be seen during a mount. -func TestBlobMount(t *testing.T) { - randomDataReader, dgst, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random reader: %v", err) - } - - ctx := context.Background() - imageName, _ := reference.ParseNamed("foo/bar") - sourceImageName, _ := reference.ParseNamed("foo/source") - driver := testdriver.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - sourceRepository, err := registry.Repository(ctx, sourceImageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - - sbs := sourceRepository.Blobs(ctx) - - blobUpload, err := sbs.Create(ctx) - - if err != nil { - t.Fatalf("unexpected error starting layer upload: %s", err) - } - - // Get the size of our random tarfile - randomDataSize, err := seekerSize(randomDataReader) - if err != nil { - t.Fatalf("error getting seeker size of random data: %v", err) - } - - nn, err := io.Copy(blobUpload, randomDataReader) - if err != nil { - t.Fatalf("unexpected error uploading layer data: %v", err) - } - - desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) - if err != nil { - t.Fatalf("unexpected error finishing layer upload: %v", err) - } - - // Test for existence. - statDesc, err := sbs.Stat(ctx, desc.Digest) - if err != nil { - t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs) - } - - if !reflect.DeepEqual(statDesc, desc) { - t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) - } - - bs := repository.Blobs(ctx) - // Test destination for existence. 
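A successful cross-repository mount never yields a writer: Create with WithMountFrom reports it as distribution.ErrBlobMounted, as the test below verifies. A caller-side sketch of the three possible outcomes (the wrapper function is ours):

// createOrMount attempts a mount and falls back to a regular upload.
func createOrMount(ctx context.Context, bs distribution.BlobStore, ref reference.Canonical) (distribution.BlobWriter, *distribution.Descriptor, error) {
	bw, err := bs.Create(ctx, WithMountFrom(ref))
	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
		// Mounted from the source repository; no upload is needed.
		return nil, &ebm.Descriptor, nil
	}
	if err != nil {
		return nil, nil, err // some other failure
	}
	// No mount happened: proceed with a normal upload through bw.
	return bw, nil, nil
}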
- statDesc, err = bs.Stat(ctx, desc.Digest) - if err == nil { - t.Fatalf("unexpected non-error stating unmounted blob: %v", desc) - } - - canonicalRef, err := reference.WithDigest(sourceRepository.Named(), desc.Digest) - if err != nil { - t.Fatal(err) - } - - bw, err := bs.Create(ctx, WithMountFrom(canonicalRef)) - if bw != nil { - t.Fatal("unexpected blobwriter returned from Create call, should mount instead") - } - - ebm, ok := err.(distribution.ErrBlobMounted) - if !ok { - t.Fatalf("unexpected error mounting layer: %v", err) - } - - if !reflect.DeepEqual(ebm.Descriptor, desc) { - t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc) - } - - // Test for existence. - statDesc, err = bs.Stat(ctx, desc.Digest) - if err != nil { - t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) - } - - if !reflect.DeepEqual(statDesc, desc) { - t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) - } - - rc, err := bs.Open(ctx, desc.Digest) - if err != nil { - t.Fatalf("unexpected error opening blob for read: %v", err) - } - defer rc.Close() - - h := sha256.New() - nn, err = io.Copy(h, rc) - if err != nil { - t.Fatalf("error reading layer: %v", err) - } - - if nn != randomDataSize { - t.Fatalf("incorrect read length") - } - - if digest.NewDigest("sha256", h) != dgst { - t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), dgst) - } - - // Delete the blob from the source repo - err = sbs.Delete(ctx, desc.Digest) - if err != nil { - t.Fatalf("Unexpected error deleting blob") - } - - d, err := bs.Stat(ctx, desc.Digest) - if err != nil { - t.Fatalf("unexpected error stating blob deleted from source repository: %v", err) - } - - d, err = sbs.Stat(ctx, desc.Digest) - if err == nil { - t.Fatalf("unexpected non-error stating deleted blob: %v", d) - } - - switch err { - case distribution.ErrBlobUnknown: - break - default: - t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) - } - - // Delete the blob from the dest repo - err = bs.Delete(ctx, desc.Digest) - if err != nil { - t.Fatalf("Unexpected error deleting blob") - } - - d, err = bs.Stat(ctx, desc.Digest) - if err == nil { - t.Fatalf("unexpected non-error stating deleted blob: %v", d) - } - - switch err { - case distribution.ErrBlobUnknown: - break - default: - t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) - } -} - -// TestLayerUploadZeroLength uploads zero-length -func TestLayerUploadZeroLength(t *testing.T) { - ctx := context.Background() - imageName, _ := reference.ParseNamed("foo/bar") - driver := testdriver.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - bs := repository.Blobs(ctx) - - simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar) -} - -func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) { - ctx := context.Background() - wr, err := bs.Create(ctx) - if err != nil { - t.Fatalf("unexpected error starting upload: %v", err) - } - - nn, err := io.Copy(wr, bytes.NewReader(blob)) - if err != nil { - t.Fatalf("error copying into blob writer: %v", err) - } - - if nn != 0 { - t.Fatalf("unexpected number of bytes copied: %v > 0", nn) - } - - dgst, err := 
digest.FromReader(bytes.NewReader(blob)) - if err != nil { - t.Fatalf("error getting digest: %v", err) - } - - if dgst != expectedDigest { - // sanity check on zero digest - t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest) - } - - desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}) - if err != nil { - t.Fatalf("unexpected error committing write: %v", err) - } - - if desc.Digest != dgst { - t.Fatalf("unexpected digest: %v != %v", desc.Digest, dgst) - } -} - -// seekerSize seeks to the end of seeker, checks the size and returns it to -// the original state, returning the size. The state of the seeker should be -// treated as unknown if an error is returned. -func seekerSize(seeker io.ReadSeeker) (int64, error) { - current, err := seeker.Seek(0, os.SEEK_CUR) - if err != nil { - return 0, err - } - - end, err := seeker.Seek(0, os.SEEK_END) - if err != nil { - return 0, err - } - - resumed, err := seeker.Seek(current, os.SEEK_SET) - if err != nil { - return 0, err - } - - if resumed != current { - return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location") - } - - return end, nil -} - -// addBlob simply consumes the reader and inserts into the blob service, -// returning a descriptor on success. -func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distribution.Descriptor, rd io.Reader) (distribution.Descriptor, error) { - wr, err := bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, err - } - defer wr.Cancel(ctx) - - if nn, err := io.Copy(wr, rd); err != nil { - return distribution.Descriptor{}, err - } else if nn != desc.Size { - return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Size) - } - - return wr.Commit(ctx, desc) -} diff --git a/docs/storage/blobcachemetrics.go b/docs/storage/blobcachemetrics.go deleted file mode 100644 index fad0a77a..00000000 --- a/docs/storage/blobcachemetrics.go +++ /dev/null @@ -1,60 +0,0 @@ -package storage - -import ( - "expvar" - "sync/atomic" - - "github.com/docker/distribution/registry/storage/cache" -) - -type blobStatCollector struct { - metrics cache.Metrics -} - -func (bsc *blobStatCollector) Hit() { - atomic.AddUint64(&bsc.metrics.Requests, 1) - atomic.AddUint64(&bsc.metrics.Hits, 1) -} - -func (bsc *blobStatCollector) Miss() { - atomic.AddUint64(&bsc.metrics.Requests, 1) - atomic.AddUint64(&bsc.metrics.Misses, 1) -} - -func (bsc *blobStatCollector) Metrics() cache.Metrics { - return bsc.metrics -} - -// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor -// cache requests. Note this is kept globally and made available via expvar. -// For more detailed metrics, it's recommended to instrument a particular cache -// implementation. -var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{} - -func init() { - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - cache := registry.(*expvar.Map).Get("cache") - if cache == nil { - cache = &expvar.Map{} - cache.(*expvar.Map).Init() - registry.(*expvar.Map).Set("cache", cache) - } - - storage := cache.(*expvar.Map).Get("storage") - if storage == nil { - storage = &expvar.Map{} - storage.(*expvar.Map).Init() - cache.(*expvar.Map).Set("storage", storage) - } - - storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} { - // no need for synchronous access: the increments are atomic and - // during reading, we don't care if the data is up to date.
The - // numbers will always *eventually* be reported correctly. - return blobStatterCacheMetrics - })) -} diff --git a/docs/storage/blobserver.go b/docs/storage/blobserver.go deleted file mode 100644 index 2655e011..00000000 --- a/docs/storage/blobserver.go +++ /dev/null @@ -1,78 +0,0 @@ -package storage - -import ( - "fmt" - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver" -) - -// TODO(stevvooe): This should be configurable in the future. -const blobCacheControlMaxAge = 365 * 24 * time.Hour - -// blobServer simply serves blobs from a driver instance using a path function -// to identify paths and a descriptor service to fill in metadata. -type blobServer struct { - driver driver.StorageDriver - statter distribution.BlobStatter - pathFn func(dgst digest.Digest) (string, error) - redirect bool // allows disabling URLFor redirects -} - -func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - desc, err := bs.statter.Stat(ctx, dgst) - if err != nil { - return err - } - - path, err := bs.pathFn(desc.Digest) - if err != nil { - return err - } - - if bs.redirect { - redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) - switch err.(type) { - case nil: - // Redirect to storage URL. - http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) - return err - - case driver.ErrUnsupportedMethod: - // Fall back to serving the content directly. - default: - // Some unexpected error. - return err - } - } - - br, err := newFileReader(ctx, bs.driver, path, desc.Size) - if err != nil { - return err - } - defer br.Close() - - w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent - w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) - - if w.Header().Get("Docker-Content-Digest") == "" { - w.Header().Set("Docker-Content-Digest", desc.Digest.String()) - } - - if w.Header().Get("Content-Type") == "" { - // Set the content type if not already set. - w.Header().Set("Content-Type", desc.MediaType) - } - - if w.Header().Get("Content-Length") == "" { - // Set the content length if not already set. - w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) - } - - http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) - return nil -} diff --git a/docs/storage/blobstore.go b/docs/storage/blobstore.go deleted file mode 100644 index 4274cc9e..00000000 --- a/docs/storage/blobstore.go +++ /dev/null @@ -1,223 +0,0 @@ -package storage - -import ( - "path" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver" -) - -// blobStore implements the read side of the blob store interface over a -// driver without enforcing per-repository membership. This object is -// intentionally a leaky abstraction, providing utility methods that support -// creating and traversing backend links. -type blobStore struct { - driver driver.StorageDriver - statter distribution.BlobStatter -} - -var _ distribution.BlobProvider = &blobStore{} - -// Get implements the BlobReadService.Get call.
-func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - bp, err := bs.path(dgst) - if err != nil { - return nil, err - } - - p, err := bs.driver.GetContent(ctx, bp) - if err != nil { - switch err.(type) { - case driver.PathNotFoundError: - return nil, distribution.ErrBlobUnknown - } - - return nil, err - } - - return p, err -} - -func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - desc, err := bs.statter.Stat(ctx, dgst) - if err != nil { - return nil, err - } - - path, err := bs.path(desc.Digest) - if err != nil { - return nil, err - } - - return newFileReader(ctx, bs.driver, path, desc.Size) -} - -// Put stores the content p in the blob store, calculating the digest. If the -// content is already present, only the digest will be returned. This should -// only be used for small objects, such as manifests. This is implemented as a convenience for other Put implementations. -func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - dgst := digest.FromBytes(p) - desc, err := bs.statter.Stat(ctx, dgst) - if err == nil { - // content already present - return desc, nil - } else if err != distribution.ErrBlobUnknown { - context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %v", dgst, err) - // real error, return it - return distribution.Descriptor{}, err - } - - bp, err := bs.path(dgst) - if err != nil { - return distribution.Descriptor{}, err - } - - // TODO(stevvooe): Write out mediatype here, as well. - return distribution.Descriptor{ - Size: int64(len(p)), - - // NOTE(stevvooe): The central blob store firewalls media types from - // other users. The caller should look this up and override the value - // for the specific repository. - MediaType: "application/octet-stream", - Digest: dgst, - }, bs.driver.PutContent(ctx, bp, p) -} - -func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error { - - specPath, err := pathFor(blobsPathSpec{}) - if err != nil { - return err - } - - err = Walk(ctx, bs.driver, specPath, func(fileInfo driver.FileInfo) error { - // skip directories - if fileInfo.IsDir() { - return nil - } - - currentPath := fileInfo.Path() - // we only want to parse paths that end with /data - _, fileName := path.Split(currentPath) - if fileName != "data" { - return nil - } - - digest, err := digestFromPath(currentPath) - if err != nil { - return err - } - - return ingester(digest) - }) - return err -} - -// path returns the canonical path for the blob identified by digest. The blob -// may or may not exist. -func (bs *blobStore) path(dgst digest.Digest) (string, error) { - bp, err := pathFor(blobDataPathSpec{ - digest: dgst, - }) - - if err != nil { - return "", err - } - - return bp, nil -} - -// link links the path to the provided digest by writing the digest into the -// target file. Caller must ensure that the blob actually exists. -func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error { - // The contents of the "link" file are the exact string contents of the - // digest, which is specified in that package. - return bs.driver.PutContent(ctx, path, []byte(dgst)) -} - -// readlink returns the linked digest at path.
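The link convention stores the digest string as the entire file content; readlink below parses it back. A round-trip sketch over any storage driver (the path here is illustrative, real link paths come from pathFor):

// linkRoundTrip writes a digest as link-file content and parses it back.
func linkRoundTrip(ctx context.Context, d driver.StorageDriver, dgst digest.Digest) (digest.Digest, error) {
	linkPath := "/example/link" // hypothetical; not a real path spec
	if err := d.PutContent(ctx, linkPath, []byte(dgst)); err != nil {
		return "", err
	}
	content, err := d.GetContent(ctx, linkPath)
	if err != nil {
		return "", err
	}
	return digest.ParseDigest(string(content))
}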
-func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) { - content, err := bs.driver.GetContent(ctx, path) - if err != nil { - return "", err - } - - linked, err := digest.ParseDigest(string(content)) - if err != nil { - return "", err - } - - return linked, nil -} - -// resolve reads the digest link at path and returns the blob store path. -func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) { - dgst, err := bs.readlink(ctx, path) - if err != nil { - return "", err - } - - return bs.path(dgst) -} - -type blobStatter struct { - driver driver.StorageDriver -} - -var _ distribution.BlobDescriptorService = &blobStatter{} - -// Stat implements BlobStatter.Stat by returning the descriptor for the blob -// in the main blob store. If this method returns successfully, there is -// strong guarantee that the blob exists and is available. -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - path, err := pathFor(blobDataPathSpec{ - digest: dgst, - }) - - if err != nil { - return distribution.Descriptor{}, err - } - - fi, err := bs.driver.Stat(ctx, path) - if err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return distribution.Descriptor{}, distribution.ErrBlobUnknown - default: - return distribution.Descriptor{}, err - } - } - - if fi.IsDir() { - // NOTE(stevvooe): This represents a corruption situation. Somehow, we - // calculated a blob path and then detected a directory. We log the - // error and then error on the side of not knowing about the blob. - context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path) - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - // TODO(stevvooe): Add method to resolve the mediatype. We can store and - // cache a "global" media type for the blob, even if a specific repo has a - // mediatype that overrides the main one. - - return distribution.Descriptor{ - Size: fi.Size(), - - // NOTE(stevvooe): The central blob store firewalls media types from - // other users. The caller should look this up and override the value - // for the specific repository. - MediaType: "application/octet-stream", - Digest: dgst, - }, nil -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - return distribution.ErrUnsupported -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return distribution.ErrUnsupported -} diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go deleted file mode 100644 index 668a6fc9..00000000 --- a/docs/storage/blobwriter.go +++ /dev/null @@ -1,399 +0,0 @@ -package storage - -import ( - "errors" - "fmt" - "io" - "path" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -var ( - errResumableDigestNotAvailable = errors.New("resumable digest not available") -) - -// blobWriter is used to control the various aspects of resumable -// blob upload. 
-type blobWriter struct { - ctx context.Context - blobStore *linkedBlobStore - - id string - startedAt time.Time - digester digest.Digester - written int64 // track the contiguous write - - fileWriter storagedriver.FileWriter - driver storagedriver.StorageDriver - path string - - resumableDigestEnabled bool - committed bool -} - -var _ distribution.BlobWriter = &blobWriter{} - -// ID returns the identifier for this upload. -func (bw *blobWriter) ID() string { - return bw.id -} - -func (bw *blobWriter) StartedAt() time.Time { - return bw.startedAt -} - -// Commit marks the upload as completed, returning a valid descriptor. The -// final size and digest are checked against the first descriptor provided. -func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - context.GetLogger(ctx).Debug("(*blobWriter).Commit") - - if err := bw.fileWriter.Commit(); err != nil { - return distribution.Descriptor{}, err - } - - bw.Close() - desc.Size = bw.Size() - - canonical, err := bw.validateBlob(ctx, desc) - if err != nil { - return distribution.Descriptor{}, err - } - - if err := bw.moveBlob(ctx, canonical); err != nil { - return distribution.Descriptor{}, err - } - - if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil { - return distribution.Descriptor{}, err - } - - if err := bw.removeResources(ctx); err != nil { - return distribution.Descriptor{}, err - } - - err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical) - if err != nil { - return distribution.Descriptor{}, err - } - - bw.committed = true - return canonical, nil -} - -// Cancel the blob upload process, releasing any resources associated with -// the writer and canceling the operation. -func (bw *blobWriter) Cancel(ctx context.Context) error { - context.GetLogger(ctx).Debug("(*blobWriter).Cancel") - if err := bw.fileWriter.Cancel(); err != nil { - return err - } - - if err := bw.Close(); err != nil { - context.GetLogger(ctx).Errorf("error closing blobwriter: %s", err) - } - - if err := bw.removeResources(ctx); err != nil { - return err - } - - return nil -} - -func (bw *blobWriter) Size() int64 { - return bw.fileWriter.Size() -} - -func (bw *blobWriter) Write(p []byte) (int, error) { - // Ensure that the current write offset matches how many bytes have been - // written to the digester. If not, we need to update the digest state to - // match the current write position. - if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { - return 0, err - } - - n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p) - bw.written += int64(n) - - return n, err -} - -func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { - // Ensure that the current write offset matches how many bytes have been - // written to the digester. If not, we need to update the digest state to - // match the current write position. 
- if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { - return 0, err - } - - nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r) - bw.written += nn - - return nn, err -} - -func (bw *blobWriter) Close() error { - if bw.committed { - return errors.New("blobwriter close after commit") - } - - if err := bw.storeHashState(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { - return err - } - - return bw.fileWriter.Close() -} - -// validateBlob checks the data against the digest, returning an error if it -// does not match. The canonical descriptor is returned. -func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - var ( - verified, fullHash bool - canonical digest.Digest - ) - - if desc.Digest == "" { - // if no descriptors are provided, we have nothing to validate - // against. We don't really want to support this for the registry. - return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ - Reason: fmt.Errorf("cannot validate against empty digest"), - } - } - - var size int64 - - // Stat the on disk file - if fi, err := bw.driver.Stat(ctx, bw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // NOTE(stevvooe): We really don't care if the file is - // not actually present for the reader. We now assume - // that the desc length is zero. - desc.Size = 0 - default: - // Any other error we want propagated up the stack. - return distribution.Descriptor{}, err - } - } else { - if fi.IsDir() { - return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path) - } - - size = fi.Size() - } - - if desc.Size > 0 { - if desc.Size != size { - return distribution.Descriptor{}, distribution.ErrBlobInvalidLength - } - } else { - // if provided 0 or negative length, we can assume caller doesn't know or - // care about length. - desc.Size = size - } - - // TODO(stevvooe): This section is very meandering. Need to be broken down - // to be a lot more clear. - - if err := bw.resumeDigest(ctx); err == nil { - canonical = bw.digester.Digest() - - if canonical.Algorithm() == desc.Digest.Algorithm() { - // Common case: client and server prefer the same canonical digest - // algorithm - currently SHA256. - verified = desc.Digest == canonical - } else { - // The client wants to use a different digest algorithm. They'll just - // have to be patient and wait for us to download and re-hash the - // uploaded content using that digest algorithm. - fullHash = true - } - } else if err == errResumableDigestNotAvailable { - // Not using resumable digests, so we need to hash the entire layer. - fullHash = true - } else { - return distribution.Descriptor{}, err - } - - if fullHash { - // a fantastic optimization: if the written data and the size are - // the same, we don't need to read the data from the backend. This is - // because we've written the entire file in the lifecycle of the - // current instance. - if bw.written == size && digest.Canonical == desc.Digest.Algorithm() { - canonical = bw.digester.Digest() - verified = desc.Digest == canonical - } - - // If the check based on size fails, we fall back to the slowest of - // paths. We may be able to make the size-based check a stronger - // guarantee, so this may be defensive.
- if !verified { - digester := digest.Canonical.New() - - digestVerifier, err := digest.NewDigestVerifier(desc.Digest) - if err != nil { - return distribution.Descriptor{}, err - } - - // Read the file from the backend driver and validate it. - fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size) - if err != nil { - return distribution.Descriptor{}, err - } - defer fr.Close() - - tr := io.TeeReader(fr, digester.Hash()) - - if _, err := io.Copy(digestVerifier, tr); err != nil { - return distribution.Descriptor{}, err - } - - canonical = digester.Digest() - verified = digestVerifier.Verified() - } - } - - if !verified { - context.GetLoggerWithFields(ctx, - map[interface{}]interface{}{ - "canonical": canonical, - "provided": desc.Digest, - }, "canonical", "provided"). - Errorf("canonical digest does not match provided digest") - return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ - Digest: desc.Digest, - Reason: fmt.Errorf("content does not match digest"), - } - } - - // update desc with canonical hash - desc.Digest = canonical - - if desc.MediaType == "" { - desc.MediaType = "application/octet-stream" - } - - return desc, nil -} - -// moveBlob moves the data into its final, hash-qualified destination, -// identified by dgst. The layer should be validated before commencing the -// move. -func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error { - blobPath, err := pathFor(blobDataPathSpec{ - digest: desc.Digest, - }) - - if err != nil { - return err - } - - // Check for existence - if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // ensure that it doesn't exist. - default: - return err - } - } else { - // If the path exists, we can assume that the content has already - // been uploaded, since the blob storage is content-addressable. - // While it may be corrupted, detection of such corruption belongs - // elsewhere. - return nil - } - - // If no data was received, we may not actually have a file on disk. Check - // the size here and write a zero-length file to blobPath if this is the - // case. For the most part, this should only ever happen with zero-length - // tars. - if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // HACK(stevvooe): This is slightly dangerous: if we verify above, - // get a hash, then the underlying file is deleted, we risk moving - // a zero-length blob into a nonzero-length blob location. To - // prevent this horrid thing, we employ the hack of only allowing - // this to happen for the digest of an empty tar. - if desc.Digest == digest.DigestSha256EmptyTar { - return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{}) - } - - // We let this fail during the move below. - logrus. - WithField("upload.id", bw.ID()). - WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest") - default: - return err // unrelated error - } - } - - // TODO(stevvooe): We should also write the mediatype when executing this move. - - return bw.blobStore.driver.Move(ctx, bw.path, blobPath) -} - -// removeResources should clean up all resources associated with the upload -// instance. An error will be returned if the clean up cannot proceed. If the -// resources are already not present, no error will be returned.
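The full re-hash fallback above boils down to the digest verifier pattern. A sketch, assuming a reader positioned at the start of the uploaded content:

// verifyContent re-hashes rd and reports whether it matches dgst, the
// same digest.NewDigestVerifier flow validateBlob falls back to.
func verifyContent(rd io.Reader, dgst digest.Digest) (bool, error) {
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		return false, err
	}
	if _, err := io.Copy(verifier, rd); err != nil {
		return false, err
	}
	return verifier.Verified(), nil
}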
-func (bw *blobWriter) removeResources(ctx context.Context) error { - dataPath, err := pathFor(uploadDataPathSpec{ - name: bw.blobStore.repository.Named().Name(), - id: bw.id, - }) - - if err != nil { - return err - } - - // Resolve and delete the containing directory, which should include any - // upload related files. - dirPath := path.Dir(dataPath) - if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // already gone! - default: - // This should be uncommon enough such that returning an error - // should be okay. At this point, the upload should be mostly - // complete, but perhaps the backend became inaccessible. - context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) - return err - } - } - - return nil -} - -func (bw *blobWriter) Reader() (io.ReadCloser, error) { - // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 - try := 1 - for try <= 5 { - _, err := bw.driver.Stat(bw.ctx, bw.path) - if err == nil { - break - } - switch err.(type) { - case storagedriver.PathNotFoundError: - context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) - time.Sleep(1 * time.Second) - try++ - default: - return nil, err - } - } - - readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0) - if err != nil { - return nil, err - } - - return readCloser, nil -} diff --git a/docs/storage/blobwriter_nonresumable.go b/docs/storage/blobwriter_nonresumable.go deleted file mode 100644 index 32f13097..00000000 --- a/docs/storage/blobwriter_nonresumable.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build noresumabledigest - -package storage - -import ( - "github.com/docker/distribution/context" -) - -// resumeDigest is a noop when resumable digest support is disabled. -func (bw *blobWriter) resumeDigest(ctx context.Context) error { - return errResumableDigestNotAvailable -} - -// storeHashState is a noop when resumable digest support is disabled. -func (bw *blobWriter) storeHashState(ctx context.Context) error { - return errResumableDigestNotAvailable -} diff --git a/docs/storage/blobwriter_resumable.go b/docs/storage/blobwriter_resumable.go deleted file mode 100644 index ff5482c3..00000000 --- a/docs/storage/blobwriter_resumable.go +++ /dev/null @@ -1,145 +0,0 @@ -// +build !noresumabledigest - -package storage - -import ( - "fmt" - "path" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/stevvooe/resumable" - - // register resumable hashes with import - _ "github.com/stevvooe/resumable/sha256" - _ "github.com/stevvooe/resumable/sha512" -) - -// resumeDigest attempts to restore the state of the internal hash function -// by loading the most recent saved hash state equal to the current size of the blob. -func (bw *blobWriter) resumeDigest(ctx context.Context) error { - if !bw.resumableDigestEnabled { - return errResumableDigestNotAvailable - } - - h, ok := bw.digester.Hash().(resumable.Hash) - if !ok { - return errResumableDigestNotAvailable - } - offset := bw.fileWriter.Size() - if offset == int64(h.Len()) { - // State of digester is already at the requested offset. - return nil - } - - // List hash states from storage backend.
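The listing that follows reads back one state file per byte offset, written by storeHashState further down. Under this package's path specs, the per-upload layout looks roughly like this (upload id and offsets are illustrative):

_uploads/<upload-id>/hashstates/sha256/0
_uploads/<upload-id>/hashstates/sha256/1048576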
- var hashStateMatch hashStateEntry - hashStates, err := bw.getStoredHashStates(ctx) - if err != nil { - return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) - } - - // Find the highest stored hashState with offset equal to - // the requested offset. - for _, hashState := range hashStates { - if hashState.offset == offset { - hashStateMatch = hashState - break // Found an exact offset match. - } - } - - if hashStateMatch.offset == 0 { - // No need to load any state, just reset the hasher. - h.Reset() - } else { - storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) - if err != nil { - return err - } - - if err = h.Restore(storedState); err != nil { - return err - } - } - - // Mind the gap. - if gapLen := offset - int64(h.Len()); gapLen > 0 { - return errResumableDigestNotAvailable - } - - return nil -} - -type hashStateEntry struct { - offset int64 - path string -} - -// getStoredHashStates returns a slice of hashStateEntries for this upload. -func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { - uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Named().String(), - id: bw.id, - alg: bw.digester.Digest().Algorithm(), - list: true, - }) - - if err != nil { - return nil, err - } - - paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix) - if err != nil { - if _, ok := err.(storagedriver.PathNotFoundError); !ok { - return nil, err - } - // Treat PathNotFoundError as no entries. - paths = nil - } - - hashStateEntries := make([]hashStateEntry, 0, len(paths)) - - for _, p := range paths { - pathSuffix := path.Base(p) - // The suffix should be the offset. - offset, err := strconv.ParseInt(pathSuffix, 0, 64) - if err != nil { - logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) - } - - hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) - } - - return hashStateEntries, nil -} - -func (bw *blobWriter) storeHashState(ctx context.Context) error { - if !bw.resumableDigestEnabled { - return errResumableDigestNotAvailable - } - - h, ok := bw.digester.Hash().(resumable.Hash) - if !ok { - return errResumableDigestNotAvailable - } - - uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Named().String(), - id: bw.id, - alg: bw.digester.Digest().Algorithm(), - offset: int64(h.Len()), - }) - - if err != nil { - return err - } - - hashState, err := h.State() - if err != nil { - return err - } - - return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) -} diff --git a/docs/storage/cache/cache.go b/docs/storage/cache/cache.go deleted file mode 100644 index 10a39091..00000000 --- a/docs/storage/cache/cache.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package cache provides facilities to speed up access to the storage -// backend. -package cache - -import ( - "fmt" - - "github.com/docker/distribution" -) - -// BlobDescriptorCacheProvider provides repository scoped -// BlobDescriptorService cache instances and a global descriptor cache. -type BlobDescriptorCacheProvider interface { - distribution.BlobDescriptorService - - RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) -} - -// ValidateDescriptor provides a helper function to ensure that caches have -// common criteria for admitting descriptors. 
-func ValidateDescriptor(desc distribution.Descriptor) error { - if err := desc.Digest.Validate(); err != nil { - return err - } - - if desc.Size < 0 { - return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) - } - - if desc.MediaType == "" { - return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) - } - - return nil -} diff --git a/docs/storage/cache/cachecheck/suite.go b/docs/storage/cache/cachecheck/suite.go deleted file mode 100644 index cba5addd..00000000 --- a/docs/storage/cache/cachecheck/suite.go +++ /dev/null @@ -1,180 +0,0 @@ -package cachecheck - -import ( - "reflect" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/cache" -) - -// CheckBlobDescriptorCache takes a cache implementation through a common set -// of operations. If adding new tests, please add them here so new -// implementations get the benefit. This should be used for unit tests. -func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) { - ctx := context.Background() - - checkBlobDescriptorCacheEmptyRepository(t, ctx, provider) - checkBlobDescriptorCacheSetAndRead(t, ctx, provider) - checkBlobDescriptorCacheClear(t, ctx, provider) -} - -func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { - if _, err := provider.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { - t.Fatalf("expected unknown blob error with empty store: %v", err) - } - - cache, err := provider.RepositoryScoped("") - if err == nil { - t.Fatalf("expected an error when asking for invalid repo") - } - - cache, err = provider.RepositoryScoped("foo/bar") - if err != nil { - t.Fatalf("unexpected error getting repository: %v", err) - } - - if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{ - Digest: "sha384:abc", - Size: 10, - MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat { - t.Fatalf("expected error with invalid digest: %v", err) - } - - if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{ - Digest: "", - Size: 10, - MediaType: "application/octet-stream"}); err == nil { - t.Fatalf("expected error setting value on invalid descriptor") - } - - if _, err := cache.Stat(ctx, ""); err != digest.ErrDigestInvalidFormat { - t.Fatalf("expected error checking for cache item with empty digest: %v", err) - } - - if _, err := cache.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { - t.Fatalf("expected unknown blob error with empty repo: %v", err) - } -} - -func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { - localDigest := digest.Digest("sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") - expected := distribution.Descriptor{ - Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111", - Size: 10, - MediaType: "application/octet-stream"} - - cache, err := provider.RepositoryScoped("foo/bar") - if err != nil { - t.Fatalf("unexpected error getting scoped cache: %v", err) - } - - if err := 
cache.SetDescriptor(ctx, localDigest, expected); err != nil { - t.Fatalf("error setting descriptor: %v", err) - } - - desc, err := cache.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error statting fake2:abc: %v", err) - } - - if !reflect.DeepEqual(expected, desc) { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - // also check that we set the canonical key ("fake:abc") - desc, err = cache.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("descriptor not returned for canonical key: %v", err) - } - - if !reflect.DeepEqual(expected, desc) { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - // ensure that global gets extra descriptor mapping - desc, err = provider.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc) - } - - if !reflect.DeepEqual(desc, expected) { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - // get at it through canonical descriptor - desc, err = provider.Stat(ctx, expected.Digest) - if err != nil { - t.Fatalf("unexpected error checking global descriptor: %v", err) - } - - if !reflect.DeepEqual(desc, expected) { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - // now, we set the repo local mediatype to something else and ensure it - // doesn't get changed in the provider cache. - expected.MediaType = "application/json" - - if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { - t.Fatalf("unexpected error setting descriptor: %v", err) - } - - desc, err = cache.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error getting descriptor: %v", err) - } - - if !reflect.DeepEqual(desc, expected) { - t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) - } - - desc, err = provider.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error getting global descriptor: %v", err) - } - - expected.MediaType = "application/octet-stream" // expect original mediatype in global - - if !reflect.DeepEqual(desc, expected) { - t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) - } -} - -func checkBlobDescriptorCacheClear(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { - localDigest := digest.Digest("sha384:def111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") - expected := distribution.Descriptor{ - Digest: "sha256:def1111111111111111111111111111111111111111111111111111111111111", - Size: 10, - MediaType: "application/octet-stream"} - - cache, err := provider.RepositoryScoped("foo/bar") - if err != nil { - t.Fatalf("unexpected error getting scoped cache: %v", err) - } - - if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { - t.Fatalf("error setting descriptor: %v", err) - } - - desc, err := cache.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error statting fake2:abc: %v", err) - } - - if !reflect.DeepEqual(expected, desc) { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - err = cache.Clear(ctx, localDigest) - if err != nil { - t.Error(err) - } - - desc, err = cache.Stat(ctx, localDigest) - if err == nil { - t.Fatalf("expected error statting deleted blob: %v", err) - } -} diff --git a/docs/storage/cache/cachedblobdescriptorstore.go b/docs/storage/cache/cachedblobdescriptorstore.go deleted file mode 100644 index 94ca8a90..00000000 --- a/docs/storage/cache/cachedblobdescriptorstore.go +++ /dev/null @@ -1,101 +0,0 @@ -package cache -
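The cachedBlobStatter defined in this file consults the cache first and falls back to the backend, populating the cache on a miss; the shipped control flow uses a goto. An equivalent goto-free sketch of the same read-through pattern:

// statCacheAside prefers the cache and fills it from the backend on miss.
func statCacheAside(ctx context.Context, cache, backend distribution.BlobDescriptorService, dgst digest.Digest) (distribution.Descriptor, error) {
	if desc, err := cache.Stat(ctx, dgst); err == nil {
		return desc, nil // cache hit
	} else if err != distribution.ErrBlobUnknown {
		context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err)
	}

	desc, err := backend.Stat(ctx, dgst) // miss: consult the backend
	if err != nil {
		return desc, err
	}
	if err := cache.SetDescriptor(ctx, dgst, desc); err != nil {
		context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
	}
	return desc, nil
}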
-import ( - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - - "github.com/docker/distribution" -) - -// Metrics is used to hold metric counters -// related to the number of times a cache was -// hit or missed. -type Metrics struct { - Requests uint64 - Hits uint64 - Misses uint64 -} - -// MetricsTracker represents a metric tracker -// which simply counts the number of hits and misses. -type MetricsTracker interface { - Hit() - Miss() - Metrics() Metrics -} - -type cachedBlobStatter struct { - cache distribution.BlobDescriptorService - backend distribution.BlobDescriptorService - tracker MetricsTracker -} - -// NewCachedBlobStatter creates a new statter which prefers a cache and -// falls back to a backend. -func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - } -} - -// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and -// falls back to a backend. Hits and misses will send to the tracker. -func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - tracker: tracker, - } -} - -func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - desc, err := cbds.cache.Stat(ctx, dgst) - if err != nil { - if err != distribution.ErrBlobUnknown { - context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) - } - - goto fallback - } - - if cbds.tracker != nil { - cbds.tracker.Hit() - } - return desc, nil -fallback: - if cbds.tracker != nil { - cbds.tracker.Miss() - } - desc, err = cbds.backend.Stat(ctx, dgst) - if err != nil { - return desc, err - } - - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) - } - - return desc, err - -} - -func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - err := cbds.cache.Clear(ctx, dgst) - if err != nil { - return err - } - - err = cbds.backend.Clear(ctx, dgst) - if err != nil { - return err - } - return nil -} - -func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) - } - return nil -} diff --git a/docs/storage/cache/memory/memory.go b/docs/storage/cache/memory/memory.go deleted file mode 100644 index 68a68f08..00000000 --- a/docs/storage/cache/memory/memory.go +++ /dev/null @@ -1,170 +0,0 @@ -package memory - -import ( - "sync" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache" -) - -type inMemoryBlobDescriptorCacheProvider struct { - global *mapBlobDescriptorCache - repositories map[string]*mapBlobDescriptorCache - mu sync.RWMutex -} - -// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for -// storing blob descriptor data. 
-func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { - return &inMemoryBlobDescriptorCacheProvider{ - global: newMapBlobDescriptorCache(), - repositories: make(map[string]*mapBlobDescriptorCache), - } -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNamed(repo); err != nil { - return nil, err - } - - imbdcp.mu.RLock() - defer imbdcp.mu.RUnlock() - - return &repositoryScopedInMemoryBlobDescriptorCache{ - repo: repo, - parent: imbdcp, - repository: imbdcp.repositories[repo], - }, nil -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return imbdcp.global.Stat(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { - return imbdcp.global.Clear(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - _, err := imbdcp.Stat(ctx, dgst) - if err == distribution.ErrBlobUnknown { - - if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { - // if the digests differ, set the other canonical mapping - if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { - return err - } - } - - // unknown, just set it - return imbdcp.global.SetDescriptor(ctx, dgst, desc) - } - - // we already know it, do nothing - return err -} - -// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped -// repository cache. Instances are not thread-safe but the delegated -// operations are. -type repositoryScopedInMemoryBlobDescriptorCache struct { - repo string - parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map - repository *mapBlobDescriptorCache -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if rsimbdcp.repository == nil { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return rsimbdcp.repository.Stat(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - if rsimbdcp.repository == nil { - return distribution.ErrBlobUnknown - } - - return rsimbdcp.repository.Clear(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if rsimbdcp.repository == nil { - // allocate map since we are setting it now. - rsimbdcp.parent.mu.Lock() - var ok bool - // have to read back value since we may have allocated elsewhere. - rsimbdcp.repository, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] - if !ok { - rsimbdcp.repository = newMapBlobDescriptorCache() - rsimbdcp.parent.repositories[rsimbdcp.repo] = rsimbdcp.repository - } - - rsimbdcp.parent.mu.Unlock() - } - - if err := rsimbdcp.repository.SetDescriptor(ctx, dgst, desc); err != nil { - return err - } - - return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) -} - -// mapBlobDescriptorCache provides a simple map-based implementation of the -// descriptor cache. 
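Wiring a provider into a registry happens at construction time; the storage tests earlier in this patch use exactly this pattern:

// Within the storage package (as in blob_test.go above): a registry
// whose blob descriptors are cached in memory.
registry, err := NewRegistry(ctx, driver,
	BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()),
	EnableDelete, EnableRedirect)
if err != nil {
	// handle construction error
}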
-type mapBlobDescriptorCache struct { - descriptors map[digest.Digest]distribution.Descriptor - mu sync.RWMutex -} - -var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} - -func newMapBlobDescriptorCache() *mapBlobDescriptorCache { - return &mapBlobDescriptorCache{ - descriptors: make(map[digest.Digest]distribution.Descriptor), - } -} - -func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - mbdc.mu.RLock() - defer mbdc.mu.RUnlock() - - desc, ok := mbdc.descriptors[dgst] - if !ok { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return desc, nil -} - -func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - delete(mbdc.descriptors, dgst) - return nil -} - -func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - mbdc.descriptors[dgst] = desc - return nil -} diff --git a/docs/storage/cache/memory/memory_test.go b/docs/storage/cache/memory/memory_test.go deleted file mode 100644 index 49c2b5c3..00000000 --- a/docs/storage/cache/memory/memory_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package memory - -import ( - "testing" - - "github.com/docker/distribution/registry/storage/cache/cachecheck" -) - -// TestInMemoryBlobInfoCache checks the in memory implementation is working -// correctly. -func TestInMemoryBlobInfoCache(t *testing.T) { - cachecheck.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider()) -} diff --git a/docs/storage/cache/redis/redis.go b/docs/storage/cache/redis/redis.go deleted file mode 100644 index cb264b09..00000000 --- a/docs/storage/cache/redis/redis.go +++ /dev/null @@ -1,268 +0,0 @@ -package redis - -import ( - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache" - "github.com/garyburd/redigo/redis" -) - -// redisBlobDescriptorService provides an implementation of -// BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in -// two parts. The first provides fast access to repository membership through a -// redis set for each repo. The second is a redis hash keyed by the digest of -// the layer, providing path, length and mediatype information. There is also -// a per-repository redis hash of the blob descriptor, allowing override of -// data. This is currently used to override the mediatype on a per-repository -// basis. -// -// Note that there is no implied relationship between these two caches. The -// layer may exist in one, both or none and the code must be written this way. -type redisBlobDescriptorService struct { - pool *redis.Pool - - // TODO(stevvooe): We use a pool because we don't have great control over - // the cache lifecycle to manage connections. A new connection is fetched - // for each operation. Once we have better lifecycle management of the - // request objects, we can change this to a connection.
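Concretely, the two-part scheme described above maps onto the keys built by blobDescriptorHashKey and repositoryBlobSetKey later in this file; for repository foo/bar and digest <d>:

blobs::<d>                         global hash: digest, size, mediatype
repository::foo/bar::blobs         set recording repository membership
repository::foo/bar::blobs::<d>    per-repository hash (mediatype override)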
-} - -// NewRedisBlobDescriptorCacheProvider returns a new redis-based -// BlobDescriptorCacheProvider using the provided redis connection pool. -func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorCacheProvider { - return &redisBlobDescriptorService{ - pool: pool, - } -} - -// RepositoryScoped returns the scoped cache. -func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNamed(repo); err != nil { - return nil, err - } - - return &repositoryScopedRedisBlobDescriptorService{ - repo: repo, - upstream: rbds, - }, nil -} - -// Stat retrieves the descriptor data from the redis hash entry. -func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - conn := rbds.pool.Get() - defer conn.Close() - - return rbds.stat(ctx, conn, dgst) -} - -func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { - if err := dgst.Validate(); err != nil { - return err - } - - conn := rbds.pool.Get() - defer conn.Close() - - // Not atomic in redis <= 2.3 - reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype") - if err != nil { - return err - } - - if reply == 0 { - return distribution.ErrBlobUnknown - } - - return nil -} - -// stat provides an internal stat call that takes a connection parameter. This -// allows some internal management of the connection scope. -func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { - reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype")) - if err != nil { - return distribution.Descriptor{}, err - } - - // NOTE(stevvooe): The "size" field used to be "length". We treat a - // missing "size" field here as an unknown blob, which causes a cache - // miss, effectively migrating the field. - if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - var desc distribution.Descriptor - if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil { - return distribution.Descriptor{}, err - } - - return desc, nil -} - -// SetDescriptor sets the descriptor data for the given digest using a redis -// hash. A hash is used here since we may store unrelated fields about a layer -// in the future. -func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - conn := rbds.pool.Get() - defer conn.Close() - - return rbds.setDescriptor(ctx, conn, dgst, desc) -} - -func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { - if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst), - "digest", desc.Digest, - "size", desc.Size); err != nil { - return err - } - - // Only set mediatype if not already set. 
- if _, err := conn.Do("HSETNX", rbds.blobDescriptorHashKey(dgst), - "mediatype", desc.MediaType); err != nil { - return err - } - - return nil -} - -func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { - return "blobs::" + dgst.String() -} - -type repositoryScopedRedisBlobDescriptorService struct { - repo string - upstream *redisBlobDescriptorService -} - -var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorService{} - -// Stat ensures that the digest is a member of the specified repository and -// forwards the descriptor request to the global blob store. If the media type -// differs for the repository, we override it. -func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - conn := rsrbds.upstream.pool.Get() - defer conn.Close() - - // Check membership to repository first - member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) - if err != nil { - return distribution.Descriptor{}, err - } - - if !member { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - upstream, err := rsrbds.upstream.stat(ctx, conn, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - - // We allow a per-repository mediatype; look it up here. - mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype")) - if err != nil { - return distribution.Descriptor{}, err - } - - if mediatype != "" { - upstream.MediaType = mediatype - } - - return upstream, nil -} - -// Clear removes the descriptor from the cache and forwards to the upstream descriptor store -func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { - if err := dgst.Validate(); err != nil { - return err - } - - conn := rsrbds.upstream.pool.Get() - defer conn.Close() - - // Check membership to repository first - member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) - if err != nil { - return err - } - - if !member { - return distribution.ErrBlobUnknown - } - - return rsrbds.upstream.Clear(ctx, dgst) -} - -func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - if dgst != desc.Digest { - if dgst.Algorithm() == desc.Digest.Algorithm() { - return fmt.Errorf("redis cache: digests for descriptors differ but algorithm does not: %q != %q", dgst, desc.Digest) - } - } - - conn := rsrbds.upstream.pool.Get() - defer conn.Close() - - return rsrbds.setDescriptor(ctx, conn, dgst, desc) -} - -func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { - if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil { - return err - } - - if err := rsrbds.upstream.setDescriptor(ctx, conn, dgst, desc); err != nil { - return err - } - - // Override repository mediatype.
- if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil { - return err - } - - // Also set the values for the primary descriptor, if they differ by - // algorithm (i.e. sha256 vs sha512). - if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() { - if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil { - return err - } - } - - return nil -} - -func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { - return "repository::" + rsrbds.repo + "::blobs::" + dgst.String() -} - -func (rsrbds *repositoryScopedRedisBlobDescriptorService) repositoryBlobSetKey(repo string) string { - return "repository::" + rsrbds.repo + "::blobs" -} diff --git a/docs/storage/cache/redis/redis_test.go b/docs/storage/cache/redis/redis_test.go deleted file mode 100644 index 81bcaddd..00000000 --- a/docs/storage/cache/redis/redis_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package redis - -import ( - "flag" - "os" - "testing" - "time" - - "github.com/docker/distribution/registry/storage/cache/cachecheck" - "github.com/garyburd/redigo/redis" -) - -var redisAddr string - -func init() { - flag.StringVar(&redisAddr, "test.registry.storage.cache.redis.addr", "", "configure the address of a test instance of redis") -} - -// TestRedisBlobDescriptorCacheProvider exercises a live redis instance using the cache -// implementation. -func TestRedisBlobDescriptorCacheProvider(t *testing.T) { - if redisAddr == "" { - // fall back to an environment variable - redisAddr = os.Getenv("TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR") - } - - if redisAddr == "" { - // skip if still not set - t.Skip("please set -test.registry.storage.cache.redis.addr to test layer info cache against redis") - } - - pool := &redis.Pool{ - Dial: func() (redis.Conn, error) { - return redis.Dial("tcp", redisAddr) - }, - MaxIdle: 1, - MaxActive: 2, - TestOnBorrow: func(c redis.Conn, t time.Time) error { - _, err := c.Do("PING") - return err - }, - Wait: false, // if a connection is not available, proceed without cache. - } - - // Clear the database - if _, err := pool.Get().Do("FLUSHDB"); err != nil { - t.Fatalf("unexpected error flushing redis db: %v", err) - } - - cachecheck.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool)) -} diff --git a/docs/storage/catalog.go b/docs/storage/catalog.go deleted file mode 100644 index 3b13b7ad..00000000 --- a/docs/storage/catalog.go +++ /dev/null @@ -1,97 +0,0 @@ -package storage - -import ( - "errors" - "io" - "path" - "strings" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" -) - -// ErrFinishedWalk is used when the called walk function no longer wants -// to accept any more values. This is used for pagination when the -// required number of repos have been found. -var ErrFinishedWalk = errors.New("finished walk") - -// Repositories returns a list, or partial list, of repositories in the registry. -// Because it is quite an expensive operation, it should only be used when building up -// an initial set of repositories.
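// As a rough sketch of how a caller might page through the full catalog with
// the Repositories method below (the registry value, context, and 100-entry
// page size are assumed for illustration; the Enumerate method further down
// wraps essentially this loop):
//
//	buf := make([]string, 100)
//	last := ""
//	for {
//		n, err := reg.Repositories(ctx, buf, last)
//		if err != nil && err != io.EOF {
//			return err
//		}
//		for _, name := range buf[:n] {
//			fmt.Println(name)
//		}
//		if err == io.EOF || n == 0 {
//			break
//		}
//		last = buf[n-1]
//	}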
-func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, errVal error) { - var foundRepos []string - - if len(repos) == 0 { - return 0, errors.New("no space in slice") - } - - root, err := pathFor(repositoriesRootPathSpec{}) - if err != nil { - return 0, err - } - - err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { - filePath := fileInfo.Path() - - // lop the base path off - repoPath := filePath[len(root)+1:] - - _, file := path.Split(repoPath) - if file == "_layers" { - repoPath = strings.TrimSuffix(repoPath, "/_layers") - if repoPath > last { - foundRepos = append(foundRepos, repoPath) - } - return ErrSkipDir - } else if strings.HasPrefix(file, "_") { - return ErrSkipDir - } - - // if we've filled our array, no need to walk any further - if len(foundRepos) == len(repos) { - return ErrFinishedWalk - } - - return nil - }) - - n = copy(repos, foundRepos) - - // Signal that we have no more entries by setting EOF - if len(foundRepos) <= len(repos) && err != ErrFinishedWalk { - errVal = io.EOF - } - - return n, errVal -} - -// Enumerate applies ingester to each repository -func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) error { - repoNameBuffer := make([]string, 100) - var last string - for { - n, err := reg.Repositories(ctx, repoNameBuffer, last) - if err != nil && err != io.EOF { - return err - } - - if n == 0 { - break - } - - last = repoNameBuffer[n-1] - for i := 0; i < n; i++ { - repoName := repoNameBuffer[i] - err = ingester(repoName) - if err != nil { - return err - } - } - - if err == io.EOF { - break - } - } - return nil - -} diff --git a/docs/storage/catalog_test.go b/docs/storage/catalog_test.go deleted file mode 100644 index eb062c5b..00000000 --- a/docs/storage/catalog_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package storage - -import ( - "io" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -type setupEnv struct { - ctx context.Context - driver driver.StorageDriver - expected []string - registry distribution.Namespace -} - -func setupFS(t *testing.T) *setupEnv { - d := inmemory.New() - c := []byte("") - ctx := context.Background() - registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - rootpath, _ := pathFor(repositoriesRootPathSpec{}) - - repos := []string{ - "/foo/a/_layers/1", - "/foo/b/_layers/2", - "/bar/c/_layers/3", - "/bar/d/_layers/4", - "/foo/d/in/_layers/5", - "/an/invalid/repo", - "/bar/d/_layers/ignored/dir/6", - } - - for _, repo := range repos { - if err := d.PutContent(ctx, rootpath+repo, c); err != nil { - t.Fatalf("Unable to put to inmemory fs") - } - } - - expected := []string{ - "bar/c", - "bar/d", - "foo/a", - "foo/b", - "foo/d/in", - } - - return &setupEnv{ - ctx: ctx, - driver: d, - expected: expected, - registry: registry, - } -} - -func TestCatalog(t *testing.T) { - env := setupFS(t) - - p := make([]string, 50) - - numFilled, err := env.registry.Repositories(env.ctx, p, "") - - if !testEq(p, env.expected, numFilled) { - t.Errorf("Expected catalog repos err") - } - - if err != io.EOF { - t.Errorf("Catalog has more values which we aren't expecting") - } -} - -func 
TestCatalogInParts(t *testing.T) { - env := setupFS(t) - - chunkLen := 2 - p := make([]string, chunkLen) - - numFilled, err := env.registry.Repositories(env.ctx, p, "") - if err == io.EOF || numFilled != len(p) { - t.Errorf("Expected more values in catalog") - } - - if !testEq(p, env.expected[0:chunkLen], numFilled) { - t.Errorf("Expected catalog first chunk err") - } - - lastRepo := p[len(p)-1] - numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) - - if err == io.EOF || numFilled != len(p) { - t.Errorf("Expected more values in catalog") - } - - if !testEq(p, env.expected[chunkLen:chunkLen*2], numFilled) { - t.Errorf("Expected catalog second chunk err") - } - - lastRepo = p[len(p)-1] - numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) - - if err != io.EOF { - t.Errorf("Catalog has more values which we aren't expecting") - } - - if !testEq(p, env.expected[chunkLen*2:chunkLen*3-1], numFilled) { - t.Errorf("Expected catalog third chunk err") - } - -} - -// testEq compares the first size entries of a and b. -func testEq(a, b []string, size int) bool { - for cnt := 0; cnt < size; cnt++ { - if a[cnt] != b[cnt] { - return false - } - } - return true -} diff --git a/docs/storage/doc.go b/docs/storage/doc.go deleted file mode 100644 index 387d9234..00000000 --- a/docs/storage/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package storage contains storage services for use in the registry -// application. It should be considered an internal package, as of Go 1.4. -package storage diff --git a/docs/storage/driver/azure/azure.go b/docs/storage/driver/azure/azure.go deleted file mode 100644 index b06b0876..00000000 --- a/docs/storage/driver/azure/azure.go +++ /dev/null @@ -1,482 +0,0 @@ -// Package azure provides a storagedriver.StorageDriver implementation to -// store blobs in Microsoft Azure Blob Storage Service. -package azure - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" - "time" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -const driverName = "azure" - -const ( - paramAccountName = "accountname" - paramAccountKey = "accountkey" - paramContainer = "container" - paramRealm = "realm" - maxChunkSize = 4 * 1024 * 1024 -) - -type driver struct { - client azure.BlobStorageClient - container string -} - -type baseEmbed struct{ base.Base } - -// Driver is a storagedriver.StorageDriver implementation backed by -// Microsoft Azure Blob Storage Service. -type Driver struct{ baseEmbed } - -func init() { - factory.Register(driverName, &azureDriverFactory{}) } - -type azureDriverFactory struct{} - -func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -// FromParameters constructs a new Driver with a given parameters map.
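// For example, a parameters map for this driver might look like the following
// sketch (the account name, key, and container are placeholders, not working
// credentials; "realm" may be omitted to fall back to azure.DefaultBaseURL):
//
//	d, err := FromParameters(map[string]interface{}{
//		"accountname": "myaccount",
//		"accountkey":  "bXlrZXk=",
//		"container":   "registry",
//	})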
-func FromParameters(parameters map[string]interface{}) (*Driver, error) { - accountName, ok := parameters[paramAccountName] - if !ok || fmt.Sprint(accountName) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramAccountName) - } - - accountKey, ok := parameters[paramAccountKey] - if !ok || fmt.Sprint(accountKey) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramAccountKey) - } - - container, ok := parameters[paramContainer] - if !ok || fmt.Sprint(container) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramContainer) - } - - realm, ok := parameters[paramRealm] - if !ok || fmt.Sprint(realm) == "" { - realm = azure.DefaultBaseURL - } - - return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) -} - -// New constructs a new Driver with the given Azure Storage Account credentials -func New(accountName, accountKey, container, realm string) (*Driver, error) { - api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultAPIVersion, true) - if err != nil { - return nil, err - } - - blobClient := api.GetBlobService() - - // Create registry container - if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil { - return nil, err - } - - d := &driver{ - client: blobClient, - container: container} - return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil -} - -// Implement the storagedriver.StorageDriver interface. -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - blob, err := d.client.GetBlob(d.container, path) - if err != nil { - if is404(err) { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return nil, err - } - - return ioutil.ReadAll(blob) -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil { - return err - } - writer, err := d.Writer(ctx, path, false) - if err != nil { - return err - } - defer writer.Close() - _, err = writer.Write(contents) - if err != nil { - return err - } - return writer.Commit() -} - -// Reader retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - if ok, err := d.client.BlobExists(d.container, path); err != nil { - return nil, err - } else if !ok { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - info, err := d.client.GetBlobProperties(d.container, path) - if err != nil { - return nil, err - } - - size := int64(info.ContentLength) - if offset >= size { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - bytesRange := fmt.Sprintf("%v-", offset) - resp, err := d.client.GetBlobRange(d.container, path, bytesRange) - if err != nil { - return nil, err - } - return resp, nil -} - -// Writer returns a FileWriter which will store the content written to it -// at the location designated by "path" after the call to Commit. 
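// Note that the Reader implementation above supports resumable reads: a
// nonzero offset continues from that byte, and an offset at or past the end
// of the blob yields an empty, non-nil ReadCloser rather than an error. A
// small usage sketch (path is assumed to name an existing blob):
//
//	rc, err := d.Reader(ctx, path, 1024) // resume from byte 1024
//	if err == nil {
//		defer rc.Close()
//	}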
-func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { - blobExists, err := d.client.BlobExists(d.container, path) - if err != nil { - return nil, err - } - var size int64 - if blobExists { - if append { - blobProperties, err := d.client.GetBlobProperties(d.container, path) - if err != nil { - return nil, err - } - size = blobProperties.ContentLength - } else { - err := d.client.DeleteBlob(d.container, path) - if err != nil { - return nil, err - } - } - } else { - if append { - return nil, storagedriver.PathNotFoundError{Path: path} - } - err := d.client.PutAppendBlob(d.container, path, nil) - if err != nil { - return nil, err - } - } - - return d.newWriter(path, size), nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - // Check if the path is a blob - if ok, err := d.client.BlobExists(d.container, path); err != nil { - return nil, err - } else if ok { - blob, err := d.client.GetBlobProperties(d.container, path) - if err != nil { - return nil, err - } - - mtim, err := time.Parse(http.TimeFormat, blob.LastModified) - if err != nil { - return nil, err - } - - return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - Size: int64(blob.ContentLength), - ModTime: mtim, - IsDir: false, - }}, nil - } - - // Check if path is a virtual container - virtContainerPath := path - if !strings.HasSuffix(virtContainerPath, "/") { - virtContainerPath += "/" - } - blobs, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ - Prefix: virtContainerPath, - MaxResults: 1, - }) - if err != nil { - return nil, err - } - if len(blobs.Blobs) > 0 { - // path is a virtual container - return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - IsDir: true, - }}, nil - } - - // path is not a blob or virtual container - return nil, storagedriver.PathNotFoundError{Path: path} -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (d *driver) List(ctx context.Context, path string) ([]string, error) { - if path == "/" { - path = "" - } - - blobs, err := d.listBlobs(d.container, path) - if err != nil { - return blobs, err - } - - list := directDescendants(blobs, path) - if path != "" && len(list) == 0 { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return list, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath) - err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) - if err != nil { - if is404(err) { - return storagedriver.PathNotFoundError{Path: sourcePath} - } - return err - } - - return d.client.DeleteBlob(d.container, sourcePath) -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. 
-func (d *driver) Delete(ctx context.Context, path string) error { - ok, err := d.client.DeleteBlobIfExists(d.container, path) - if err != nil { - return err - } - if ok { - return nil // was a blob and deleted, return - } - - // Not a blob, see if path is a virtual container with blobs - blobs, err := d.listBlobs(d.container, path) - if err != nil { - return err - } - - for _, b := range blobs { - if err = d.client.DeleteBlob(d.container, b); err != nil { - return err - } - } - - if len(blobs) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } - return nil -} - -// URLFor returns a publicly accessible URL for the blob stored at given path -// for specified duration by making use of Azure Storage Shared Access Signatures (SAS). -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info. -func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration - expires, ok := options["expiry"] - if ok { - t, ok := expires.(time.Time) - if ok { - expiresTime = t - } - } - return d.client.GetBlobSASURI(d.container, path, expiresTime, "r") -} - -// directDescendants finds the direct descendants (blobs or virtual containers) -// from a list of blob paths and returns their full paths. Elements in the -// blobs list must be prefixed with a "/". -// -// Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} is -// {"/foo", "/bar"} and direct descendants of "bar" is {"/bar/1", "/bar/2"} -func directDescendants(blobs []string, prefix string) []string { - if !strings.HasPrefix(prefix, "/") { // add leading '/' - prefix = "/" + prefix - } - if !strings.HasSuffix(prefix, "/") { // containerify the path - prefix += "/" - } - - out := make(map[string]bool) - for _, b := range blobs { - if strings.HasPrefix(b, prefix) { - rel := b[len(prefix):] - c := strings.Count(rel, "/") - if c == 0 { - out[b] = true - } else { - out[prefix+rel[:strings.Index(rel, "/")]] = true - } - } - } - - var keys []string - for k := range out { - keys = append(keys, k) - } - return keys -} - -func (d *driver) listBlobs(container, virtPath string) ([]string, error) { - if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path - virtPath += "/" - } - - out := []string{} - marker := "" - for { - resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ - Marker: marker, - Prefix: virtPath, - }) - - if err != nil { - return out, err - } - - for _, b := range resp.Blobs { - out = append(out, b.Name) - } - - if len(resp.Blobs) == 0 || resp.NextMarker == "" { - break - } - marker = resp.NextMarker - } - return out, nil -} - -func is404(err error) bool { - statusCodeErr, ok := err.(azure.AzureStorageServiceError) - return ok && statusCodeErr.StatusCode == http.StatusNotFound -} - -type writer struct { - driver *driver - path string - size int64 - bw *bufio.Writer - closed bool - committed bool - cancelled bool -} - -func (d *driver) newWriter(path string, size int64) storagedriver.FileWriter { - return &writer{ - driver: d, - path: path, - size: size, - bw: bufio.NewWriterSize(&blockWriter{ - client: d.client, - container: d.container, - path: path, - }, maxChunkSize), - } -} - -func (w *writer) Write(p []byte) (int, error) { - if w.closed { - return 0, fmt.Errorf("already closed") - } else if w.committed { - return 0, fmt.Errorf("already committed") - } else if w.cancelled { - return 0, fmt.Errorf("already cancelled") - } - - n, err
:= w.bw.Write(p) - w.size += int64(n) - return n, err -} - -func (w *writer) Size() int64 { - return w.size -} - -func (w *writer) Close() error { - if w.closed { - return fmt.Errorf("already closed") - } - w.closed = true - return w.bw.Flush() -} - -func (w *writer) Cancel() error { - if w.closed { - return fmt.Errorf("already closed") - } else if w.committed { - return fmt.Errorf("already committed") - } - w.cancelled = true - return w.driver.client.DeleteBlob(w.driver.container, w.path) -} - -func (w *writer) Commit() error { - if w.closed { - return fmt.Errorf("already closed") - } else if w.committed { - return fmt.Errorf("already committed") - } else if w.cancelled { - return fmt.Errorf("already cancelled") - } - w.committed = true - return w.bw.Flush() -} - -type blockWriter struct { - client azure.BlobStorageClient - container string - path string -} - -func (bw *blockWriter) Write(p []byte) (int, error) { - n := 0 - for offset := 0; offset < len(p); offset += maxChunkSize { - chunkSize := maxChunkSize - if offset+chunkSize > len(p) { - chunkSize = len(p) - offset - } - err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize]) - if err != nil { - return n, err - } - - n += chunkSize - } - - return n, nil -} diff --git a/docs/storage/driver/azure/azure_test.go b/docs/storage/driver/azure/azure_test.go deleted file mode 100644 index 4a0661b3..00000000 --- a/docs/storage/driver/azure/azure_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package azure - -import ( - "fmt" - "os" - "strings" - "testing" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - . "gopkg.in/check.v1" -) - -const ( - envAccountName = "AZURE_STORAGE_ACCOUNT_NAME" - envAccountKey = "AZURE_STORAGE_ACCOUNT_KEY" - envContainer = "AZURE_STORAGE_CONTAINER" - envRealm = "AZURE_STORAGE_REALM" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } - -func init() { - var ( - accountName string - accountKey string - container string - realm string - ) - - config := []struct { - env string - value *string - }{ - {envAccountName, &accountName}, - {envAccountKey, &accountKey}, - {envContainer, &container}, - {envRealm, &realm}, - } - - missing := []string{} - for _, v := range config { - *v.value = os.Getenv(v.env) - if *v.value == "" { - missing = append(missing, v.env) - } - } - - azureDriverConstructor := func() (storagedriver.StorageDriver, error) { - return New(accountName, accountKey, container, realm) - } - - // Skip Azure storage driver tests if environment variable parameters are not provided - skipCheck := func() string { - if len(missing) > 0 { - return fmt.Sprintf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", ")) - } - return "" - } - - testsuites.RegisterSuite(azureDriverConstructor, skipCheck) -} diff --git a/docs/storage/driver/base/base.go b/docs/storage/driver/base/base.go deleted file mode 100644 index 064bda60..00000000 --- a/docs/storage/driver/base/base.go +++ /dev/null @@ -1,198 +0,0 @@ -// Package base provides a base implementation of the storage driver that can -// be used to implement common checks. The goal is to increase the amount of -// code sharing. -// -// The canonical approach to use this class is to embed in the exported driver -// struct such that calls are proxied through this implementation. First, -// declare the internal driver, as follows: -// -// type driver struct { ... 
internal ...} -// -// The resulting type should implement StorageDriver such that it can be the -// target of a Base struct. The exported type can then be declared as follows: -// -// type Driver struct { -// Base -// } -// -// Because Driver embeds Base, it effectively implements Base. If the driver -// needs to intercept a call, before going to base, Driver should implement -// that method. Effectively, Driver can intercept calls before coming in and -// driver implements the actual logic. -// -// To further shield the embed from other packages, it is recommended to -// employ a private embed struct: -// -// type baseEmbed struct { -// base.Base -// } -// -// Then, declare driver to embed baseEmbed, rather than Base directly: -// -// type Driver struct { -// baseEmbed -// } -// -// The type now implements StorageDriver, proxying through Base, without -// exporting an unnecessary field. -package base - -import ( - "io" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// Base provides a wrapper around a storagedriver implementation that provides -// common path and bounds checking. -type Base struct { - storagedriver.StorageDriver -} - -// Format errors received from the storage driver -func (base *Base) setDriverName(e error) error { - switch actual := e.(type) { - case nil: - return nil - case storagedriver.ErrUnsupportedMethod: - actual.DriverName = base.StorageDriver.Name() - return actual - case storagedriver.PathNotFoundError: - actual.DriverName = base.StorageDriver.Name() - return actual - case storagedriver.InvalidPathError: - actual.DriverName = base.StorageDriver.Name() - return actual - case storagedriver.InvalidOffsetError: - actual.DriverName = base.StorageDriver.Name() - return actual - default: - storageError := storagedriver.Error{ - DriverName: base.StorageDriver.Name(), - Enclosed: e, - } - - return storageError - } -} - -// GetContent wraps GetContent of underlying storage driver. -func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.GetContent(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - b, e := base.StorageDriver.GetContent(ctx, path) - return b, base.setDriverName(e) -} - -// PutContent wraps PutContent of underlying storage driver. -func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { - ctx, done := context.WithTrace(ctx) - defer done("%s.PutContent(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) { - return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - return base.setDriverName(base.StorageDriver.PutContent(ctx, path, content)) -} - -// Reader wraps Reader of underlying storage driver. 
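// Putting the embedding pattern from the package comment together, a minimal
// driver skeleton might look like the following sketch (names hypothetical;
// driver is assumed to implement storagedriver.StorageDriver):
//
//	type driver struct{ /* backend state */ }
//
//	type baseEmbed struct{ base.Base }
//
//	// Driver proxies calls through Base for path and bounds checking.
//	type Driver struct{ baseEmbed }
//
//	func NewDriver() *Driver {
//		return &Driver{baseEmbed{base.Base{StorageDriver: &driver{}}}}
//	}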
-func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.Reader(%q, %d)", base.Name(), path, offset) - - if offset < 0 { - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} - } - - if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - rc, e := base.StorageDriver.Reader(ctx, path, offset) - return rc, base.setDriverName(e) -} - -// Writer wraps Writer of underlying storage driver. -func (base *Base) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.Writer(%q, %v)", base.Name(), path, append) - - if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - writer, e := base.StorageDriver.Writer(ctx, path, append) - return writer, base.setDriverName(e) -} - -// Stat wraps Stat of underlying storage driver. -func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.Stat(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - fi, e := base.StorageDriver.Stat(ctx, path) - return fi, base.setDriverName(e) -} - -// List wraps List of underlying storage driver. -func (base *Base) List(ctx context.Context, path string) ([]string, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.List(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) && path != "/" { - return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - str, e := base.StorageDriver.List(ctx, path) - return str, base.setDriverName(e) -} - -// Move wraps Move of underlying storage driver. -func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error { - ctx, done := context.WithTrace(ctx) - defer done("%s.Move(%q, %q)", base.Name(), sourcePath, destPath) - - if !storagedriver.PathRegexp.MatchString(sourcePath) { - return storagedriver.InvalidPathError{Path: sourcePath, DriverName: base.StorageDriver.Name()} - } else if !storagedriver.PathRegexp.MatchString(destPath) { - return storagedriver.InvalidPathError{Path: destPath, DriverName: base.StorageDriver.Name()} - } - - return base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath)) -} - -// Delete wraps Delete of underlying storage driver. -func (base *Base) Delete(ctx context.Context, path string) error { - ctx, done := context.WithTrace(ctx) - defer done("%s.Delete(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) { - return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - return base.setDriverName(base.StorageDriver.Delete(ctx, path)) -} - -// URLFor wraps URLFor of underlying storage driver.
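// Each wrapper above validates the path before delegating, so an invalid path
// never reaches the underlying driver. A sketch of the observable behavior,
// assuming paths must match storagedriver.PathRegexp (which requires a
// leading slash):
//
//	_, err := wrapped.Stat(ctx, "no-leading-slash")
//	if _, ok := err.(storagedriver.InvalidPathError); ok {
//		// rejected before the backend was ever consulted
//	}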
-func (base *Base) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.URLFor(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) { - return "", storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - str, e := base.StorageDriver.URLFor(ctx, path, options) - return str, base.setDriverName(e) -} diff --git a/docs/storage/driver/base/regulator.go b/docs/storage/driver/base/regulator.go deleted file mode 100644 index 185160a4..00000000 --- a/docs/storage/driver/base/regulator.go +++ /dev/null @@ -1,145 +0,0 @@ -package base - -import ( - "io" - "sync" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -type regulator struct { - storagedriver.StorageDriver - *sync.Cond - - available uint64 -} - -// NewRegulator wraps the given driver and is used to regulate concurrent calls -// to the given storage driver to a maximum of the given limit. This is useful -// for storage drivers that would otherwise create an unbounded number of OS -// threads if allowed to be called unregulated. -func NewRegulator(driver storagedriver.StorageDriver, limit uint64) storagedriver.StorageDriver { - return ®ulator{ - StorageDriver: driver, - Cond: sync.NewCond(&sync.Mutex{}), - available: limit, - } -} - -func (r *regulator) enter() { - r.L.Lock() - for r.available == 0 { - r.Wait() - } - r.available-- - r.L.Unlock() -} - -func (r *regulator) exit() { - r.L.Lock() - // We only need to signal to a waiting FS operation if we're already at the - // limit of threads used - if r.available == 0 { - r.Signal() - } - r.available++ - r.L.Unlock() -} - -// Name returns the human-readable "name" of the driver, useful in error -// messages and logging. By convention, this will just be the registration -// name, but drivers may provide other information here. -func (r *regulator) Name() string { - r.enter() - defer r.exit() - - return r.StorageDriver.Name() -} - -// GetContent retrieves the content stored at "path" as a []byte. -// This should primarily be used for small objects. -func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) { - r.enter() - defer r.exit() - - return r.StorageDriver.GetContent(ctx, path) -} - -// PutContent stores the []byte content at a location designated by "path". -// This should primarily be used for small objects. -func (r *regulator) PutContent(ctx context.Context, path string, content []byte) error { - r.enter() - defer r.exit() - - return r.StorageDriver.PutContent(ctx, path, content) -} - -// Reader retrieves an io.ReadCloser for the content stored at "path" -// with a given byte offset. -// May be used to resume reading a stream by providing a nonzero offset. -func (r *regulator) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - r.enter() - defer r.exit() - - return r.StorageDriver.Reader(ctx, path, offset) -} - -// Writer stores the contents of the provided io.ReadCloser at a -// location designated by the given path. -// May be used to resume writing a stream by providing a nonzero offset. -// The offset must be no larger than the CurrentSize for this path. 
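// As a usage sketch, a driver prone to spawning OS threads under load can be
// wrapped once at construction time (newBackendDriver is a hypothetical
// constructor; the filesystem driver later in this patch does exactly this
// with its maxthreads parameter):
//
//	var raw storagedriver.StorageDriver = newBackendDriver()
//	regulated := NewRegulator(raw, 100) // at most 100 concurrent calls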
-func (r *regulator) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { - r.enter() - defer r.exit() - - return r.StorageDriver.Writer(ctx, path, append) -} - -// Stat retrieves the FileInfo for the given path, including the current -// size in bytes and the creation time. -func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - r.enter() - defer r.exit() - - return r.StorageDriver.Stat(ctx, path) -} - -// List returns a list of the objects that are direct descendants of the -//given path. -func (r *regulator) List(ctx context.Context, path string) ([]string, error) { - r.enter() - defer r.exit() - - return r.StorageDriver.List(ctx, path) -} - -// Move moves an object stored at sourcePath to destPath, removing the -// original object. -// Note: This may be no more efficient than a copy followed by a delete for -// many implementations. -func (r *regulator) Move(ctx context.Context, sourcePath string, destPath string) error { - r.enter() - defer r.exit() - - return r.StorageDriver.Move(ctx, sourcePath, destPath) -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (r *regulator) Delete(ctx context.Context, path string) error { - r.enter() - defer r.exit() - - return r.StorageDriver.Delete(ctx, path) -} - -// URLFor returns a URL which may be used to retrieve the content stored at -// the given path, possibly using the given options. -// May return an ErrUnsupportedMethod in certain StorageDriver -// implementations. -func (r *regulator) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - r.enter() - defer r.exit() - - return r.StorageDriver.URLFor(ctx, path, options) -} diff --git a/docs/storage/driver/factory/factory.go b/docs/storage/driver/factory/factory.go deleted file mode 100644 index a9c04ec5..00000000 --- a/docs/storage/driver/factory/factory.go +++ /dev/null @@ -1,64 +0,0 @@ -package factory - -import ( - "fmt" - - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// driverFactories stores an internal mapping between storage driver names and their respective -// factories -var driverFactories = make(map[string]StorageDriverFactory) - -// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces -// Storage drivers should call Register() with a factory to make the driver available by name. -// Individual StorageDriver implementations generally register with the factory via the Register -// func (below) in their init() funcs, and as such they should be imported anonymously before use. -// See below for an example of how to register and get a StorageDriver for S3 -// -// import _ "github.com/docker/distribution/registry/storage/driver/s3-aws" -// s3Driver, err = factory.Create("s3", storageParams) -// // assuming no error, s3Driver is the StorageDriver that communicates with S3 according to storageParams -type StorageDriverFactory interface { - // Create returns a new storagedriver.StorageDriver with the given parameters - // Parameters will vary by driver and may be ignored - // Each parameter key must only consist of lowercase letters and numbers - Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) -} - -// Register makes a storage driver available by the provided name. -// If Register is called twice with the same name or if driver factory is nil, it panics. -// Additionally, it is not concurrency safe. 
Most storage drivers call this function -// in their init() functions. See the documentation for StorageDriverFactory for more. -func Register(name string, factory StorageDriverFactory) { - if factory == nil { - panic("Must not provide nil StorageDriverFactory") - } - _, registered := driverFactories[name] - if registered { - panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name)) - } - - driverFactories[name] = factory -} - -// Create a new storagedriver.StorageDriver with the given name and -// parameters. To use a driver, the StorageDriverFactory must first be -// registered with the given name. If no driver is registered under the given -// name, an InvalidStorageDriverError is returned. -func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - driverFactory, ok := driverFactories[name] - if !ok { - return nil, InvalidStorageDriverError{name} - } - return driverFactory.Create(parameters) -} - -// InvalidStorageDriverError records an attempt to construct an unregistered storage driver -type InvalidStorageDriverError struct { - Name string -} - -func (err InvalidStorageDriverError) Error() string { - return fmt.Sprintf("StorageDriver not registered: %s", err.Name) -} diff --git a/docs/storage/driver/fileinfo.go b/docs/storage/driver/fileinfo.go deleted file mode 100644 index e5064029..00000000 --- a/docs/storage/driver/fileinfo.go +++ /dev/null @@ -1,79 +0,0 @@ -package driver - -import "time" - -// FileInfo returns information about a given path. Inspired by os.FileInfo, -// it elides the base name method for a full path instead. -type FileInfo interface { - // Path provides the full path of the target of this file info. - Path() string - - // Size returns current length in bytes of the file. The return value can - // be used to write to the end of the file at path. The value is - // meaningless if IsDir returns true. - Size() int64 - - // ModTime returns the modification time for the file. For backends that - // don't have a modification time, the creation time should be returned. - ModTime() time.Time - - // IsDir returns true if the path is a directory. - IsDir() bool -} - -// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal -// should only be used by storagedriver implementations. They should be moved -// to a "driver" package, similar to database/sql. - -// FileInfoFields provides the exported fields for implementing FileInfo -// interface in storagedriver implementations. It should be used with -// InternalFileInfo. -type FileInfoFields struct { - // Path provides the full path of the target of this file info. - Path string - - // Size is current length in bytes of the file. The value of this field - // can be used to write to the end of the file at path. The value is - // meaningless if IsDir is set to true. - Size int64 - - // ModTime returns the modification time for the file. For backends that - // don't have a modification time, the creation time should be returned. - ModTime time.Time - - // IsDir returns true if the path is a directory. - IsDir bool -} - -// FileInfoInternal implements the FileInfo interface. This should only be -// used by storagedriver implementations that don't have a specialized -// FileInfo type. -type FileInfoInternal struct { - FileInfoFields -} - -var _ FileInfo = FileInfoInternal{} -var _ FileInfo = &FileInfoInternal{} - -// Path provides the full path of the target of this file info.
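// Tying the factory pieces above together: registration happens as a side
// effect of importing a driver package, after which Create can build a driver
// by name. A sketch (parameters are illustrative):
//
//	import _ "github.com/docker/distribution/registry/storage/driver/filesystem"
//
//	d, err := factory.Create("filesystem", map[string]interface{}{
//		"rootdirectory": "/var/lib/registry",
//	})
//	if err != nil {
//		// an unregistered name yields an InvalidStorageDriverError
//	}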
-func (fi FileInfoInternal) Path() string { - return fi.FileInfoFields.Path -} - -// Size returns current length in bytes of the file. The return value can -// be used to write to the end of the file at path. The value is -// meaningless if IsDir returns true. -func (fi FileInfoInternal) Size() int64 { - return fi.FileInfoFields.Size -} - -// ModTime returns the modification time for the file. For backends that -// don't have a modification time, the creation time should be returned. -func (fi FileInfoInternal) ModTime() time.Time { - return fi.FileInfoFields.ModTime -} - -// IsDir returns true if the path is a directory. -func (fi FileInfoInternal) IsDir() bool { - return fi.FileInfoFields.IsDir -} diff --git a/docs/storage/driver/filesystem/driver.go b/docs/storage/driver/filesystem/driver.go deleted file mode 100644 index 649e2bc2..00000000 --- a/docs/storage/driver/filesystem/driver.go +++ /dev/null @@ -1,440 +0,0 @@ -package filesystem - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "reflect" - "strconv" - "time" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const ( - driverName = "filesystem" - defaultRootDirectory = "/var/lib/registry" - defaultMaxThreads = uint64(100) - - // minThreads is the minimum value for the maxthreads configuration - // parameter. If the driver's parameters are less than this we set - // the parameters to minThreads - minThreads = uint64(25) -) - -// DriverParameters represents all configuration options available for the -// filesystem driver -type DriverParameters struct { - RootDirectory string - MaxThreads uint64 -} - -func init() { - factory.Register(driverName, &filesystemDriverFactory{}) -} - -// filesystemDriverFactory implements the factory.StorageDriverFactory interface -type filesystemDriverFactory struct{} - -func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - rootDirectory string -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by a local -// filesystem. All provided paths will be subpaths of the RootDirectory. 
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Optional Parameters: -// - rootdirectory -// - maxthreads -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - params, err := fromParametersImpl(parameters) - if err != nil || params == nil { - return nil, err - } - return New(*params), nil -} - -func fromParametersImpl(parameters map[string]interface{}) (*DriverParameters, error) { - var ( - err error - maxThreads = defaultMaxThreads - rootDirectory = defaultRootDirectory - ) - - if parameters != nil { - if rootDir, ok := parameters["rootdirectory"]; ok { - rootDirectory = fmt.Sprint(rootDir) - } - - // Get maximum number of threads for blocking filesystem operations, - // if specified - threads := parameters["maxthreads"] - switch v := threads.(type) { - case string: - if maxThreads, err = strconv.ParseUint(v, 0, 64); err != nil { - return nil, fmt.Errorf("maxthreads parameter must be an integer, %v invalid", threads) - } - case uint64: - maxThreads = v - case int, int32, int64: - val := reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Int() - // If threads is negative casting to uint64 will wrap around and - // give you the hugest thread limit ever. Let's be sensible, here - if val > 0 { - maxThreads = uint64(val) - } - case uint, uint32: - maxThreads = reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Uint() - case nil: - // do nothing - default: - return nil, fmt.Errorf("invalid value for maxthreads: %#v", threads) - } - - if maxThreads < minThreads { - maxThreads = minThreads - } - } - - params := &DriverParameters{ - RootDirectory: rootDirectory, - MaxThreads: maxThreads, - } - return params, nil -} - -// New constructs a new Driver with a given rootDirectory -func New(params DriverParameters) *Driver { - fsDriver := &driver{rootDirectory: params.RootDirectory} - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: base.NewRegulator(fsDriver, params.MaxThreads), - }, - }, - } -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - rc, err := d.Reader(ctx, path, 0) - if err != nil { - return nil, err - } - defer rc.Close() - - p, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - - return p, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { - writer, err := d.Writer(ctx, subPath, false) - if err != nil { - return err - } - defer writer.Close() - _, err = io.Copy(writer, bytes.NewReader(contents)) - if err != nil { - writer.Cancel() - return err - } - return writer.Commit() -} - -// Reader retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. 
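// For example (values illustrative), either of the following yields a driver
// regulated to 64 concurrent filesystem operations; numeric strings are
// parsed, and values below minThreads (25) are silently raised to the
// minimum:
//
//	d1, err := FromParameters(map[string]interface{}{
//		"rootdirectory": "/var/lib/registry",
//		"maxthreads":    64,
//	})
//	d2, err := FromParameters(map[string]interface{}{
//		"rootdirectory": "/var/lib/registry",
//		"maxthreads":    "64",
//	})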
-func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) - if err != nil { - if os.IsNotExist(err) { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - return nil, err - } - - seekPos, err := file.Seek(int64(offset), os.SEEK_SET) - if err != nil { - file.Close() - return nil, err - } else if seekPos < int64(offset) { - file.Close() - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - return file, nil -} - -func (d *driver) Writer(ctx context.Context, subPath string, append bool) (storagedriver.FileWriter, error) { - fullPath := d.fullPath(subPath) - parentDir := path.Dir(fullPath) - if err := os.MkdirAll(parentDir, 0777); err != nil { - return nil, err - } - - fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - return nil, err - } - - var offset int64 - - if !append { - err := fp.Truncate(0) - if err != nil { - fp.Close() - return nil, err - } - } else { - n, err := fp.Seek(0, os.SEEK_END) - if err != nil { - fp.Close() - return nil, err - } - offset = int64(n) - } - - return newFileWriter(fp, offset), nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, subPath string) (storagedriver.FileInfo, error) { - fullPath := d.fullPath(subPath) - - fi, err := os.Stat(fullPath) - if err != nil { - if os.IsNotExist(err) { - return nil, storagedriver.PathNotFoundError{Path: subPath} - } - - return nil, err - } - - return fileInfo{ - path: subPath, - FileInfo: fi, - }, nil -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (d *driver) List(ctx context.Context, subPath string) ([]string, error) { - fullPath := d.fullPath(subPath) - - dir, err := os.Open(fullPath) - if err != nil { - if os.IsNotExist(err) { - return nil, storagedriver.PathNotFoundError{Path: subPath} - } - return nil, err - } - - defer dir.Close() - - fileNames, err := dir.Readdirnames(0) - if err != nil { - return nil, err - } - - keys := make([]string, 0, len(fileNames)) - for _, fileName := range fileNames { - keys = append(keys, path.Join(subPath, fileName)) - } - - return keys, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - source := d.fullPath(sourcePath) - dest := d.fullPath(destPath) - - if _, err := os.Stat(source); os.IsNotExist(err) { - return storagedriver.PathNotFoundError{Path: sourcePath} - } - - if err := os.MkdirAll(path.Dir(dest), 0755); err != nil { - return err - } - - err := os.Rename(source, dest) - return err -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, subPath string) error { - fullPath := d.fullPath(subPath) - - _, err := os.Stat(fullPath) - if err != nil && !os.IsNotExist(err) { - return err - } else if err != nil { - return storagedriver.PathNotFoundError{Path: subPath} - } - - err = os.RemoveAll(fullPath) - return err -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
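// A sketch of the FileWriter lifecycle implemented above, mirroring what
// PutContent does internally (ctx, d, and data are assumed):
//
//	w, err := d.Writer(ctx, "/some/path", false) // false: truncate, true: append
//	if err != nil {
//		return err
//	}
//	defer w.Close()
//	if _, err := w.Write(data); err != nil {
//		w.Cancel() // removes the partially written file
//		return err
//	}
//	return w.Commit() // flushes and syncs; the writer cannot be reused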
-func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod{} -} - -// fullPath returns the absolute path of a key within the Driver's storage. -func (d *driver) fullPath(subPath string) string { - return path.Join(d.rootDirectory, subPath) -} - -type fileInfo struct { - os.FileInfo - path string -} - -var _ storagedriver.FileInfo = fileInfo{} - -// Path provides the full path of the target of this file info. -func (fi fileInfo) Path() string { - return fi.path -} - -// Size returns current length in bytes of the file. The return value can -// be used to write to the end of the file at path. The value is -// meaningless if IsDir returns true. -func (fi fileInfo) Size() int64 { - if fi.IsDir() { - return 0 - } - - return fi.FileInfo.Size() -} - -// ModTime returns the modification time for the file. For backends that -// don't have a modification time, the creation time should be returned. -func (fi fileInfo) ModTime() time.Time { - return fi.FileInfo.ModTime() -} - -// IsDir returns true if the path is a directory. -func (fi fileInfo) IsDir() bool { - return fi.FileInfo.IsDir() -} - -type fileWriter struct { - file *os.File - size int64 - bw *bufio.Writer - closed bool - committed bool - cancelled bool -} - -func newFileWriter(file *os.File, size int64) *fileWriter { - return &fileWriter{ - file: file, - size: size, - bw: bufio.NewWriter(file), - } -} - -func (fw *fileWriter) Write(p []byte) (int, error) { - if fw.closed { - return 0, fmt.Errorf("already closed") - } else if fw.committed { - return 0, fmt.Errorf("already committed") - } else if fw.cancelled { - return 0, fmt.Errorf("already cancelled") - } - n, err := fw.bw.Write(p) - fw.size += int64(n) - return n, err -} - -func (fw *fileWriter) Size() int64 { - return fw.size -} - -func (fw *fileWriter) Close() error { - if fw.closed { - return fmt.Errorf("already closed") - } - - if err := fw.bw.Flush(); err != nil { - return err - } - - if err := fw.file.Sync(); err != nil { - return err - } - - if err := fw.file.Close(); err != nil { - return err - } - fw.closed = true - return nil -} - -func (fw *fileWriter) Cancel() error { - if fw.closed { - return fmt.Errorf("already closed") - } - - fw.cancelled = true - fw.file.Close() - return os.Remove(fw.file.Name()) -} - -func (fw *fileWriter) Commit() error { - if fw.closed { - return fmt.Errorf("already closed") - } else if fw.committed { - return fmt.Errorf("already committed") - } else if fw.cancelled { - return fmt.Errorf("already cancelled") - } - - if err := fw.bw.Flush(); err != nil { - return err - } - - if err := fw.file.Sync(); err != nil { - return err - } - - fw.committed = true - return nil -} diff --git a/docs/storage/driver/filesystem/driver_test.go b/docs/storage/driver/filesystem/driver_test.go deleted file mode 100644 index 3be85923..00000000 --- a/docs/storage/driver/filesystem/driver_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package filesystem - -import ( - "io/ioutil" - "os" - "reflect" - "testing" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - . "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } - -func init() { - root, err := ioutil.TempDir("", "driver-") - if err != nil { - panic(err) - } - defer os.Remove(root) - - driver, err := FromParameters(map[string]interface{}{ - "rootdirectory": root, - }) - if err != nil { - panic(err) - } - - testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { - return driver, nil - }, testsuites.NeverSkip) -} - -func TestFromParametersImpl(t *testing.T) { - - tests := []struct { - params map[string]interface{} // technically the yaml can contain anything - expected DriverParameters - pass bool - }{ - // check we use default threads and root dirs - { - params: map[string]interface{}{}, - expected: DriverParameters{ - RootDirectory: defaultRootDirectory, - MaxThreads: defaultMaxThreads, - }, - pass: true, - }, - // Testing initiation with a string maxThreads which can't be parsed - { - params: map[string]interface{}{ - "maxthreads": "fail", - }, - expected: DriverParameters{}, - pass: false, - }, - { - params: map[string]interface{}{ - "maxthreads": "100", - }, - expected: DriverParameters{ - RootDirectory: defaultRootDirectory, - MaxThreads: uint64(100), - }, - pass: true, - }, - { - params: map[string]interface{}{ - "maxthreads": 100, - }, - expected: DriverParameters{ - RootDirectory: defaultRootDirectory, - MaxThreads: uint64(100), - }, - pass: true, - }, - // check that we use minimum thread counts - { - params: map[string]interface{}{ - "maxthreads": 1, - }, - expected: DriverParameters{ - RootDirectory: defaultRootDirectory, - MaxThreads: minThreads, - }, - pass: true, - }, - } - - for _, item := range tests { - params, err := fromParametersImpl(item.params) - - if !item.pass { - // We only need to assert that expected failures have an error - if err == nil { - t.Fatalf("expected error configuring filesystem driver with invalid param: %+v", item.params) - } - continue - } - - if err != nil { - t.Fatalf("unexpected error creating filesystem driver: %s", err) - } - // Note that we get a pointer to params back - if !reflect.DeepEqual(*params, item.expected) { - t.Fatalf("unexpected params from filesystem driver. expected %+v, got %+v", item.expected, params) - } - } - -} diff --git a/docs/storage/driver/gcs/doc.go b/docs/storage/driver/gcs/doc.go deleted file mode 100644 index 0f23ea78..00000000 --- a/docs/storage/driver/gcs/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package gcs implements the Google Cloud Storage driver backend. Support can be -// enabled by including the "include_gcs" build tag. -package gcs diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go deleted file mode 100644 index 1369c280..00000000 --- a/docs/storage/driver/gcs/gcs.go +++ /dev/null @@ -1,873 +0,0 @@ -// Package gcs provides a storagedriver.StorageDriver implementation to -// store blobs in Google Cloud Storage. -// -// This package leverages the google.golang.org/cloud/storage client library -// for interfacing with gcs.
-// -// Because gcs is a key, value store the Stat call does not support last modification -// time for directories (directories are an abstraction for key, value stores) -// -// Note that the contents of incomplete uploads are not accessible even though -// Stat returns their length -// -// +build include_gcs - -package gcs - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "net/url" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" - "google.golang.org/api/googleapi" - "google.golang.org/cloud" - "google.golang.org/cloud/storage" - - "github.com/Sirupsen/logrus" - - ctx "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const ( - driverName = "gcs" - dummyProjectID = "" - - uploadSessionContentType = "application/x-docker-upload-session" - minChunkSize = 256 * 1024 - defaultChunkSize = 20 * minChunkSize - - maxTries = 5 -) - -var rangeHeader = regexp.MustCompile(`^bytes=([0-9])+-([0-9]+)$`) - -// driverParameters is a struct that encapsulates all of the driver parameters after all values have been set -type driverParameters struct { - bucket string - config *jwt.Config - email string - privateKey []byte - client *http.Client - rootDirectory string - chunkSize int -} - -func init() { - factory.Register(driverName, &gcsDriverFactory{}) -} - -// gcsDriverFactory implements the factory.StorageDriverFactory interface -type gcsDriverFactory struct{} - -// Create StorageDriver from parameters -func (factory *gcsDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -// driver is a storagedriver.StorageDriver implementation backed by GCS -// Objects are stored at absolute keys in the provided bucket. 
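// [Editor's addition, not part of the patch: an illustrative sketch of the
// registration pattern above. Callers never construct the driver directly;
// they resolve it by name through the factory. The parameter values below are
// hypothetical.]
func exampleResolveGCSDriver() (storagedriver.StorageDriver, error) {
	return factory.Create(driverName, map[string]interface{}{
		"bucket":        "my-registry-bucket",       // hypothetical bucket
		"rootdirectory": "registry",                 // optional key prefix
		"keyfile":       "/etc/docker/gcs-key.json", // hypothetical service-account key
	})
}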
-type driver struct {
-	client        *http.Client
-	bucket        string
-	email         string
-	privateKey    []byte
-	rootDirectory string
-	chunkSize     int
-}
-
-// FromParameters constructs a new Driver with a given parameters map
-// Required parameters:
-// - bucket
-func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
-	bucket, ok := parameters["bucket"]
-	if !ok || fmt.Sprint(bucket) == "" {
-		return nil, fmt.Errorf("No bucket parameter provided")
-	}
-
-	rootDirectory, ok := parameters["rootdirectory"]
-	if !ok {
-		rootDirectory = ""
-	}
-
-	chunkSize := defaultChunkSize
-	chunkSizeParam, ok := parameters["chunksize"]
-	if ok {
-		switch v := chunkSizeParam.(type) {
-		case string:
-			vv, err := strconv.Atoi(v)
-			if err != nil {
-				return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
-			}
-			chunkSize = vv
-		case int, uint, int32, uint32, uint64, int64:
-			chunkSize = int(reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int())
-		default:
-			return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
-		}
-
-		if chunkSize < minChunkSize {
-			return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
-		}
-
-		if chunkSize%minChunkSize != 0 {
-			return nil, fmt.Errorf("chunksize should be a multiple of %d", minChunkSize)
-		}
-	}
-
-	var ts oauth2.TokenSource
-	jwtConf := new(jwt.Config)
-	if keyfile, ok := parameters["keyfile"]; ok {
-		jsonKey, err := ioutil.ReadFile(fmt.Sprint(keyfile))
-		if err != nil {
-			return nil, err
-		}
-		jwtConf, err = google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl)
-		if err != nil {
-			return nil, err
-		}
-		ts = jwtConf.TokenSource(context.Background())
-	} else {
-		var err error
-		ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	params := driverParameters{
-		bucket:        fmt.Sprint(bucket),
-		rootDirectory: fmt.Sprint(rootDirectory),
-		email:         jwtConf.Email,
-		privateKey:    jwtConf.PrivateKey,
-		client:        oauth2.NewClient(context.Background(), ts),
-		chunkSize:     chunkSize,
-	}
-
-	return New(params)
-}
-
-// New constructs a new driver
-func New(params driverParameters) (storagedriver.StorageDriver, error) {
-	rootDirectory := strings.Trim(params.rootDirectory, "/")
-	if rootDirectory != "" {
-		rootDirectory += "/"
-	}
-	if params.chunkSize <= 0 || params.chunkSize%minChunkSize != 0 {
-		return nil, fmt.Errorf("Invalid chunksize: %d is not a positive multiple of %d", params.chunkSize, minChunkSize)
-	}
-	d := &driver{
-		bucket:        params.bucket,
-		rootDirectory: rootDirectory,
-		email:         params.email,
-		privateKey:    params.privateKey,
-		client:        params.client,
-		chunkSize:     params.chunkSize,
-	}
-
-	return &base.Base{
-		StorageDriver: d,
-	}, nil
-}
-
-// Implement the storagedriver.StorageDriver interface
-
-func (d *driver) Name() string {
-	return driverName
-}
-
-// GetContent retrieves the content stored at "path" as a []byte.
-// This should primarily be used for small objects.
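// [Editor's addition, not part of the patch: the chunksize rules enforced by
// FromParameters above, extracted as a sketch for clarity.]
func exampleValidChunkSize(n int) bool {
	// must be at least minChunkSize (256 KiB) and an exact multiple of it,
	// matching the granularity GCS requires for resumable-upload chunks
	return n >= minChunkSize && n%minChunkSize == 0
}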
-func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) { - gcsContext := d.context(context) - name := d.pathToKey(path) - var rc io.ReadCloser - err := retry(func() error { - var err error - rc, err = storage.NewReader(gcsContext, d.bucket, name) - return err - }) - if err == storage.ErrObjectNotExist { - return nil, storagedriver.PathNotFoundError{Path: path} - } - if err != nil { - return nil, err - } - defer rc.Close() - - p, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - return p, nil -} - -// PutContent stores the []byte content at a location designated by "path". -// This should primarily be used for small objects. -func (d *driver) PutContent(context ctx.Context, path string, contents []byte) error { - return retry(func() error { - wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) - wc.ContentType = "application/octet-stream" - return putContentsClose(wc, contents) - }) -} - -// Reader retrieves an io.ReadCloser for the content stored at "path" -// with a given byte offset. -// May be used to resume reading a stream by providing a nonzero offset. -func (d *driver) Reader(context ctx.Context, path string, offset int64) (io.ReadCloser, error) { - res, err := getObject(d.client, d.bucket, d.pathToKey(path), offset) - if err != nil { - if res != nil { - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return nil, storagedriver.PathNotFoundError{Path: path} - } - - if res.StatusCode == http.StatusRequestedRangeNotSatisfiable { - res.Body.Close() - obj, err := storageStatObject(d.context(context), d.bucket, d.pathToKey(path)) - if err != nil { - return nil, err - } - if offset == int64(obj.Size) { - return ioutil.NopCloser(bytes.NewReader([]byte{})), nil - } - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - } - return nil, err - } - if res.Header.Get("Content-Type") == uploadSessionContentType { - defer res.Body.Close() - return nil, storagedriver.PathNotFoundError{Path: path} - } - return res.Body, nil -} - -func getObject(client *http.Client, bucket string, name string, offset int64) (*http.Response, error) { - // copied from google.golang.org/cloud/storage#NewReader : - // to set the additional "Range" header - u := &url.URL{ - Scheme: "https", - Host: "storage.googleapis.com", - Path: fmt.Sprintf("/%s/%s", bucket, name), - } - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, err - } - if offset > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%v-", offset)) - } - var res *http.Response - err = retry(func() error { - var err error - res, err = client.Do(req) - return err - }) - if err != nil { - return nil, err - } - return res, googleapi.CheckMediaResponse(res) -} - -// Writer returns a FileWriter which will store the content written to it -// at the location designated by "path" after the call to Commit. -func (d *driver) Writer(context ctx.Context, path string, append bool) (storagedriver.FileWriter, error) { - writer := &writer{ - client: d.client, - bucket: d.bucket, - name: d.pathToKey(path), - buffer: make([]byte, d.chunkSize), - } - - if append { - err := writer.init(path) - if err != nil { - return nil, err - } - } - return writer, nil -} - -type writer struct { - client *http.Client - bucket string - name string - size int64 - offset int64 - closed bool - sessionURI string - buffer []byte - buffSize int -} - -// Cancel removes any written content from this FileWriter. 
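// [Editor's addition, not part of the patch: a sketch of resuming a read with
// the offset handling above. Reader maps a 404 to PathNotFoundError and an
// unsatisfiable range to InvalidOffsetError, unless the offset equals the
// object size, which yields an empty reader. Path and offset are hypothetical.]
func exampleResumeRead(d *driver, c ctx.Context) ([]byte, error) {
	rc, err := d.Reader(c, "/docker/registry/v2/blobs/sha256/ab/abcd/data", 4096)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}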
-func (w *writer) Cancel() error {
-	w.closed = true
-	err := storageDeleteObject(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name)
-	if err != nil {
-		if status, ok := err.(*googleapi.Error); ok {
-			if status.Code == http.StatusNotFound {
-				err = nil
-			}
-		}
-	}
-	return err
-}
-
-func (w *writer) Close() error {
-	if w.closed {
-		return nil
-	}
-	w.closed = true
-
-	err := w.writeChunk()
-	if err != nil {
-		return err
-	}
-
-	// Copy the remaining bytes from the buffer to the upload session
-	// Normally buffSize will be smaller than minChunkSize. However, in the
-	// unlikely event that the upload session failed to start, this number could be higher.
-	// In this case we can safely clip the remaining bytes to the minChunkSize
-	if w.buffSize > minChunkSize {
-		w.buffSize = minChunkSize
-	}
-
-	// commit the writes by updating the upload session
-	err = retry(func() error {
-		wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name)
-		wc.ContentType = uploadSessionContentType
-		wc.Metadata = map[string]string{
-			"Session-URI": w.sessionURI,
-			"Offset":      strconv.FormatInt(w.offset, 10),
-		}
-		return putContentsClose(wc, w.buffer[0:w.buffSize])
-	})
-	if err != nil {
-		return err
-	}
-	w.size = w.offset + int64(w.buffSize)
-	w.buffSize = 0
-	return nil
-}
-
-func putContentsClose(wc *storage.Writer, contents []byte) error {
-	size := len(contents)
-	var nn int
-	var err error
-	for nn < size {
-		// assign to the outer err rather than shadowing it, so a write
-		// failure actually reaches the CloseWithError path below
-		var n int
-		n, err = wc.Write(contents[nn:size])
-		nn += n
-		if err != nil {
-			break
-		}
-	}
-	if err != nil {
-		wc.CloseWithError(err)
-		return err
-	}
-	return wc.Close()
-}
-
-// Commit flushes all content written to this FileWriter and makes it
-// available for future calls to StorageDriver.GetContent and
-// StorageDriver.Reader.
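// [Editor's addition, not part of the patch: Close above persists the
// resumable-session state (Session-URI and Offset) as metadata on a
// placeholder object, which writer.init reads back when Writer is called with
// append=true. A sketch of that round trip; the upload path is hypothetical.]
func exampleResumeUpload(d *driver, c ctx.Context) error {
	w, err := d.Writer(c, "/uploads/f3a1", true) // append=true replays the saved session
	if err != nil {
		return err
	}
	if _, err := w.Write([]byte("remaining bytes")); err != nil {
		return err
	}
	return w.Commit()
}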
-func (w *writer) Commit() error {
-
-	if err := w.checkClosed(); err != nil {
-		return err
-	}
-	w.closed = true
-
-	// no session started yet; just perform a simple upload
-	if w.sessionURI == "" {
-		err := retry(func() error {
-			wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name)
-			wc.ContentType = "application/octet-stream"
-			return putContentsClose(wc, w.buffer[0:w.buffSize])
-		})
-		if err != nil {
-			return err
-		}
-		w.size = w.offset + int64(w.buffSize)
-		w.buffSize = 0
-		return nil
-	}
-	size := w.offset + int64(w.buffSize)
-	var nn int
-	// loop must be performed at least once to ensure the file is committed even when
-	// the buffer is empty
-	for {
-		n, err := putChunk(w.client, w.sessionURI, w.buffer[nn:w.buffSize], w.offset, size)
-		nn += int(n)
-		w.offset += n
-		w.size = w.offset
-		if err != nil {
-			w.buffSize = copy(w.buffer, w.buffer[nn:w.buffSize])
-			return err
-		}
-		if nn == w.buffSize {
-			break
-		}
-	}
-	w.buffSize = 0
-	return nil
-}
-
-func (w *writer) checkClosed() error {
-	if w.closed {
-		return fmt.Errorf("Writer already closed")
-	}
-	return nil
-}
-
-func (w *writer) writeChunk() error {
-	var err error
-	// chunks can be uploaded only in multiples of minChunkSize
-	// chunkSize is a multiple of minChunkSize less than or equal to buffSize
-	chunkSize := w.buffSize - (w.buffSize % minChunkSize)
-	if chunkSize == 0 {
-		return nil
-	}
-	// if there is no sessionURI yet, obtain one by starting the session
-	if w.sessionURI == "" {
-		w.sessionURI, err = startSession(w.client, w.bucket, w.name)
-	}
-	if err != nil {
-		return err
-	}
-	nn, err := putChunk(w.client, w.sessionURI, w.buffer[0:chunkSize], w.offset, -1)
-	w.offset += nn
-	if w.offset > w.size {
-		w.size = w.offset
-	}
-	// shift the remaining bytes to the start of the buffer
-	w.buffSize = copy(w.buffer, w.buffer[int(nn):w.buffSize])
-
-	return err
-}
-
-func (w *writer) Write(p []byte) (int, error) {
-	err := w.checkClosed()
-	if err != nil {
-		return 0, err
-	}
-
-	var nn int
-	for nn < len(p) {
-		n := copy(w.buffer[w.buffSize:], p[nn:])
-		w.buffSize += n
-		if w.buffSize == cap(w.buffer) {
-			err = w.writeChunk()
-			if err != nil {
-				break
-			}
-		}
-		nn += n
-	}
-	return nn, err
-}
-
-// Size returns the number of bytes written to this FileWriter.
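// [Editor's addition, not part of the patch: the buffer arithmetic used by
// writeChunk above, extracted as a hypothetical helper. Only whole multiples
// of minChunkSize are uploaded; the remainder stays buffered.]
func exampleSplitBuffer(buffSize int) (upload, carry int) {
	upload = buffSize - (buffSize % minChunkSize) // largest uploadable prefix
	carry = buffSize - upload                     // carried over to the next Write
	return upload, carry
}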
-func (w *writer) Size() int64 { - return w.size -} - -func (w *writer) init(path string) error { - res, err := getObject(w.client, w.bucket, w.name, 0) - if err != nil { - return err - } - defer res.Body.Close() - if res.Header.Get("Content-Type") != uploadSessionContentType { - return storagedriver.PathNotFoundError{Path: path} - } - offset, err := strconv.ParseInt(res.Header.Get("X-Goog-Meta-Offset"), 10, 64) - if err != nil { - return err - } - buffer, err := ioutil.ReadAll(res.Body) - if err != nil { - return err - } - w.sessionURI = res.Header.Get("X-Goog-Meta-Session-URI") - w.buffSize = copy(w.buffer, buffer) - w.offset = offset - w.size = offset + int64(w.buffSize) - return nil -} - -type request func() error - -func retry(req request) error { - backoff := time.Second - var err error - for i := 0; i < maxTries; i++ { - err = req() - if err == nil { - return nil - } - - status, ok := err.(*googleapi.Error) - if !ok || (status.Code != 429 && status.Code < http.StatusInternalServerError) { - return err - } - - time.Sleep(backoff - time.Second + (time.Duration(rand.Int31n(1000)) * time.Millisecond)) - if i <= 4 { - backoff = backoff * 2 - } - } - return err -} - -// Stat retrieves the FileInfo for the given path, including the current -// size in bytes and the creation time. -func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, error) { - var fi storagedriver.FileInfoFields - //try to get as file - gcsContext := d.context(context) - obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path)) - if err == nil { - if obj.ContentType == uploadSessionContentType { - return nil, storagedriver.PathNotFoundError{Path: path} - } - fi = storagedriver.FileInfoFields{ - Path: path, - Size: obj.Size, - ModTime: obj.Updated, - IsDir: false, - } - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil - } - //try to get as folder - dirpath := d.pathToDirKey(path) - - var query *storage.Query - query = &storage.Query{} - query.Prefix = dirpath - query.MaxResults = 1 - - objects, err := storageListObjects(gcsContext, d.bucket, query) - if err != nil { - return nil, err - } - if len(objects.Results) < 1 { - return nil, storagedriver.PathNotFoundError{Path: path} - } - fi = storagedriver.FileInfoFields{ - Path: path, - IsDir: true, - } - obj = objects.Results[0] - if obj.Name == dirpath { - fi.Size = obj.Size - fi.ModTime = obj.Updated - } - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the -//given path. -func (d *driver) List(context ctx.Context, path string) ([]string, error) { - var query *storage.Query - query = &storage.Query{} - query.Delimiter = "/" - query.Prefix = d.pathToDirKey(path) - list := make([]string, 0, 64) - for { - objects, err := storageListObjects(d.context(context), d.bucket, query) - if err != nil { - return nil, err - } - for _, object := range objects.Results { - // GCS does not guarantee strong consistency between - // DELETE and LIST operations. 
Check that the object is not deleted, - // and filter out any objects with a non-zero time-deleted - if object.Deleted.IsZero() && object.ContentType != uploadSessionContentType { - list = append(list, d.keyToPath(object.Name)) - } - } - for _, subpath := range objects.Prefixes { - subpath = d.keyToPath(subpath) - list = append(list, subpath) - } - query = objects.Next - if query == nil { - break - } - } - if path != "/" && len(list) == 0 { - // Treat empty response as missing directory, since we don't actually - // have directories in Google Cloud Storage. - return nil, storagedriver.PathNotFoundError{Path: path} - } - return list, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the -// original object. -func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) error { - gcsContext := d.context(context) - _, err := storageCopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) - if err != nil { - if status, ok := err.(*googleapi.Error); ok { - if status.Code == http.StatusNotFound { - return storagedriver.PathNotFoundError{Path: sourcePath} - } - } - return err - } - err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) - // if deleting the file fails, log the error, but do not fail; the file was successfully copied, - // and the original should eventually be cleaned when purging the uploads folder. - if err != nil { - logrus.Infof("error deleting file: %v due to %v", sourcePath, err) - } - return nil -} - -// listAll recursively lists all names of objects stored at "prefix" and its subpaths. -func (d *driver) listAll(context context.Context, prefix string) ([]string, error) { - list := make([]string, 0, 64) - query := &storage.Query{} - query.Prefix = prefix - query.Versions = false - for { - objects, err := storageListObjects(d.context(context), d.bucket, query) - if err != nil { - return nil, err - } - for _, obj := range objects.Results { - // GCS does not guarantee strong consistency between - // DELETE and LIST operations. Check that the object is not deleted, - // and filter out any objects with a non-zero time-deleted - if obj.Deleted.IsZero() { - list = append(list, obj.Name) - } - } - query = objects.Next - if query == nil { - break - } - } - return list, nil -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(context ctx.Context, path string) error { - prefix := d.pathToDirKey(path) - gcsContext := d.context(context) - keys, err := d.listAll(gcsContext, prefix) - if err != nil { - return err - } - if len(keys) > 0 { - sort.Sort(sort.Reverse(sort.StringSlice(keys))) - for _, key := range keys { - err := storageDeleteObject(gcsContext, d.bucket, key) - // GCS only guarantees eventual consistency, so listAll might return - // paths that no longer exist. 
If this happens, just ignore any not - // found error - if status, ok := err.(*googleapi.Error); ok { - if status.Code == http.StatusNotFound { - err = nil - } - } - if err != nil { - return err - } - } - return nil - } - err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(path)) - if err != nil { - if status, ok := err.(*googleapi.Error); ok { - if status.Code == http.StatusNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - } - } - return err -} - -func storageDeleteObject(context context.Context, bucket string, name string) error { - return retry(func() error { - return storage.DeleteObject(context, bucket, name) - }) -} - -func storageStatObject(context context.Context, bucket string, name string) (*storage.Object, error) { - var obj *storage.Object - err := retry(func() error { - var err error - obj, err = storage.StatObject(context, bucket, name) - return err - }) - return obj, err -} - -func storageListObjects(context context.Context, bucket string, q *storage.Query) (*storage.Objects, error) { - var objs *storage.Objects - err := retry(func() error { - var err error - objs, err = storage.ListObjects(context, bucket, q) - return err - }) - return objs, err -} - -func storageCopyObject(context context.Context, srcBucket, srcName string, destBucket, destName string, attrs *storage.ObjectAttrs) (*storage.Object, error) { - var obj *storage.Object - err := retry(func() error { - var err error - obj, err = storage.CopyObject(context, srcBucket, srcName, destBucket, destName, attrs) - return err - }) - return obj, err -} - -// URLFor returns a URL which may be used to retrieve the content stored at -// the given path, possibly using the given options. -// Returns ErrUnsupportedMethod if this driver has no privateKey -func (d *driver) URLFor(context ctx.Context, path string, options map[string]interface{}) (string, error) { - if d.privateKey == nil { - return "", storagedriver.ErrUnsupportedMethod{} - } - - name := d.pathToKey(path) - methodString := "GET" - method, ok := options["method"] - if ok { - methodString, ok = method.(string) - if !ok || (methodString != "GET" && methodString != "HEAD") { - return "", storagedriver.ErrUnsupportedMethod{} - } - } - - expiresTime := time.Now().Add(20 * time.Minute) - expires, ok := options["expiry"] - if ok { - et, ok := expires.(time.Time) - if ok { - expiresTime = et - } - } - - opts := &storage.SignedURLOptions{ - GoogleAccessID: d.email, - PrivateKey: d.privateKey, - Method: methodString, - Expires: expiresTime, - } - return storage.SignedURL(d.bucket, name, opts) -} - -func startSession(client *http.Client, bucket string, name string) (uri string, err error) { - u := &url.URL{ - Scheme: "https", - Host: "www.googleapis.com", - Path: fmt.Sprintf("/upload/storage/v1/b/%v/o", bucket), - RawQuery: fmt.Sprintf("uploadType=resumable&name=%v", name), - } - err = retry(func() error { - req, err := http.NewRequest("POST", u.String(), nil) - if err != nil { - return err - } - req.Header.Set("X-Upload-Content-Type", "application/octet-stream") - req.Header.Set("Content-Length", "0") - resp, err := client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - err = googleapi.CheckMediaResponse(resp) - if err != nil { - return err - } - uri = resp.Header.Get("Location") - return nil - }) - return uri, err -} - -func putChunk(client *http.Client, sessionURI string, chunk []byte, from int64, totalSize int64) (int64, error) { - bytesPut := int64(0) - err := retry(func() error { - req, err := http.NewRequest("PUT", 
sessionURI, bytes.NewReader(chunk)) - if err != nil { - return err - } - length := int64(len(chunk)) - to := from + length - 1 - size := "*" - if totalSize >= 0 { - size = strconv.FormatInt(totalSize, 10) - } - req.Header.Set("Content-Type", "application/octet-stream") - if from == to+1 { - req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", size)) - } else { - req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", from, to, size)) - } - req.Header.Set("Content-Length", strconv.FormatInt(length, 10)) - - resp, err := client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - if totalSize < 0 && resp.StatusCode == 308 { - groups := rangeHeader.FindStringSubmatch(resp.Header.Get("Range")) - end, err := strconv.ParseInt(groups[2], 10, 64) - if err != nil { - return err - } - bytesPut = end - from + 1 - return nil - } - err = googleapi.CheckMediaResponse(resp) - if err != nil { - return err - } - bytesPut = to - from + 1 - return nil - }) - return bytesPut, err -} - -func (d *driver) context(context ctx.Context) context.Context { - return cloud.WithContext(context, dummyProjectID, d.client) -} - -func (d *driver) pathToKey(path string) string { - return strings.TrimRight(d.rootDirectory+strings.TrimLeft(path, "/"), "/") -} - -func (d *driver) pathToDirKey(path string) string { - return d.pathToKey(path) + "/" -} - -func (d *driver) keyToPath(key string) string { - return "/" + strings.Trim(strings.TrimPrefix(key, d.rootDirectory), "/") -} diff --git a/docs/storage/driver/gcs/gcs_test.go b/docs/storage/driver/gcs/gcs_test.go deleted file mode 100644 index f2808d5f..00000000 --- a/docs/storage/driver/gcs/gcs_test.go +++ /dev/null @@ -1,311 +0,0 @@ -// +build include_gcs - -package gcs - -import ( - "io/ioutil" - "os" - "testing" - - "fmt" - ctx "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "google.golang.org/api/googleapi" - "google.golang.org/cloud/storage" - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. 
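// [Editor's addition, not part of the patch: expected behavior of the path/key
// helpers above, assuming a rootDirectory of "registry/".]
func exampleKeyMapping() {
	d := &driver{rootDirectory: "registry/"}
	fmt.Println(d.pathToKey("/a/b"))         // "registry/a/b"
	fmt.Println(d.pathToDirKey("/a/b"))      // "registry/a/b/"
	fmt.Println(d.keyToPath("registry/a/b")) // "/a/b"
}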
-func Test(t *testing.T) { check.TestingT(t) } - -var gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error) -var skipGCS func() string - -func init() { - bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET") - credentials := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") - - // Skip GCS storage driver tests if environment variable parameters are not provided - skipGCS = func() string { - if bucket == "" || credentials == "" { - return "The following environment variables must be set to enable these tests: REGISTRY_STORAGE_GCS_BUCKET, GOOGLE_APPLICATION_CREDENTIALS" - } - return "" - } - - if skipGCS() != "" { - return - } - - root, err := ioutil.TempDir("", "driver-") - if err != nil { - panic(err) - } - defer os.Remove(root) - var ts oauth2.TokenSource - var email string - var privateKey []byte - - ts, err = google.DefaultTokenSource(ctx.Background(), storage.ScopeFullControl) - if err != nil { - // Assume that the file contents are within the environment variable since it exists - // but does not contain a valid file path - jwtConfig, err := google.JWTConfigFromJSON([]byte(credentials), storage.ScopeFullControl) - if err != nil { - panic(fmt.Sprintf("Error reading JWT config : %s", err)) - } - email = jwtConfig.Email - privateKey = []byte(jwtConfig.PrivateKey) - if len(privateKey) == 0 { - panic("Error reading JWT config : missing private_key property") - } - if email == "" { - panic("Error reading JWT config : missing client_email property") - } - ts = jwtConfig.TokenSource(ctx.Background()) - } - - gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) { - parameters := driverParameters{ - bucket: bucket, - rootDirectory: root, - email: email, - privateKey: privateKey, - client: oauth2.NewClient(ctx.Background(), ts), - chunkSize: defaultChunkSize, - } - - return New(parameters) - } - - testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { - return gcsDriverConstructor(root) - }, skipGCS) -} - -// Test Committing a FileWriter without having called Write -func TestCommitEmpty(t *testing.T) { - if skipGCS() != "" { - t.Skip(skipGCS()) - } - - validRoot, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(validRoot) - - driver, err := gcsDriverConstructor(validRoot) - if err != nil { - t.Fatalf("unexpected error creating rooted driver: %v", err) - } - - filename := "/test" - ctx := ctx.Background() - - writer, err := driver.Writer(ctx, filename, false) - defer driver.Delete(ctx, filename) - if err != nil { - t.Fatalf("driver.Writer: unexpected error: %v", err) - } - err = writer.Commit() - if err != nil { - t.Fatalf("writer.Commit: unexpected error: %v", err) - } - err = writer.Close() - if err != nil { - t.Fatalf("writer.Close: unexpected error: %v", err) - } - if writer.Size() != 0 { - t.Fatalf("writer.Size: %d != 0", writer.Size()) - } - readContents, err := driver.GetContent(ctx, filename) - if err != nil { - t.Fatalf("driver.GetContent: unexpected error: %v", err) - } - if len(readContents) != 0 { - t.Fatalf("len(driver.GetContent(..)): %d != 0", len(readContents)) - } -} - -// Test Committing a FileWriter after having written exactly -// defaultChunksize bytes. 
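// [Editor's addition, not part of the patch: TestRetry below exercises the
// retry helper defined earlier in gcs.go, which retries only HTTP 429 and 5xx
// googleapi errors. Its per-attempt sleep, as a sketch (assumes the "time"
// import; jitterMillis stands in for the random 0-999ms jitter):]
func exampleBackoff(attempt int, jitterMillis int64) time.Duration {
	backoff := time.Second << uint(attempt) // 1s, 2s, 4s, ... as it doubles
	return backoff - time.Second + time.Duration(jitterMillis)*time.Millisecond
}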
-func TestCommit(t *testing.T) { - if skipGCS() != "" { - t.Skip(skipGCS()) - } - - validRoot, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(validRoot) - - driver, err := gcsDriverConstructor(validRoot) - if err != nil { - t.Fatalf("unexpected error creating rooted driver: %v", err) - } - - filename := "/test" - ctx := ctx.Background() - - contents := make([]byte, defaultChunkSize) - writer, err := driver.Writer(ctx, filename, false) - defer driver.Delete(ctx, filename) - if err != nil { - t.Fatalf("driver.Writer: unexpected error: %v", err) - } - _, err = writer.Write(contents) - if err != nil { - t.Fatalf("writer.Write: unexpected error: %v", err) - } - err = writer.Commit() - if err != nil { - t.Fatalf("writer.Commit: unexpected error: %v", err) - } - err = writer.Close() - if err != nil { - t.Fatalf("writer.Close: unexpected error: %v", err) - } - if writer.Size() != int64(len(contents)) { - t.Fatalf("writer.Size: %d != %d", writer.Size(), len(contents)) - } - readContents, err := driver.GetContent(ctx, filename) - if err != nil { - t.Fatalf("driver.GetContent: unexpected error: %v", err) - } - if len(readContents) != len(contents) { - t.Fatalf("len(driver.GetContent(..)): %d != %d", len(readContents), len(contents)) - } -} - -func TestRetry(t *testing.T) { - if skipGCS() != "" { - t.Skip(skipGCS()) - } - - assertError := func(expected string, observed error) { - observedMsg := "" - if observed != nil { - observedMsg = observed.Error() - } - if observedMsg != expected { - t.Fatalf("expected %v, observed %v\n", expected, observedMsg) - } - } - - err := retry(func() error { - return &googleapi.Error{ - Code: 503, - Message: "google api error", - } - }) - assertError("googleapi: Error 503: google api error", err) - - err = retry(func() error { - return &googleapi.Error{ - Code: 404, - Message: "google api error", - } - }) - assertError("googleapi: Error 404: google api error", err) - - err = retry(func() error { - return fmt.Errorf("error") - }) - assertError("error", err) -} - -func TestEmptyRootList(t *testing.T) { - if skipGCS() != "" { - t.Skip(skipGCS()) - } - - validRoot, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(validRoot) - - rootedDriver, err := gcsDriverConstructor(validRoot) - if err != nil { - t.Fatalf("unexpected error creating rooted driver: %v", err) - } - - emptyRootDriver, err := gcsDriverConstructor("") - if err != nil { - t.Fatalf("unexpected error creating empty root driver: %v", err) - } - - slashRootDriver, err := gcsDriverConstructor("/") - if err != nil { - t.Fatalf("unexpected error creating slash root driver: %v", err) - } - - filename := "/test" - contents := []byte("contents") - ctx := ctx.Background() - err = rootedDriver.PutContent(ctx, filename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer func() { - err := rootedDriver.Delete(ctx, filename) - if err != nil { - t.Fatalf("failed to remove %v due to %v\n", filename, err) - } - }() - keys, err := emptyRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } - - keys, err = slashRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: 
%q != %q", path, storagedriver.PathRegexp) - } - } -} - -// TestMoveDirectory checks that moving a directory returns an error. -func TestMoveDirectory(t *testing.T) { - if skipGCS() != "" { - t.Skip(skipGCS()) - } - - validRoot, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(validRoot) - - driver, err := gcsDriverConstructor(validRoot) - if err != nil { - t.Fatalf("unexpected error creating rooted driver: %v", err) - } - - ctx := ctx.Background() - contents := []byte("contents") - // Create a regular file. - err = driver.PutContent(ctx, "/parent/dir/foo", contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer func() { - err := driver.Delete(ctx, "/parent") - if err != nil { - t.Fatalf("failed to remove /parent due to %v\n", err) - } - }() - - err = driver.Move(ctx, "/parent/dir", "/parent/other") - if err == nil { - t.Fatalf("Moving directory /parent/dir /parent/other should have return a non-nil error\n") - } -} diff --git a/docs/storage/driver/inmemory/driver.go b/docs/storage/driver/inmemory/driver.go deleted file mode 100644 index eb2fd1cf..00000000 --- a/docs/storage/driver/inmemory/driver.go +++ /dev/null @@ -1,312 +0,0 @@ -package inmemory - -import ( - "fmt" - "io" - "io/ioutil" - "sync" - "time" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "inmemory" - -func init() { - factory.Register(driverName, &inMemoryDriverFactory{}) -} - -// inMemoryDriverFacotry implements the factory.StorageDriverFactory interface. -type inMemoryDriverFactory struct{} - -func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return New(), nil -} - -type driver struct { - root *dir - mutex sync.RWMutex -} - -// baseEmbed allows us to hide the Base embed. -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by a local map. -// Intended solely for example and testing purposes. -type Driver struct { - baseEmbed // embedded, hidden base driver. -} - -var _ storagedriver.StorageDriver = &Driver{} - -// New constructs a new Driver. -func New() *Driver { - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: &driver{ - root: &dir{ - common: common{ - p: "/", - mod: time.Now(), - }, - }, - }, - }, - }, - } -} - -// Implement the storagedriver.StorageDriver interface. - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - rc, err := d.Reader(ctx, path, 0) - if err != nil { - return nil, err - } - defer rc.Close() - - return ioutil.ReadAll(rc) -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, p string, contents []byte) error { - d.mutex.Lock() - defer d.mutex.Unlock() - - normalized := normalize(p) - - f, err := d.root.mkfile(normalized) - if err != nil { - // TODO(stevvooe): Again, we need to clarify when this is not a - // directory in StorageDriver API. 
- return fmt.Errorf("not a file") - } - - f.truncate() - f.WriteAt(contents, 0) - - return nil -} - -// Reader retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - if offset < 0 { - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - normalized := normalize(path) - found := d.root.find(normalized) - - if found.path() != normalized { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - if found.isdir() { - return nil, fmt.Errorf("%q is a directory", path) - } - - return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil -} - -// Writer returns a FileWriter which will store the content written to it -// at the location designated by "path" after the call to Commit. -func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { - d.mutex.Lock() - defer d.mutex.Unlock() - - normalized := normalize(path) - - f, err := d.root.mkfile(normalized) - if err != nil { - return nil, fmt.Errorf("not a file") - } - - if !append { - f.truncate() - } - - return d.newWriter(f), nil -} - -// Stat returns info about the provided path. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - normalized := normalize(path) - found := d.root.find(normalized) - - if found.path() != normalized { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - fi := storagedriver.FileInfoFields{ - Path: path, - IsDir: found.isdir(), - ModTime: found.modtime(), - } - - if !fi.IsDir { - fi.Size = int64(len(found.(*file).data)) - } - - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (d *driver) List(ctx context.Context, path string) ([]string, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - normalized := normalize(path) - - found := d.root.find(normalized) - - if !found.isdir() { - return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this... - } - - entries, err := found.(*dir).list(normalized) - - if err != nil { - switch err { - case errNotExists: - return nil, storagedriver.PathNotFoundError{Path: path} - case errIsNotDir: - return nil, fmt.Errorf("not a directory") - default: - return nil, err - } - } - - return entries, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - d.mutex.Lock() - defer d.mutex.Unlock() - - normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath) - - err := d.root.move(normalizedSrc, normalizedDst) - switch err { - case errNotExists: - return storagedriver.PathNotFoundError{Path: destPath} - default: - return err - } -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, path string) error { - d.mutex.Lock() - defer d.mutex.Unlock() - - normalized := normalize(path) - - err := d.root.delete(normalized) - switch err { - case errNotExists: - return storagedriver.PathNotFoundError{Path: path} - default: - return err - } -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. 
-// May return an ErrUnsupportedMethod in certain StorageDriver implementations.
-func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
-	return "", storagedriver.ErrUnsupportedMethod{}
-}
-
-type writer struct {
-	d         *driver
-	f         *file
-	closed    bool
-	committed bool
-	cancelled bool
-}
-
-func (d *driver) newWriter(f *file) storagedriver.FileWriter {
-	return &writer{
-		d: d,
-		f: f,
-	}
-}
-
-func (w *writer) Write(p []byte) (int, error) {
-	if w.closed {
-		return 0, fmt.Errorf("already closed")
-	} else if w.committed {
-		return 0, fmt.Errorf("already committed")
-	} else if w.cancelled {
-		return 0, fmt.Errorf("already cancelled")
-	}
-
-	w.d.mutex.Lock()
-	defer w.d.mutex.Unlock()
-
-	return w.f.WriteAt(p, int64(len(w.f.data)))
-}
-
-func (w *writer) Size() int64 {
-	w.d.mutex.RLock()
-	defer w.d.mutex.RUnlock()
-
-	return int64(len(w.f.data))
-}
-
-func (w *writer) Close() error {
-	if w.closed {
-		return fmt.Errorf("already closed")
-	}
-	w.closed = true
-	return nil
-}
-
-func (w *writer) Cancel() error {
-	if w.closed {
-		return fmt.Errorf("already closed")
-	} else if w.committed {
-		return fmt.Errorf("already committed")
-	}
-	w.cancelled = true
-
-	w.d.mutex.Lock()
-	defer w.d.mutex.Unlock()
-
-	return w.d.root.delete(w.f.path())
-}
-
-func (w *writer) Commit() error {
-	if w.closed {
-		return fmt.Errorf("already closed")
-	} else if w.committed {
-		return fmt.Errorf("already committed")
-	} else if w.cancelled {
-		return fmt.Errorf("already cancelled")
-	}
-	w.committed = true
-	return nil
-}
diff --git a/docs/storage/driver/inmemory/driver_test.go b/docs/storage/driver/inmemory/driver_test.go
deleted file mode 100644
index dbc1916f..00000000
--- a/docs/storage/driver/inmemory/driver_test.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package inmemory
-
-import (
-	"testing"
-
-	storagedriver "github.com/docker/distribution/registry/storage/driver"
-	"github.com/docker/distribution/registry/storage/driver/testsuites"
-	"gopkg.in/check.v1"
-)
-
-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) }
-
-func init() {
-	inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) {
-		return New(), nil
-	}
-	testsuites.RegisterSuite(inmemoryDriverConstructor, testsuites.NeverSkip)
-}
diff --git a/docs/storage/driver/inmemory/mfs.go b/docs/storage/driver/inmemory/mfs.go
deleted file mode 100644
index cdefacfd..00000000
--- a/docs/storage/driver/inmemory/mfs.go
+++ /dev/null
@@ -1,338 +0,0 @@
-package inmemory
-
-import (
-	"fmt"
-	"io"
-	"path"
-	"sort"
-	"strings"
-	"time"
-)
-
-var (
-	errExists    = fmt.Errorf("exists")
-	errNotExists = fmt.Errorf("notexists")
-	errIsNotDir  = fmt.Errorf("notdir")
-	errIsDir     = fmt.Errorf("isdir")
-)
-
-type node interface {
-	name() string
-	path() string
-	isdir() bool
-	modtime() time.Time
-}
-
-// dir is the central type for the memory-based storagedriver. All operations
-// are dispatched from a root dir.
-type dir struct {
-	common
-
-	// TODO(stevvooe): Use sorted slice + search.
-	children map[string]node
-}
-
-var _ node = &dir{}
-
-func (d *dir) isdir() bool {
-	return true
-}
-
-// add places the node n into dir d.
-func (d *dir) add(n node) {
-	if d.children == nil {
-		d.children = make(map[string]node)
-	}
-
-	d.children[n.name()] = n
-	d.mod = time.Now()
-}
-
-// find searches for the node, given path q in dir. If the node is found, it
-// will be returned. If the node is not found, the closest existing parent is returned.
If -// the node is found, the returned (node).path() will match q. -func (d *dir) find(q string) node { - q = strings.Trim(q, "/") - i := strings.Index(q, "/") - - if q == "" { - return d - } - - if i == 0 { - panic("shouldn't happen, no root paths") - } - - var component string - if i < 0 { - // No more path components - component = q - } else { - component = q[:i] - } - - child, ok := d.children[component] - if !ok { - // Node was not found. Return p and the current node. - return d - } - - if child.isdir() { - // traverse down! - q = q[i+1:] - return child.(*dir).find(q) - } - - return child -} - -func (d *dir) list(p string) ([]string, error) { - n := d.find(p) - - if n.path() != p { - return nil, errNotExists - } - - if !n.isdir() { - return nil, errIsNotDir - } - - var children []string - for _, child := range n.(*dir).children { - children = append(children, child.path()) - } - - sort.Strings(children) - return children, nil -} - -// mkfile or return the existing one. returns an error if it exists and is a -// directory. Essentially, this is open or create. -func (d *dir) mkfile(p string) (*file, error) { - n := d.find(p) - if n.path() == p { - if n.isdir() { - return nil, errIsDir - } - - return n.(*file), nil - } - - dirpath, filename := path.Split(p) - // Make any non-existent directories - n, err := d.mkdirs(dirpath) - if err != nil { - return nil, err - } - - dd := n.(*dir) - n = &file{ - common: common{ - p: path.Join(dd.path(), filename), - mod: time.Now(), - }, - } - - dd.add(n) - return n.(*file), nil -} - -// mkdirs creates any missing directory entries in p and returns the result. -func (d *dir) mkdirs(p string) (*dir, error) { - p = normalize(p) - - n := d.find(p) - - if !n.isdir() { - // Found something there - return nil, errIsNotDir - } - - if n.path() == p { - return n.(*dir), nil - } - - dd := n.(*dir) - - relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/") - - if relative == "" { - return dd, nil - } - - components := strings.Split(relative, "/") - for _, component := range components { - d, err := dd.mkdir(component) - - if err != nil { - // This should actually never happen, since there are no children. - return nil, err - } - dd = d - } - - return dd, nil -} - -// mkdir creates a child directory under d with the given name. -func (d *dir) mkdir(name string) (*dir, error) { - if name == "" { - return nil, fmt.Errorf("invalid dirname") - } - - _, ok := d.children[name] - if ok { - return nil, errExists - } - - child := &dir{ - common: common{ - p: path.Join(d.path(), name), - mod: time.Now(), - }, - } - d.add(child) - d.mod = time.Now() - - return child, nil -} - -func (d *dir) move(src, dst string) error { - dstDirname, _ := path.Split(dst) - - dp, err := d.mkdirs(dstDirname) - if err != nil { - return err - } - - srcDirname, srcFilename := path.Split(src) - sp := d.find(srcDirname) - - if normalize(srcDirname) != normalize(sp.path()) { - return errNotExists - } - - spd, ok := sp.(*dir) - if !ok { - return errIsNotDir // paranoid. 
- } - - s, ok := spd.children[srcFilename] - if !ok { - return errNotExists - } - - delete(spd.children, srcFilename) - - switch n := s.(type) { - case *dir: - n.p = dst - case *file: - n.p = dst - } - - dp.add(s) - - return nil -} - -func (d *dir) delete(p string) error { - dirname, filename := path.Split(p) - parent := d.find(dirname) - - if normalize(dirname) != normalize(parent.path()) { - return errNotExists - } - - if _, ok := parent.(*dir).children[filename]; !ok { - return errNotExists - } - - delete(parent.(*dir).children, filename) - return nil -} - -// dump outputs a primitive directory structure to stdout. -func (d *dir) dump(indent string) { - fmt.Println(indent, d.name()+"/") - - for _, child := range d.children { - if child.isdir() { - child.(*dir).dump(indent + "\t") - } else { - fmt.Println(indent, child.name()) - } - - } -} - -func (d *dir) String() string { - return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children) -} - -// file stores actual data in the fs tree. It acts like an open, seekable file -// where operations are conducted through ReadAt and WriteAt. Use it with -// SectionReader for the best effect. -type file struct { - common - data []byte -} - -var _ node = &file{} - -func (f *file) isdir() bool { - return false -} - -func (f *file) truncate() { - f.data = f.data[:0] -} - -func (f *file) sectionReader(offset int64) io.Reader { - return io.NewSectionReader(f, offset, int64(len(f.data))-offset) -} - -func (f *file) ReadAt(p []byte, offset int64) (n int, err error) { - return copy(p, f.data[offset:]), nil -} - -func (f *file) WriteAt(p []byte, offset int64) (n int, err error) { - off := int(offset) - if cap(f.data) < off+len(p) { - data := make([]byte, len(f.data), off+len(p)) - copy(data, f.data) - f.data = data - } - - f.mod = time.Now() - f.data = f.data[:off+len(p)] - - return copy(f.data[off:off+len(p)], p), nil -} - -func (f *file) String() string { - return fmt.Sprintf("&file{path: %q}", f.p) -} - -// common provides shared fields and methods for node implementations. -type common struct { - p string - mod time.Time -} - -func (c *common) name() string { - _, name := path.Split(c.p) - return name -} - -func (c *common) path() string { - return c.p -} - -func (c *common) modtime() time.Time { - return c.mod -} - -func normalize(p string) string { - return "/" + strings.Trim(p, "/") -} diff --git a/docs/storage/driver/middleware/cloudfront/middleware.go b/docs/storage/driver/middleware/cloudfront/middleware.go deleted file mode 100644 index b0618d1a..00000000 --- a/docs/storage/driver/middleware/cloudfront/middleware.go +++ /dev/null @@ -1,136 +0,0 @@ -// Package middleware - cloudfront wrapper for storage libs -// N.B. currently only works with S3, not arbitrary sites -// -package middleware - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "net/url" - "strings" - "time" - - "github.com/aws/aws-sdk-go/service/cloudfront/sign" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" -) - -// cloudFrontStorageMiddleware provides a simple implementation of layerHandler that -// constructs temporary signed CloudFront URLs from the storagedriver layer URL, -// then issues HTTP Temporary Redirects to this CloudFront content URL. 
-type cloudFrontStorageMiddleware struct { - storagedriver.StorageDriver - urlSigner *sign.URLSigner - baseURL string - duration time.Duration -} - -var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} - -// newCloudFrontLayerHandler constructs and returns a new CloudFront -// LayerHandler implementation. -// Required options: baseurl, privatekey, keypairid -func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { - base, ok := options["baseurl"] - if !ok { - return nil, fmt.Errorf("no baseurl provided") - } - baseURL, ok := base.(string) - if !ok { - return nil, fmt.Errorf("baseurl must be a string") - } - if !strings.Contains(baseURL, "://") { - baseURL = "https://" + baseURL - } - if !strings.HasSuffix(baseURL, "/") { - baseURL += "/" - } - if _, err := url.Parse(baseURL); err != nil { - return nil, fmt.Errorf("invalid baseurl: %v", err) - } - pk, ok := options["privatekey"] - if !ok { - return nil, fmt.Errorf("no privatekey provided") - } - pkPath, ok := pk.(string) - if !ok { - return nil, fmt.Errorf("privatekey must be a string") - } - kpid, ok := options["keypairid"] - if !ok { - return nil, fmt.Errorf("no keypairid provided") - } - keypairID, ok := kpid.(string) - if !ok { - return nil, fmt.Errorf("keypairid must be a string") - } - - pkBytes, err := ioutil.ReadFile(pkPath) - if err != nil { - return nil, fmt.Errorf("failed to read privatekey file: %s", err) - } - - block, _ := pem.Decode([]byte(pkBytes)) - if block == nil { - return nil, fmt.Errorf("failed to decode private key as an rsa private key") - } - privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, err - } - - urlSigner := sign.NewURLSigner(keypairID, privateKey) - - duration := 20 * time.Minute - d, ok := options["duration"] - if ok { - switch d := d.(type) { - case time.Duration: - duration = d - case string: - dur, err := time.ParseDuration(d) - if err != nil { - return nil, fmt.Errorf("invalid duration: %s", err) - } - duration = dur - } - } - - return &cloudFrontStorageMiddleware{ - StorageDriver: storageDriver, - urlSigner: urlSigner, - baseURL: baseURL, - duration: duration, - }, nil -} - -// S3BucketKeyer is any type that is capable of returning the S3 bucket key -// which should be cached by AWS CloudFront. -type S3BucketKeyer interface { - S3BucketKey(path string) string -} - -// Resolve returns an http.Handler which can serve the contents of the given -// Layer, or an error if not supported by the storagedriver. -func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - // TODO(endophage): currently only supports S3 - keyer, ok := lh.StorageDriver.(S3BucketKeyer) - if !ok { - context.GetLogger(ctx).Warn("the CloudFront middleware does not support this backend storage driver") - return lh.StorageDriver.URLFor(ctx, path, options) - } - - cfURL, err := lh.urlSigner.Sign(lh.baseURL+keyer.S3BucketKey(path), time.Now().Add(lh.duration)) - if err != nil { - return "", err - } - return cfURL, nil -} - -// init registers the cloudfront layerHandler backend. 
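// [Editor's addition, not part of the patch: a sketch of configuring the
// middleware above directly; in production the options come from the
// registry's YAML configuration. All values here are hypothetical.]
func exampleCloudFront(backend storagedriver.StorageDriver) (storagedriver.StorageDriver, error) {
	return newCloudFrontStorageMiddleware(backend, map[string]interface{}{
		"baseurl":    "https://d111111abcdef8.cloudfront.net/", // hypothetical distribution
		"privatekey": "/etc/docker/cloudfront/pk-example.pem",  // hypothetical PEM path
		"keypairid":  "APKAEXAMPLE",                            // hypothetical key pair ID
		"duration":   "30m",                                    // parsed via time.ParseDuration
	})
}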
-func init() { - storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) -} diff --git a/docs/storage/driver/middleware/redirect/middleware.go b/docs/storage/driver/middleware/redirect/middleware.go deleted file mode 100644 index 20cd7daa..00000000 --- a/docs/storage/driver/middleware/redirect/middleware.go +++ /dev/null @@ -1,50 +0,0 @@ -package middleware - -import ( - "fmt" - "net/url" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" -) - -type redirectStorageMiddleware struct { - storagedriver.StorageDriver - scheme string - host string -} - -var _ storagedriver.StorageDriver = &redirectStorageMiddleware{} - -func newRedirectStorageMiddleware(sd storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { - o, ok := options["baseurl"] - if !ok { - return nil, fmt.Errorf("no baseurl provided") - } - b, ok := o.(string) - if !ok { - return nil, fmt.Errorf("baseurl must be a string") - } - u, err := url.Parse(b) - if err != nil { - return nil, fmt.Errorf("unable to parse redirect baseurl: %s", b) - } - if u.Scheme == "" { - return nil, fmt.Errorf("no scheme specified for redirect baseurl") - } - if u.Host == "" { - return nil, fmt.Errorf("no host specified for redirect baseurl") - } - - return &redirectStorageMiddleware{StorageDriver: sd, scheme: u.Scheme, host: u.Host}, nil -} - -func (r *redirectStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - u := &url.URL{Scheme: r.scheme, Host: r.host, Path: path} - return u.String(), nil -} - -func init() { - storagemiddleware.Register("redirect", storagemiddleware.InitFunc(newRedirectStorageMiddleware)) -} diff --git a/docs/storage/driver/middleware/redirect/middleware_test.go b/docs/storage/driver/middleware/redirect/middleware_test.go deleted file mode 100644 index 1eb6309f..00000000 --- a/docs/storage/driver/middleware/redirect/middleware_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package middleware - -import ( - "testing" - - check "gopkg.in/check.v1" -) - -func Test(t *testing.T) { check.TestingT(t) } - -type MiddlewareSuite struct{} - -var _ = check.Suite(&MiddlewareSuite{}) - -func (s *MiddlewareSuite) TestNoConfig(c *check.C) { - options := make(map[string]interface{}) - _, err := newRedirectStorageMiddleware(nil, options) - c.Assert(err, check.ErrorMatches, "no baseurl provided") -} - -func (s *MiddlewareSuite) TestMissingScheme(c *check.C) { - options := make(map[string]interface{}) - options["baseurl"] = "example.com" - _, err := newRedirectStorageMiddleware(nil, options) - c.Assert(err, check.ErrorMatches, "no scheme specified for redirect baseurl") -} - -func (s *MiddlewareSuite) TestHttpsPort(c *check.C) { - options := make(map[string]interface{}) - options["baseurl"] = "https://example.com:5443" - middleware, err := newRedirectStorageMiddleware(nil, options) - c.Assert(err, check.Equals, nil) - - m, ok := middleware.(*redirectStorageMiddleware) - c.Assert(ok, check.Equals, true) - c.Assert(m.scheme, check.Equals, "https") - c.Assert(m.host, check.Equals, "example.com:5443") - - url, err := middleware.URLFor(nil, "/rick/data", nil) - c.Assert(err, check.Equals, nil) - c.Assert(url, check.Equals, "https://example.com:5443/rick/data") -} - -func (s *MiddlewareSuite) TestHTTP(c *check.C) { - options := make(map[string]interface{}) - 
options["baseurl"] = "http://example.com" - middleware, err := newRedirectStorageMiddleware(nil, options) - c.Assert(err, check.Equals, nil) - - m, ok := middleware.(*redirectStorageMiddleware) - c.Assert(ok, check.Equals, true) - c.Assert(m.scheme, check.Equals, "http") - c.Assert(m.host, check.Equals, "example.com") - - url, err := middleware.URLFor(nil, "morty/data", nil) - c.Assert(err, check.Equals, nil) - c.Assert(url, check.Equals, "http://example.com/morty/data") -} diff --git a/docs/storage/driver/middleware/storagemiddleware.go b/docs/storage/driver/middleware/storagemiddleware.go deleted file mode 100644 index 7e40a8dd..00000000 --- a/docs/storage/driver/middleware/storagemiddleware.go +++ /dev/null @@ -1,39 +0,0 @@ -package storagemiddleware - -import ( - "fmt" - - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// InitFunc is the type of a StorageMiddleware factory function and is -// used to register the constructor for different StorageMiddleware backends. -type InitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) - -var storageMiddlewares map[string]InitFunc - -// Register is used to register an InitFunc for -// a StorageMiddleware backend with the given name. -func Register(name string, initFunc InitFunc) error { - if storageMiddlewares == nil { - storageMiddlewares = make(map[string]InitFunc) - } - if _, exists := storageMiddlewares[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - storageMiddlewares[name] = initFunc - - return nil -} - -// Get constructs a StorageMiddleware with the given options using the named backend. -func Get(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { - if storageMiddlewares != nil { - if initFunc, exists := storageMiddlewares[name]; exists { - return initFunc(storageDriver, options) - } - } - - return nil, fmt.Errorf("no storage middleware registered with name: %s", name) -} diff --git a/docs/storage/driver/oss/doc.go b/docs/storage/driver/oss/doc.go deleted file mode 100644 index d1bc932f..00000000 --- a/docs/storage/driver/oss/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package oss implements the Aliyun OSS Storage driver backend. Support can be -// enabled by including the "include_oss" build tag. -package oss diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go deleted file mode 100644 index 7ae70334..00000000 --- a/docs/storage/driver/oss/oss.go +++ /dev/null @@ -1,670 +0,0 @@ -// Package oss provides a storagedriver.StorageDriver implementation to -// store blobs in Aliyun OSS cloud storage. -// -// This package leverages the denverdino/aliyungo client library for interfacing with -// oss. 
-// -// Because OSS is a key, value store the Stat call does not support last modification -// time for directories (directories are an abstraction for key, value stores) -// -// +build include_oss - -package oss - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/docker/distribution/context" - - "github.com/Sirupsen/logrus" - "github.com/denverdino/aliyungo/oss" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "oss" - -// minChunkSize defines the minimum multipart upload chunk size -// OSS API requires multipart upload chunks to be at least 5MB -const minChunkSize = 5 << 20 - -const defaultChunkSize = 2 * minChunkSize -const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk - -// listMax is the largest amount of objects you can request from OSS in a list call -const listMax = 1000 - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - AccessKeyID string - AccessKeySecret string - Bucket string - Region oss.Region - Internal bool - Encrypt bool - Secure bool - ChunkSize int64 - RootDirectory string - Endpoint string -} - -func init() { - factory.Register(driverName, &ossDriverFactory{}) -} - -// ossDriverFactory implements the factory.StorageDriverFactory interface -type ossDriverFactory struct{} - -func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - Client *oss.Client - Bucket *oss.Bucket - ChunkSize int64 - Encrypt bool - RootDirectory string -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Aliyun OSS -// Objects are stored at absolute keys in the provided bucket. 
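As context for the constructors that follow: callers normally do not build this Driver directly but resolve it through the factory registry populated in init above. A minimal sketch, assuming the include_oss build tag is set; every credential, region, and bucket value here is hypothetical:

    package main

    import (
        storagedriver "github.com/docker/distribution/registry/storage/driver"
        "github.com/docker/distribution/registry/storage/driver/factory"
    )

    // newOSSDriver resolves the "oss" name against the factory registry; the
    // parameter keys mirror the ones FromParameters below expects.
    func newOSSDriver() (storagedriver.StorageDriver, error) {
        return factory.Create("oss", map[string]interface{}{
            "accesskeyid":     "my-access-key",     // hypothetical
            "accesskeysecret": "my-access-secret",  // hypothetical
            "region":          "oss-cn-hangzhou",   // hypothetical
            "bucket":          "my-registry-blobs", // hypothetical
        })
    }
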
-type Driver struct {
-	baseEmbed
-}
-
-// FromParameters constructs a new Driver with a given parameters map
-// Required parameters:
-// - accesskeyid
-// - accesskeysecret
-// - region
-// - bucket
-func FromParameters(parameters map[string]interface{}) (*Driver, error) {
-	accessKey, ok := parameters["accesskeyid"]
-	if !ok {
-		return nil, fmt.Errorf("No accesskeyid parameter provided")
-	}
-	secretKey, ok := parameters["accesskeysecret"]
-	if !ok {
-		return nil, fmt.Errorf("No accesskeysecret parameter provided")
-	}
-
-	regionName, ok := parameters["region"]
-	if !ok || fmt.Sprint(regionName) == "" {
-		return nil, fmt.Errorf("No region parameter provided")
-	}
-
-	bucket, ok := parameters["bucket"]
-	if !ok || fmt.Sprint(bucket) == "" {
-		return nil, fmt.Errorf("No bucket parameter provided")
-	}
-
-	internalBool := false
-	internal, ok := parameters["internal"]
-	if ok {
-		internalBool, ok = internal.(bool)
-		if !ok {
-			return nil, fmt.Errorf("The internal parameter should be a boolean")
-		}
-	}
-
-	encryptBool := false
-	encrypt, ok := parameters["encrypt"]
-	if ok {
-		encryptBool, ok = encrypt.(bool)
-		if !ok {
-			return nil, fmt.Errorf("The encrypt parameter should be a boolean")
-		}
-	}
-
-	secureBool := true
-	secure, ok := parameters["secure"]
-	if ok {
-		secureBool, ok = secure.(bool)
-		if !ok {
-			return nil, fmt.Errorf("The secure parameter should be a boolean")
-		}
-	}
-
-	chunkSize := int64(defaultChunkSize)
-	chunkSizeParam, ok := parameters["chunksize"]
-	if ok {
-		switch v := chunkSizeParam.(type) {
-		case string:
-			vv, err := strconv.ParseInt(v, 0, 64)
-			if err != nil {
-				return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
-			}
-			chunkSize = vv
-		case int64:
-			chunkSize = v
-		case int, uint, int32, uint32, uint64:
-			chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
-		default:
-			return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
-		}
-
-		if chunkSize < minChunkSize {
-			return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
-		}
-	}
-
-	rootDirectory, ok := parameters["rootdirectory"]
-	if !ok {
-		rootDirectory = ""
-	}
-
-	endpoint, ok := parameters["endpoint"]
-	if !ok {
-		endpoint = ""
-	}
-
-	params := DriverParameters{
-		AccessKeyID:     fmt.Sprint(accessKey),
-		AccessKeySecret: fmt.Sprint(secretKey),
-		Bucket:          fmt.Sprint(bucket),
-		Region:          oss.Region(fmt.Sprint(regionName)),
-		ChunkSize:       chunkSize,
-		RootDirectory:   fmt.Sprint(rootDirectory),
-		Encrypt:         encryptBool,
-		Secure:          secureBool,
-		Internal:        internalBool,
-		Endpoint:        fmt.Sprint(endpoint),
-	}
-
-	return New(params)
-}
-
-// New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and
-// bucketName
-func New(params DriverParameters) (*Driver, error) {
-	client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure)
-	client.SetEndpoint(params.Endpoint)
-	bucket := client.Bucket(params.Bucket)
-	client.SetDebug(false)
-
-	// Validate that the given credentials have at least read permissions in the
-	// given bucket scope.
-	if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil {
-		return nil, err
-	}
-
-	// TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise
-	// if you initiated a new OSS client while another one is running on the same bucket.
-
-	d := &driver{
-		Client:        client,
-		Bucket:        bucket,
-		ChunkSize:     params.ChunkSize,
-		Encrypt:       params.Encrypt,
-		RootDirectory: params.RootDirectory,
-	}
-
-	return &Driver{
-		baseEmbed: baseEmbed{
-			Base: base.Base{
-				StorageDriver: d,
-			},
-		},
-	}, nil
-}
-
-// Implement the storagedriver.StorageDriver interface
-
-func (d *driver) Name() string {
-	return driverName
-}
-
-// GetContent retrieves the content stored at "path" as a []byte.
-func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
-	content, err := d.Bucket.Get(d.ossPath(path))
-	if err != nil {
-		return nil, parseError(path, err)
-	}
-	return content, nil
-}
-
-// PutContent stores the []byte content at a location designated by "path".
-func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
-	return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions()))
-}
-
-// Reader retrieves an io.ReadCloser for the content stored at "path" with a
-// given byte offset.
-func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
-	headers := make(http.Header)
-	headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-")
-
-	resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers)
-	if err != nil {
-		return nil, parseError(path, err)
-	}
-
-	// Because of the Aliyun OSS API, a 200 status and the whole object are returned
-	// instead of an InvalidRange error when the requested range is invalid.
-	//
-	// The OSS server always returns http.StatusPartialContent when the range is acceptable.
-	if resp.StatusCode != http.StatusPartialContent {
-		resp.Body.Close()
-		return ioutil.NopCloser(bytes.NewReader(nil)), nil
-	}
-
-	return resp.Body, nil
-}
-
-// Writer returns a FileWriter which will store the content written to it
-// at the location designated by "path" after the call to Commit.
-func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
-	key := d.ossPath(path)
-	if !append {
-		// TODO (brianbland): cancel other uploads at this path
-		multi, err := d.Bucket.InitMulti(key, d.getContentType(), getPermissions(), d.getOptions())
-		if err != nil {
-			return nil, err
-		}
-		return d.newWriter(key, multi, nil), nil
-	}
-	multis, _, err := d.Bucket.ListMulti(key, "")
-	if err != nil {
-		return nil, parseError(path, err)
-	}
-	for _, multi := range multis {
-		if key != multi.Key {
-			continue
-		}
-		parts, err := multi.ListParts()
-		if err != nil {
-			return nil, parseError(path, err)
-		}
-		var multiSize int64
-		for _, part := range parts {
-			multiSize += part.Size
-		}
-		return d.newWriter(key, multi, parts), nil
-	}
-	return nil, storagedriver.PathNotFoundError{Path: path}
-}
-
-// Stat retrieves the FileInfo for the given path, including the current size
-// in bytes and the creation time.
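Before the Stat implementation, one usage note on the Reader contract above: an offset at or past the end of the object yields an empty reader rather than an error, so resuming callers must treat a short read as end of data. A hedged sketch, assuming the context, ioutil, and storagedriver imports already used in this file:

    // readFrom resumes a blob read at offset; an empty result means the offset
    // was at or beyond the end of the object, per the Reader semantics above.
    func readFrom(ctx context.Context, d storagedriver.StorageDriver, path string, offset int64) ([]byte, error) {
        rc, err := d.Reader(ctx, path, offset)
        if err != nil {
            return nil, err
        }
        defer rc.Close()
        return ioutil.ReadAll(rc)
    }
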
-func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
-	listResponse, err := d.Bucket.List(d.ossPath(path), "", "", 1)
-	if err != nil {
-		return nil, err
-	}
-
-	fi := storagedriver.FileInfoFields{
-		Path: path,
-	}
-
-	if len(listResponse.Contents) == 1 {
-		if listResponse.Contents[0].Key != d.ossPath(path) {
-			fi.IsDir = true
-		} else {
-			fi.IsDir = false
-			fi.Size = listResponse.Contents[0].Size
-
-			timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified)
-			if err != nil {
-				return nil, err
-			}
-			fi.ModTime = timestamp
-		}
-	} else if len(listResponse.CommonPrefixes) == 1 {
-		fi.IsDir = true
-	} else {
-		return nil, storagedriver.PathNotFoundError{Path: path}
-	}
-
-	return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
-}
-
-// List returns a list of the objects that are direct descendants of the given path.
-func (d *driver) List(ctx context.Context, opath string) ([]string, error) {
-	path := opath
-	if path != "/" && path[len(path)-1] != '/' {
-		path = path + "/"
-	}
-
-	// This is to cover for the cases when the rootDirectory of the driver is either "" or "/".
-	// In those cases, there is no root prefix to replace and we must actually add a "/" to all
-	// results in order to keep them as valid paths as recognized by storagedriver.PathRegexp
-	prefix := ""
-	if d.ossPath("") == "" {
-		prefix = "/"
-	}
-
-	listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax)
-	if err != nil {
-		return nil, parseError(opath, err)
-	}
-
-	files := []string{}
-	directories := []string{}
-
-	for {
-		for _, key := range listResponse.Contents {
-			files = append(files, strings.Replace(key.Key, d.ossPath(""), prefix, 1))
-		}
-
-		for _, commonPrefix := range listResponse.CommonPrefixes {
-			directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.ossPath(""), prefix, 1))
-		}
-
-		if listResponse.IsTruncated {
-			listResponse, err = d.Bucket.List(d.ossPath(path), "/", listResponse.NextMarker, listMax)
-			if err != nil {
-				return nil, err
-			}
-		} else {
-			break
-		}
-	}
-
-	if opath != "/" {
-		if len(files) == 0 && len(directories) == 0 {
-			// Treat an empty response as a missing directory, since we don't actually
-			// have directories in OSS.
-			return nil, storagedriver.PathNotFoundError{Path: opath}
-		}
-	}
-
-	return append(files, directories...), nil
-}
-
-// Move moves an object stored at sourcePath to destPath, removing the original
-// object.
-func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
-	logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath))
-
-	err := d.Bucket.CopyLargeFile(d.ossPath(sourcePath), d.ossPath(destPath),
-		d.getContentType(),
-		getPermissions(),
-		oss.Options{})
-	if err != nil {
-		logrus.Errorf("Failed to move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err)
-		return parseError(sourcePath, err)
-	}
-
-	return d.Delete(ctx, sourcePath)
-}
-
-// Delete recursively deletes all objects stored at "path" and its subpaths.
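One caveat on Move above, before Delete's implementation: it is a server-side copy followed by a delete, so it is not atomic, and a failure between the two steps can leave the object at both keys. A caller-side sketch of a conservative retry, assuming nothing beyond the StorageDriver interface:

    // moveOnce retries a failed Move only when the source demonstrably still
    // exists; otherwise the copy may already have landed at dst.
    func moveOnce(ctx context.Context, d storagedriver.StorageDriver, src, dst string) error {
        err := d.Move(ctx, src, dst)
        if err == nil {
            return nil
        }
        if _, statErr := d.Stat(ctx, src); statErr == nil {
            return d.Move(ctx, src, dst) // source intact: the whole move is safe to retry
        }
        return err
    }
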
-func (d *driver) Delete(ctx context.Context, path string) error {
-	listResponse, err := d.Bucket.List(d.ossPath(path), "", "", listMax)
-	if err != nil || len(listResponse.Contents) == 0 {
-		return storagedriver.PathNotFoundError{Path: path}
-	}
-
-	ossObjects := make([]oss.Object, listMax)
-
-	for len(listResponse.Contents) > 0 {
-		for index, key := range listResponse.Contents {
-			ossObjects[index].Key = key.Key
-		}
-
-		err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:len(listResponse.Contents)]})
-		if err != nil {
-			return err
-		}
-
-		listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// URLFor returns a URL which may be used to retrieve the content stored at the given path.
-// May return an UnsupportedMethodErr in certain StorageDriver implementations.
-func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
-	methodString := "GET"
-	method, ok := options["method"]
-	if ok {
-		methodString, ok = method.(string)
-		if !ok || (methodString != "GET") {
-			return "", storagedriver.ErrUnsupportedMethod{}
-		}
-	}
-
-	expiresTime := time.Now().Add(20 * time.Minute)
-
-	expires, ok := options["expiry"]
-	if ok {
-		et, ok := expires.(time.Time)
-		if ok {
-			expiresTime = et
-		}
-	}
-	logrus.Infof("methodString: %s, expiresTime: %v", methodString, expiresTime)
-	signedURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil)
-	logrus.Infof("signed URL: %s", signedURL)
-	return signedURL, nil
-}
-
-func (d *driver) ossPath(path string) string {
-	return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
-}
-
-func parseError(path string, err error) error {
-	if ossErr, ok := err.(*oss.Error); ok && ossErr.StatusCode == http.StatusNotFound && (ossErr.Code == "NoSuchKey" || ossErr.Code == "") {
-		return storagedriver.PathNotFoundError{Path: path}
-	}
-
-	return err
-}
-
-func hasCode(err error, code string) bool {
-	ossErr, ok := err.(*oss.Error)
-	return ok && ossErr.Code == code
-}
-
-func (d *driver) getOptions() oss.Options {
-	return oss.Options{ServerSideEncryption: d.Encrypt}
-}
-
-func getPermissions() oss.ACL {
-	return oss.Private
-}
-
-func (d *driver) getContentType() string {
-	return "application/octet-stream"
-}
-
-// writer attempts to upload parts to OSS in a buffered fashion where the last
-// part is at least as large as the chunksize, so the multipart upload could be
-// cleanly resumed in the future. This is violated if Close is called after less
-// than a full chunk is written.
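The buffering scheme described in that comment is driven entirely through the storagedriver.FileWriter interface; a minimal sketch of the caller-side lifecycle, with Commit finalizing the multipart upload and Cancel aborting it (context, io, and storagedriver imports assumed):

    // uploadBlob streams data through the chunk-buffered writer defined below.
    func uploadBlob(ctx context.Context, d storagedriver.StorageDriver, path string, data io.Reader) error {
        fw, err := d.Writer(ctx, path, false) // false: start a fresh upload
        if err != nil {
            return err
        }
        if _, err := io.Copy(fw, data); err != nil {
            fw.Cancel() // abort the multipart upload rather than leak parts
            return err
        }
        if err := fw.Commit(); err != nil {
            return err
        }
        return fw.Close()
    }
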
-type writer struct { - driver *driver - key string - multi *oss.Multi - parts []oss.Part - size int64 - readyPart []byte - pendingPart []byte - closed bool - committed bool - cancelled bool -} - -func (d *driver) newWriter(key string, multi *oss.Multi, parts []oss.Part) storagedriver.FileWriter { - var size int64 - for _, part := range parts { - size += part.Size - } - return &writer{ - driver: d, - key: key, - multi: multi, - parts: parts, - size: size, - } -} - -func (w *writer) Write(p []byte) (int, error) { - if w.closed { - return 0, fmt.Errorf("already closed") - } else if w.committed { - return 0, fmt.Errorf("already committed") - } else if w.cancelled { - return 0, fmt.Errorf("already cancelled") - } - - // If the last written part is smaller than minChunkSize, we need to make a - // new multipart upload :sadface: - if len(w.parts) > 0 && int(w.parts[len(w.parts)-1].Size) < minChunkSize { - err := w.multi.Complete(w.parts) - if err != nil { - w.multi.Abort() - return 0, err - } - - multi, err := w.driver.Bucket.InitMulti(w.key, w.driver.getContentType(), getPermissions(), w.driver.getOptions()) - if err != nil { - return 0, err - } - w.multi = multi - - // If the entire written file is smaller than minChunkSize, we need to make - // a new part from scratch :double sad face: - if w.size < minChunkSize { - contents, err := w.driver.Bucket.Get(w.key) - if err != nil { - return 0, err - } - w.parts = nil - w.readyPart = contents - } else { - // Otherwise we can use the old file as the new first part - _, part, err := multi.PutPartCopy(1, oss.CopyOptions{}, w.driver.Bucket.Name+"/"+w.key) - if err != nil { - return 0, err - } - w.parts = []oss.Part{part} - } - } - - var n int - - for len(p) > 0 { - // If no parts are ready to write, fill up the first part - if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { - if len(p) >= neededBytes { - w.readyPart = append(w.readyPart, p[:neededBytes]...) - n += neededBytes - p = p[neededBytes:] - } else { - w.readyPart = append(w.readyPart, p...) - n += len(p) - p = nil - } - } - - if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { - if len(p) >= neededBytes { - w.pendingPart = append(w.pendingPart, p[:neededBytes]...) - n += neededBytes - p = p[neededBytes:] - err := w.flushPart() - if err != nil { - w.size += int64(n) - return n, err - } - } else { - w.pendingPart = append(w.pendingPart, p...) - n += len(p) - p = nil - } - } - } - w.size += int64(n) - return n, nil -} - -func (w *writer) Size() int64 { - return w.size -} - -func (w *writer) Close() error { - if w.closed { - return fmt.Errorf("already closed") - } - w.closed = true - return w.flushPart() -} - -func (w *writer) Cancel() error { - if w.closed { - return fmt.Errorf("already closed") - } else if w.committed { - return fmt.Errorf("already committed") - } - w.cancelled = true - err := w.multi.Abort() - return err -} - -func (w *writer) Commit() error { - if w.closed { - return fmt.Errorf("already closed") - } else if w.committed { - return fmt.Errorf("already committed") - } else if w.cancelled { - return fmt.Errorf("already cancelled") - } - err := w.flushPart() - if err != nil { - return err - } - w.committed = true - err = w.multi.Complete(w.parts) - if err != nil { - w.multi.Abort() - return err - } - return nil -} - -// flushPart flushes buffers to write a part to S3. 
-// Only called by Write (with both buffers full) and Close/Commit (always) -func (w *writer) flushPart() error { - if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { - // nothing to write - return nil - } - if len(w.pendingPart) < int(w.driver.ChunkSize) { - // closing with a small pending part - // combine ready and pending to avoid writing a small part - w.readyPart = append(w.readyPart, w.pendingPart...) - w.pendingPart = nil - } - - part, err := w.multi.PutPart(len(w.parts)+1, bytes.NewReader(w.readyPart)) - if err != nil { - return err - } - w.parts = append(w.parts, part) - w.readyPart = w.pendingPart - w.pendingPart = nil - return nil -} diff --git a/docs/storage/driver/oss/oss_test.go b/docs/storage/driver/oss/oss_test.go deleted file mode 100644 index fbae5d9c..00000000 --- a/docs/storage/driver/oss/oss_test.go +++ /dev/null @@ -1,144 +0,0 @@ -// +build include_oss - -package oss - -import ( - "io/ioutil" - - alioss "github.com/denverdino/aliyungo/oss" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - //"log" - "os" - "strconv" - "testing" - - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -var ossDriverConstructor func(rootDirectory string) (*Driver, error) - -var skipCheck func() string - -func init() { - accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") - secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET") - bucket := os.Getenv("OSS_BUCKET") - region := os.Getenv("OSS_REGION") - internal := os.Getenv("OSS_INTERNAL") - encrypt := os.Getenv("OSS_ENCRYPT") - secure := os.Getenv("OSS_SECURE") - endpoint := os.Getenv("OSS_ENDPOINT") - root, err := ioutil.TempDir("", "driver-") - if err != nil { - panic(err) - } - defer os.Remove(root) - - ossDriverConstructor = func(rootDirectory string) (*Driver, error) { - encryptBool := false - if encrypt != "" { - encryptBool, err = strconv.ParseBool(encrypt) - if err != nil { - return nil, err - } - } - - secureBool := false - if secure != "" { - secureBool, err = strconv.ParseBool(secure) - if err != nil { - return nil, err - } - } - - internalBool := false - if internal != "" { - internalBool, err = strconv.ParseBool(internal) - if err != nil { - return nil, err - } - } - - parameters := DriverParameters{ - AccessKeyID: accessKey, - AccessKeySecret: secretKey, - Bucket: bucket, - Region: alioss.Region(region), - Internal: internalBool, - ChunkSize: minChunkSize, - RootDirectory: rootDirectory, - Encrypt: encryptBool, - Secure: secureBool, - Endpoint: endpoint, - } - - return New(parameters) - } - - // Skip OSS storage driver tests if environment variable parameters are not provided - skipCheck = func() string { - if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { - return "Must set ALIYUN_ACCESS_KEY_ID, ALIYUN_ACCESS_KEY_SECRET, OSS_REGION, OSS_BUCKET, and OSS_ENCRYPT to run OSS tests" - } - return "" - } - - testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { - return ossDriverConstructor(root) - }, skipCheck) -} - -func TestEmptyRootList(t *testing.T) { - if skipCheck() != "" { - t.Skip(skipCheck()) - } - - validRoot, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(validRoot) - - rootedDriver, err := ossDriverConstructor(validRoot) - if err != nil { - t.Fatalf("unexpected error 
creating rooted driver: %v", err) - } - - emptyRootDriver, err := ossDriverConstructor("") - if err != nil { - t.Fatalf("unexpected error creating empty root driver: %v", err) - } - - slashRootDriver, err := ossDriverConstructor("/") - if err != nil { - t.Fatalf("unexpected error creating slash root driver: %v", err) - } - - filename := "/test" - contents := []byte("contents") - ctx := context.Background() - err = rootedDriver.PutContent(ctx, filename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer rootedDriver.Delete(ctx, filename) - - keys, err := emptyRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } - - keys, err = slashRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } -} diff --git a/docs/storage/driver/s3-aws/s3.go b/docs/storage/driver/s3-aws/s3.go deleted file mode 100644 index 1240ec17..00000000 --- a/docs/storage/driver/s3-aws/s3.go +++ /dev/null @@ -1,977 +0,0 @@ -// Package s3 provides a storagedriver.StorageDriver implementation to -// store blobs in Amazon S3 cloud storage. -// -// This package leverages the official aws client library for interfacing with -// S3. -// -// Because S3 is a key, value store the Stat call does not support last modification -// time for directories (directories are an abstraction for key, value stores) -// -// Keep in mind that S3 guarantees only read-after-write consistency for new -// objects, but no read-after-update or list-after-write consistency. -package s3 - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/client/transport" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "s3aws" - -// minChunkSize defines the minimum multipart upload chunk size -// S3 API requires multipart upload chunks to be at least 5MB -const minChunkSize = 5 << 20 - -const defaultChunkSize = 2 * minChunkSize - -// listMax is the largest amount of objects you can request from S3 in a list call -const listMax = 1000 - -// validRegions maps known s3 region identifiers to region descriptors -var validRegions = map[string]struct{}{} - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - AccessKey string - SecretKey string - Bucket string - Region string - RegionEndpoint string - Encrypt bool - KeyID string - Secure bool - ChunkSize int64 - RootDirectory string - StorageClass string - UserAgent string -} - -func init() { - for _, region := range []string{ - "us-east-1", - "us-west-1", - "us-west-2", - "eu-west-1", - "eu-central-1", - "ap-southeast-1", - "ap-southeast-2", - 
"ap-northeast-1", - "ap-northeast-2", - "sa-east-1", - "cn-north-1", - "us-gov-west-1", - } { - validRegions[region] = struct{}{} - } - - // Register this as the default s3 driver in addition to s3aws - factory.Register("s3", &s3DriverFactory{}) - factory.Register(driverName, &s3DriverFactory{}) -} - -// s3DriverFactory implements the factory.StorageDriverFactory interface -type s3DriverFactory struct{} - -func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - S3 *s3.S3 - Bucket string - ChunkSize int64 - Encrypt bool - KeyID string - RootDirectory string - StorageClass string -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 -// Objects are stored at absolute keys in the provided bucket. -type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - accesskey -// - secretkey -// - region -// - bucket -// - encrypt -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - // Providing no values for these is valid in case the user is authenticating - // with an IAM on an ec2 instance (in which case the instance credentials will - // be summoned when GetAuth is called) - accessKey := parameters["accesskey"] - if accessKey == nil { - accessKey = "" - } - secretKey := parameters["secretkey"] - if secretKey == nil { - secretKey = "" - } - - regionEndpoint := parameters["regionendpoint"] - if regionEndpoint == nil { - regionEndpoint = "" - } - - regionName, ok := parameters["region"] - if regionName == nil || fmt.Sprint(regionName) == "" { - return nil, fmt.Errorf("No region parameter provided") - } - region := fmt.Sprint(regionName) - // Don't check the region value if a custom endpoint is provided. 
- if regionEndpoint == "" { - if _, ok = validRegions[region]; !ok { - return nil, fmt.Errorf("Invalid region provided: %v", region) - } - } - - bucket := parameters["bucket"] - if bucket == nil || fmt.Sprint(bucket) == "" { - return nil, fmt.Errorf("No bucket parameter provided") - } - - encryptBool := false - encrypt := parameters["encrypt"] - switch encrypt := encrypt.(type) { - case string: - b, err := strconv.ParseBool(encrypt) - if err != nil { - return nil, fmt.Errorf("The encrypt parameter should be a boolean") - } - encryptBool = b - case bool: - encryptBool = encrypt - case nil: - // do nothing - default: - return nil, fmt.Errorf("The encrypt parameter should be a boolean") - } - - secureBool := true - secure := parameters["secure"] - switch secure := secure.(type) { - case string: - b, err := strconv.ParseBool(secure) - if err != nil { - return nil, fmt.Errorf("The secure parameter should be a boolean") - } - secureBool = b - case bool: - secureBool = secure - case nil: - // do nothing - default: - return nil, fmt.Errorf("The secure parameter should be a boolean") - } - - keyID := parameters["keyid"] - if keyID == nil { - keyID = "" - } - - chunkSize := int64(defaultChunkSize) - chunkSizeParam := parameters["chunksize"] - switch v := chunkSizeParam.(type) { - case string: - vv, err := strconv.ParseInt(v, 0, 64) - if err != nil { - return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) - } - chunkSize = vv - case int64: - chunkSize = v - case int, uint, int32, uint32, uint64: - chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() - case nil: - // do nothing - default: - return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam) - } - - if chunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) - } - - rootDirectory := parameters["rootdirectory"] - if rootDirectory == nil { - rootDirectory = "" - } - - storageClass := s3.StorageClassStandard - storageClassParam := parameters["storageclass"] - if storageClassParam != nil { - storageClassString, ok := storageClassParam.(string) - if !ok { - return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam) - } - // All valid storage class parameters are UPPERCASE, so be a bit more flexible here - storageClassString = strings.ToUpper(storageClassString) - if storageClassString != s3.StorageClassStandard && storageClassString != s3.StorageClassReducedRedundancy { - return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam) - } - storageClass = storageClassString - } - - userAgent := parameters["useragent"] - if userAgent == nil { - userAgent = "" - } - - params := DriverParameters{ - fmt.Sprint(accessKey), - fmt.Sprint(secretKey), - fmt.Sprint(bucket), - region, - fmt.Sprint(regionEndpoint), - encryptBool, - fmt.Sprint(keyID), - secureBool, - chunkSize, - fmt.Sprint(rootDirectory), - storageClass, - fmt.Sprint(userAgent), - } - - return New(params) -} - -// New constructs a new Driver with the given AWS credentials, region, encryption flag, and -// bucketName -func New(params DriverParameters) (*Driver, error) { - awsConfig := aws.NewConfig() - var creds *credentials.Credentials - if params.RegionEndpoint == "" { - creds = 
credentials.NewChainCredentials([]credentials.Provider{ - &credentials.StaticProvider{ - Value: credentials.Value{ - AccessKeyID: params.AccessKey, - SecretAccessKey: params.SecretKey, - }, - }, - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{}, - &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())}, - }) - - } else { - creds = credentials.NewChainCredentials([]credentials.Provider{ - &credentials.StaticProvider{ - Value: credentials.Value{ - AccessKeyID: params.AccessKey, - SecretAccessKey: params.SecretKey, - }, - }, - &credentials.EnvProvider{}, - }) - awsConfig.WithS3ForcePathStyle(true) - awsConfig.WithEndpoint(params.RegionEndpoint) - } - - awsConfig.WithCredentials(creds) - awsConfig.WithRegion(params.Region) - awsConfig.WithDisableSSL(!params.Secure) - - if params.UserAgent != "" { - awsConfig.WithHTTPClient(&http.Client{ - Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}})), - }) - } - - s3obj := s3.New(session.New(awsConfig)) - - // TODO Currently multipart uploads have no timestamps, so this would be unwise - // if you initiated a new s3driver while another one is running on the same bucket. - // multis, _, err := bucket.ListMulti("", "") - // if err != nil { - // return nil, err - // } - - // for _, multi := range multis { - // err := multi.Abort() - // //TODO appropriate to do this error checking? - // if err != nil { - // return nil, err - // } - // } - - d := &driver{ - S3: s3obj, - Bucket: params.Bucket, - ChunkSize: params.ChunkSize, - Encrypt: params.Encrypt, - KeyID: params.KeyID, - RootDirectory: params.RootDirectory, - StorageClass: params.StorageClass, - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - reader, err := d.Reader(ctx, path, 0) - if err != nil { - return nil, err - } - return ioutil.ReadAll(reader) -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - _, err := d.S3.PutObject(&s3.PutObjectInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(path)), - ContentType: d.getContentType(), - ACL: d.getACL(), - ServerSideEncryption: d.getEncryptionMode(), - SSEKMSKeyId: d.getSSEKMSKeyID(), - StorageClass: d.getStorageClass(), - Body: bytes.NewReader(contents), - }) - return parseError(path, err) -} - -// Reader retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - resp, err := d.S3.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(path)), - Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"), - }) - - if err != nil { - if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - return nil, parseError(path, err) - } - return resp.Body, nil -} - -// Writer returns a FileWriter which will store the content written to it -// at the location designated by "path" after the call to Commit. 
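The append parameter of Writer (implemented below) is what makes interrupted pushes resumable: true re-attaches to an in-flight multipart upload instead of starting a new one. A hedged sketch of resuming and appending, using only the FileWriter methods defined in this file:

    // resumeUpload reopens the multipart upload already in flight at path and
    // appends more bytes; Size then reports the total written so far.
    func resumeUpload(ctx context.Context, d storagedriver.StorageDriver, path string, more []byte) (int64, error) {
        fw, err := d.Writer(ctx, path, true) // true: resume the existing upload
        if err != nil {
            return 0, err
        }
        if _, err := fw.Write(more); err != nil {
            fw.Cancel()
            return 0, err
        }
        if err := fw.Commit(); err != nil {
            return 0, err
        }
        return fw.Size(), fw.Close()
    }
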
-func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { - key := d.s3Path(path) - if !append { - // TODO (brianbland): cancel other uploads at this path - resp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(key), - ContentType: d.getContentType(), - ACL: d.getACL(), - ServerSideEncryption: d.getEncryptionMode(), - SSEKMSKeyId: d.getSSEKMSKeyID(), - StorageClass: d.getStorageClass(), - }) - if err != nil { - return nil, err - } - return d.newWriter(key, *resp.UploadId, nil), nil - } - resp, err := d.S3.ListMultipartUploads(&s3.ListMultipartUploadsInput{ - Bucket: aws.String(d.Bucket), - Prefix: aws.String(key), - }) - if err != nil { - return nil, parseError(path, err) - } - - for _, multi := range resp.Uploads { - if key != *multi.Key { - continue - } - resp, err := d.S3.ListParts(&s3.ListPartsInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(key), - UploadId: multi.UploadId, - }) - if err != nil { - return nil, parseError(path, err) - } - var multiSize int64 - for _, part := range resp.Parts { - multiSize += *part.Size - } - return d.newWriter(key, *multi.UploadId, resp.Parts), nil - } - return nil, storagedriver.PathNotFoundError{Path: path} -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(d.Bucket), - Prefix: aws.String(d.s3Path(path)), - MaxKeys: aws.Int64(1), - }) - if err != nil { - return nil, err - } - - fi := storagedriver.FileInfoFields{ - Path: path, - } - - if len(resp.Contents) == 1 { - if *resp.Contents[0].Key != d.s3Path(path) { - fi.IsDir = true - } else { - fi.IsDir = false - fi.Size = *resp.Contents[0].Size - fi.ModTime = *resp.Contents[0].LastModified - } - } else if len(resp.CommonPrefixes) == 1 { - fi.IsDir = true - } else { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, opath string) ([]string, error) { - path := opath - if path != "/" && path[len(path)-1] != '/' { - path = path + "/" - } - - // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". 
- // In those cases, there is no root prefix to replace and we must actually add a "/" to all - // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp - prefix := "" - if d.s3Path("") == "" { - prefix = "/" - } - - resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(d.Bucket), - Prefix: aws.String(d.s3Path(path)), - Delimiter: aws.String("/"), - MaxKeys: aws.Int64(listMax), - }) - if err != nil { - return nil, parseError(opath, err) - } - - files := []string{} - directories := []string{} - - for { - for _, key := range resp.Contents { - files = append(files, strings.Replace(*key.Key, d.s3Path(""), prefix, 1)) - } - - for _, commonPrefix := range resp.CommonPrefixes { - commonPrefix := *commonPrefix.Prefix - directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) - } - - if *resp.IsTruncated { - resp, err = d.S3.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(d.Bucket), - Prefix: aws.String(d.s3Path(path)), - Delimiter: aws.String("/"), - MaxKeys: aws.Int64(listMax), - Marker: resp.NextMarker, - }) - if err != nil { - return nil, err - } - } else { - break - } - } - - if opath != "/" { - if len(files) == 0 && len(directories) == 0 { - // Treat empty response as missing directory, since we don't actually - // have directories in s3. - return nil, storagedriver.PathNotFoundError{Path: opath} - } - } - - return append(files, directories...), nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - /* This is terrible, but aws doesn't have an actual move. */ - _, err := d.S3.CopyObject(&s3.CopyObjectInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(destPath)), - ContentType: d.getContentType(), - ACL: d.getACL(), - ServerSideEncryption: d.getEncryptionMode(), - SSEKMSKeyId: d.getSSEKMSKeyID(), - StorageClass: d.getStorageClass(), - CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)), - }) - if err != nil { - return parseError(sourcePath, err) - } - - return d.Delete(ctx, sourcePath) -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. 
-// We must be careful since S3 does not guarantee read after delete consistency -func (d *driver) Delete(ctx context.Context, path string) error { - s3Objects := make([]*s3.ObjectIdentifier, 0, listMax) - listObjectsInput := &s3.ListObjectsInput{ - Bucket: aws.String(d.Bucket), - Prefix: aws.String(d.s3Path(path)), - } - for { - // list all the objects - resp, err := d.S3.ListObjects(listObjectsInput) - - // resp.Contents can only be empty on the first call - // if there were no more results to return after the first call, resp.IsTruncated would have been false - // and the loop would be exited without recalling ListObjects - if err != nil || len(resp.Contents) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } - - for _, key := range resp.Contents { - s3Objects = append(s3Objects, &s3.ObjectIdentifier{ - Key: key.Key, - }) - } - - // resp.Contents must have at least one element or we would have returned not found - listObjectsInput.Marker = resp.Contents[len(resp.Contents)-1].Key - - // from the s3 api docs, IsTruncated "specifies whether (true) or not (false) all of the results were returned" - // if everything has been returned, break - if resp.IsTruncated == nil || !*resp.IsTruncated { - break - } - } - - // need to chunk objects into groups of 1000 per s3 restrictions - total := len(s3Objects) - for i := 0; i < total; i += 1000 { - _, err := d.S3.DeleteObjects(&s3.DeleteObjectsInput{ - Bucket: aws.String(d.Bucket), - Delete: &s3.Delete{ - Objects: s3Objects[i:min(i+1000, total)], - Quiet: aws.Bool(false), - }, - }) - if err != nil { - return err - } - } - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. -func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - methodString := "GET" - method, ok := options["method"] - if ok { - methodString, ok = method.(string) - if !ok || (methodString != "GET" && methodString != "HEAD") { - return "", storagedriver.ErrUnsupportedMethod{} - } - } - - expiresIn := 20 * time.Minute - expires, ok := options["expiry"] - if ok { - et, ok := expires.(time.Time) - if ok { - expiresIn = et.Sub(time.Now()) - } - } - - var req *request.Request - - switch methodString { - case "GET": - req, _ = d.S3.GetObjectRequest(&s3.GetObjectInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(path)), - }) - case "HEAD": - req, _ = d.S3.HeadObjectRequest(&s3.HeadObjectInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(path)), - }) - default: - panic("unreachable") - } - - return req.Presign(expiresIn) -} - -func (d *driver) s3Path(path string) string { - return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") -} - -// S3BucketKey returns the s3 bucket key for the given storage driver path. 
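A short usage note on URLFor above, before the S3BucketKey helper continues below: "method" and "expiry" are the only option keys it consults, and the method is limited to GET and HEAD. A sketch requesting a one-hour presigned HEAD URL (time import assumed):

    // presignHead asks the driver for a temporary HEAD URL for path.
    func presignHead(ctx context.Context, d storagedriver.StorageDriver, path string) (string, error) {
        return d.URLFor(ctx, path, map[string]interface{}{
            "method": "HEAD",
            "expiry": time.Now().Add(time.Hour), // must be a time.Time
        })
    }
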
-func (d *Driver) S3BucketKey(path string) string { - return d.StorageDriver.(*driver).s3Path(path) -} - -func parseError(path string, err error) error { - if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "NoSuchKey" { - return storagedriver.PathNotFoundError{Path: path} - } - - return err -} - -func (d *driver) getEncryptionMode() *string { - if !d.Encrypt { - return nil - } - if d.KeyID == "" { - return aws.String("AES256") - } - return aws.String("aws:kms") -} - -func (d *driver) getSSEKMSKeyID() *string { - if d.KeyID != "" { - return aws.String(d.KeyID) - } - return nil -} - -func (d *driver) getContentType() *string { - return aws.String("application/octet-stream") -} - -func (d *driver) getACL() *string { - return aws.String("private") -} - -func (d *driver) getStorageClass() *string { - return aws.String(d.StorageClass) -} - -// writer attempts to upload parts to S3 in a buffered fashion where the last -// part is at least as large as the chunksize, so the multipart upload could be -// cleanly resumed in the future. This is violated if Close is called after less -// than a full chunk is written. -type writer struct { - driver *driver - key string - uploadID string - parts []*s3.Part - size int64 - readyPart []byte - pendingPart []byte - closed bool - committed bool - cancelled bool -} - -func (d *driver) newWriter(key, uploadID string, parts []*s3.Part) storagedriver.FileWriter { - var size int64 - for _, part := range parts { - size += *part.Size - } - return &writer{ - driver: d, - key: key, - uploadID: uploadID, - parts: parts, - size: size, - } -} - -type completedParts []*s3.CompletedPart - -func (a completedParts) Len() int { return len(a) } -func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } - -func (w *writer) Write(p []byte) (int, error) { - if w.closed { - return 0, fmt.Errorf("already closed") - } else if w.committed { - return 0, fmt.Errorf("already committed") - } else if w.cancelled { - return 0, fmt.Errorf("already cancelled") - } - - // If the last written part is smaller than minChunkSize, we need to make a - // new multipart upload :sadface: - if len(w.parts) > 0 && int(*w.parts[len(w.parts)-1].Size) < minChunkSize { - var completedUploadedParts completedParts - for _, part := range w.parts { - completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - sort.Sort(completedUploadedParts) - - _, err := w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ - Bucket: aws.String(w.driver.Bucket), - Key: aws.String(w.key), - UploadId: aws.String(w.uploadID), - MultipartUpload: &s3.CompletedMultipartUpload{ - Parts: completedUploadedParts, - }, - }) - if err != nil { - w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ - Bucket: aws.String(w.driver.Bucket), - Key: aws.String(w.key), - UploadId: aws.String(w.uploadID), - }) - return 0, err - } - - resp, err := w.driver.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ - Bucket: aws.String(w.driver.Bucket), - Key: aws.String(w.key), - ContentType: w.driver.getContentType(), - ACL: w.driver.getACL(), - ServerSideEncryption: w.driver.getEncryptionMode(), - StorageClass: w.driver.getStorageClass(), - }) - if err != nil { - return 0, err - } - w.uploadID = *resp.UploadId - - // If the entire written file is smaller than minChunkSize, we need to make - // a new part from scratch :double sad face: - if 
w.size < minChunkSize {
-			resp, err := w.driver.S3.GetObject(&s3.GetObjectInput{
-				Bucket: aws.String(w.driver.Bucket),
-				Key:    aws.String(w.key),
-			})
-			if err != nil {
-				return 0, err
-			}
-			defer resp.Body.Close()
-			w.parts = nil
-			w.readyPart, err = ioutil.ReadAll(resp.Body)
-			if err != nil {
-				return 0, err
-			}
-		} else {
-			// Otherwise we can use the old file as the new first part
-			copyPartResp, err := w.driver.S3.UploadPartCopy(&s3.UploadPartCopyInput{
-				Bucket:     aws.String(w.driver.Bucket),
-				CopySource: aws.String(w.driver.Bucket + "/" + w.key),
-				Key:        aws.String(w.key),
-				PartNumber: aws.Int64(1),
-				UploadId:   resp.UploadId,
-			})
-			if err != nil {
-				return 0, err
-			}
-			w.parts = []*s3.Part{
-				{
-					ETag:       copyPartResp.CopyPartResult.ETag,
-					PartNumber: aws.Int64(1),
-					Size:       aws.Int64(w.size),
-				},
-			}
-		}
-	}
-
-	var n int
-
-	for len(p) > 0 {
-		// If no parts are ready to write, fill up the first part
-		if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 {
-			if len(p) >= neededBytes {
-				w.readyPart = append(w.readyPart, p[:neededBytes]...)
-				n += neededBytes
-				p = p[neededBytes:]
-			} else {
-				w.readyPart = append(w.readyPart, p...)
-				n += len(p)
-				p = nil
-			}
-		}
-
-		if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 {
-			if len(p) >= neededBytes {
-				w.pendingPart = append(w.pendingPart, p[:neededBytes]...)
-				n += neededBytes
-				p = p[neededBytes:]
-				err := w.flushPart()
-				if err != nil {
-					w.size += int64(n)
-					return n, err
-				}
-			} else {
-				w.pendingPart = append(w.pendingPart, p...)
-				n += len(p)
-				p = nil
-			}
-		}
-	}
-	w.size += int64(n)
-	return n, nil
-}
-
-func (w *writer) Size() int64 {
-	return w.size
-}
-
-func (w *writer) Close() error {
-	if w.closed {
-		return fmt.Errorf("already closed")
-	}
-	w.closed = true
-	return w.flushPart()
-}
-
-func (w *writer) Cancel() error {
-	if w.closed {
-		return fmt.Errorf("already closed")
-	} else if w.committed {
-		return fmt.Errorf("already committed")
-	}
-	w.cancelled = true
-	_, err := w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
-		Bucket:   aws.String(w.driver.Bucket),
-		Key:      aws.String(w.key),
-		UploadId: aws.String(w.uploadID),
-	})
-	return err
-}
-
-func (w *writer) Commit() error {
-	if w.closed {
-		return fmt.Errorf("already closed")
-	} else if w.committed {
-		return fmt.Errorf("already committed")
-	} else if w.cancelled {
-		return fmt.Errorf("already cancelled")
-	}
-	err := w.flushPart()
-	if err != nil {
-		return err
-	}
-	w.committed = true
-
-	var completedUploadedParts completedParts
-	for _, part := range w.parts {
-		completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{
-			ETag:       part.ETag,
-			PartNumber: part.PartNumber,
-		})
-	}
-
-	sort.Sort(completedUploadedParts)
-
-	_, err = w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
-		Bucket:   aws.String(w.driver.Bucket),
-		Key:      aws.String(w.key),
-		UploadId: aws.String(w.uploadID),
-		MultipartUpload: &s3.CompletedMultipartUpload{
-			Parts: completedUploadedParts,
-		},
-	})
-	if err != nil {
-		w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
-			Bucket:   aws.String(w.driver.Bucket),
-			Key:      aws.String(w.key),
-			UploadId: aws.String(w.uploadID),
-		})
-		return err
-	}
-	return nil
-}
-
-// flushPart flushes buffers to write a part to S3.
-// Only called by Write (with both buffers full) and Close/Commit (always) -func (w *writer) flushPart() error { - if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { - // nothing to write - return nil - } - if len(w.pendingPart) < int(w.driver.ChunkSize) { - // closing with a small pending part - // combine ready and pending to avoid writing a small part - w.readyPart = append(w.readyPart, w.pendingPart...) - w.pendingPart = nil - } - - partNumber := aws.Int64(int64(len(w.parts) + 1)) - resp, err := w.driver.S3.UploadPart(&s3.UploadPartInput{ - Bucket: aws.String(w.driver.Bucket), - Key: aws.String(w.key), - PartNumber: partNumber, - UploadId: aws.String(w.uploadID), - Body: bytes.NewReader(w.readyPart), - }) - if err != nil { - return err - } - w.parts = append(w.parts, &s3.Part{ - ETag: resp.ETag, - PartNumber: partNumber, - Size: aws.Int64(int64(len(w.readyPart))), - }) - w.readyPart = w.pendingPart - w.pendingPart = nil - return nil -} diff --git a/docs/storage/driver/s3-aws/s3_test.go b/docs/storage/driver/s3-aws/s3_test.go deleted file mode 100644 index 70358763..00000000 --- a/docs/storage/driver/s3-aws/s3_test.go +++ /dev/null @@ -1,238 +0,0 @@ -package s3 - -import ( - "io/ioutil" - "os" - "strconv" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -var s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error) -var skipS3 func() string - -func init() { - accessKey := os.Getenv("AWS_ACCESS_KEY") - secretKey := os.Getenv("AWS_SECRET_KEY") - bucket := os.Getenv("S3_BUCKET") - encrypt := os.Getenv("S3_ENCRYPT") - keyID := os.Getenv("S3_KEY_ID") - secure := os.Getenv("S3_SECURE") - region := os.Getenv("AWS_REGION") - root, err := ioutil.TempDir("", "driver-") - regionEndpoint := os.Getenv("REGION_ENDPOINT") - if err != nil { - panic(err) - } - defer os.Remove(root) - - s3DriverConstructor = func(rootDirectory, storageClass string) (*Driver, error) { - encryptBool := false - if encrypt != "" { - encryptBool, err = strconv.ParseBool(encrypt) - if err != nil { - return nil, err - } - } - - secureBool := true - if secure != "" { - secureBool, err = strconv.ParseBool(secure) - if err != nil { - return nil, err - } - } - - parameters := DriverParameters{ - accessKey, - secretKey, - bucket, - region, - regionEndpoint, - encryptBool, - keyID, - secureBool, - minChunkSize, - rootDirectory, - storageClass, - driverName + "-test", - } - - return New(parameters) - } - - // Skip S3 storage driver tests if environment variable parameters are not provided - skipS3 = func() string { - if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { - return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" - } - return "" - } - - testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { - return s3DriverConstructor(root, s3.StorageClassStandard) - }, skipS3) -} - -func TestEmptyRootList(t *testing.T) { - if skipS3() != "" { - t.Skip(skipS3()) - } - - validRoot, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(validRoot) - - rootedDriver, err := 
s3DriverConstructor(validRoot, s3.StorageClassStandard) - if err != nil { - t.Fatalf("unexpected error creating rooted driver: %v", err) - } - - emptyRootDriver, err := s3DriverConstructor("", s3.StorageClassStandard) - if err != nil { - t.Fatalf("unexpected error creating empty root driver: %v", err) - } - - slashRootDriver, err := s3DriverConstructor("/", s3.StorageClassStandard) - if err != nil { - t.Fatalf("unexpected error creating slash root driver: %v", err) - } - - filename := "/test" - contents := []byte("contents") - ctx := context.Background() - err = rootedDriver.PutContent(ctx, filename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer rootedDriver.Delete(ctx, filename) - - keys, err := emptyRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } - - keys, err = slashRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } -} - -func TestStorageClass(t *testing.T) { - if skipS3() != "" { - t.Skip(skipS3()) - } - - rootDir, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(rootDir) - - standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) - if err != nil { - t.Fatalf("unexpected error creating driver with standard storage: %v", err) - } - - rrDriver, err := s3DriverConstructor(rootDir, s3.StorageClassReducedRedundancy) - if err != nil { - t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err) - } - - standardFilename := "/test-standard" - rrFilename := "/test-rr" - contents := []byte("contents") - ctx := context.Background() - - err = standardDriver.PutContent(ctx, standardFilename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer standardDriver.Delete(ctx, standardFilename) - - err = rrDriver.PutContent(ctx, rrFilename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer rrDriver.Delete(ctx, rrFilename) - - standardDriverUnwrapped := standardDriver.Base.StorageDriver.(*driver) - resp, err := standardDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(standardDriverUnwrapped.Bucket), - Key: aws.String(standardDriverUnwrapped.s3Path(standardFilename)), - }) - if err != nil { - t.Fatalf("unexpected error retrieving standard storage file: %v", err) - } - defer resp.Body.Close() - // Amazon only populates this header value for non-standard storage classes - if resp.StorageClass != nil { - t.Fatalf("unexpected storage class for standard file: %v", resp.StorageClass) - } - - rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver) - resp, err = rrDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(rrDriverUnwrapped.Bucket), - Key: aws.String(rrDriverUnwrapped.s3Path(rrFilename)), - }) - if err != nil { - t.Fatalf("unexpected error retrieving reduced-redundancy storage file: %v", err) - } - defer resp.Body.Close() - if resp.StorageClass == nil { - t.Fatalf("unexpected storage class for reduced-redundancy file: %v", s3.StorageClassStandard) - } else if *resp.StorageClass != s3.StorageClassReducedRedundancy { - t.Fatalf("unexpected storage class for reduced-redundancy file: %v", 
*resp.StorageClass)
-	}
-
-}
-
-func TestOverThousandBlobs(t *testing.T) {
-	if skipS3() != "" {
-		t.Skip(skipS3())
-	}
-
-	rootDir, err := ioutil.TempDir("", "driver-")
-	if err != nil {
-		t.Fatalf("unexpected error creating temporary directory: %v", err)
-	}
-	defer os.Remove(rootDir)
-
-	standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard)
-	if err != nil {
-		t.Fatalf("unexpected error creating driver with standard storage: %v", err)
-	}
-
-	ctx := context.Background()
-	for i := 0; i < 1005; i++ {
-		filename := "/thousandfiletest/file" + strconv.Itoa(i)
-		contents := []byte("contents")
-		err = standardDriver.PutContent(ctx, filename, contents)
-		if err != nil {
-			t.Fatalf("unexpected error creating content: %v", err)
-		}
-	}
-
-	// We can't actually verify deletion because read-after-delete is inconsistent,
-	// but we can ensure that no errors are returned.
-	err = standardDriver.Delete(ctx, "/thousandfiletest")
-	if err != nil {
-		t.Fatalf("unexpected error deleting thousand files: %v", err)
-	}
-}
diff --git a/docs/storage/driver/s3-goamz/s3.go b/docs/storage/driver/s3-goamz/s3.go
deleted file mode 100644
index aa2d31b7..00000000
--- a/docs/storage/driver/s3-goamz/s3.go
+++ /dev/null
@@ -1,746 +0,0 @@
-// Package s3 provides a storagedriver.StorageDriver implementation to
-// store blobs in Amazon S3 cloud storage.
-//
-// This package leverages the docker/goamz client library for interfacing with
-// S3. It is intended to be deprecated in favor of the s3-aws driver
-// implementation.
-//
-// Because S3 is a key, value store the Stat call does not support last modification
-// time for directories (directories are an abstraction for key, value stores)
-//
-// Keep in mind that S3 guarantees only read-after-write consistency for new
-// objects, but no read-after-update or list-after-write consistency.
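Since this legacy driver registers only under "s3goamz" while the aws-sdk driver claims both "s3" and "s3aws", a deployment that wants to stay on goamz must select it by name. A minimal sketch via the factory; the region and bucket values are hypothetical:

    // newGoamzDriver pins the registry to the legacy goamz implementation.
    func newGoamzDriver() (storagedriver.StorageDriver, error) {
        return factory.Create("s3goamz", map[string]interface{}{
            "region": "us-east-1",   // hypothetical
            "bucket": "my-registry", // hypothetical
        })
    }
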
-package s3 - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/docker/goamz/aws" - "github.com/docker/goamz/s3" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/client/transport" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "s3goamz" - -// minChunkSize defines the minimum multipart upload chunk size -// S3 API requires multipart upload chunks to be at least 5MB -const minChunkSize = 5 << 20 - -const defaultChunkSize = 2 * minChunkSize - -// listMax is the largest amount of objects you can request from S3 in a list call -const listMax = 1000 - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - AccessKey string - SecretKey string - Bucket string - Region aws.Region - Encrypt bool - Secure bool - V4Auth bool - ChunkSize int64 - RootDirectory string - StorageClass s3.StorageClass - UserAgent string -} - -func init() { - factory.Register(driverName, &s3DriverFactory{}) -} - -// s3DriverFactory implements the factory.StorageDriverFactory interface -type s3DriverFactory struct{} - -func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - S3 *s3.S3 - Bucket *s3.Bucket - ChunkSize int64 - Encrypt bool - RootDirectory string - StorageClass s3.StorageClass -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 -// Objects are stored at absolute keys in the provided bucket. 
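// Editor's sketch: constructing this driver through the factory registered in
// init above. Only region and bucket are hard-required by FromParameters
// below; credentials may be omitted and resolved from the environment or an
// IAM role. The bucket name and root directory are hypothetical.
func newExampleDriver() (storagedriver.StorageDriver, error) {
	return factory.Create(driverName, map[string]interface{}{
		"region":        "us-west-2",
		"bucket":        "my-registry-bucket",
		"rootdirectory": "/registry",
		"v4auth":        true,
	})
}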
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - accesskey -// - secretkey -// - region -// - bucket -// - encrypt -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - // Providing no values for these is valid in case the user is authenticating - // with an IAM on an ec2 instance (in which case the instance credentials will - // be summoned when GetAuth is called) - accessKey := parameters["accesskey"] - if accessKey == nil { - accessKey = "" - } - - secretKey := parameters["secretkey"] - if secretKey == nil { - secretKey = "" - } - - regionName := parameters["region"] - if regionName == nil || fmt.Sprint(regionName) == "" { - return nil, fmt.Errorf("No region parameter provided") - } - region := aws.GetRegion(fmt.Sprint(regionName)) - if region.Name == "" { - return nil, fmt.Errorf("Invalid region provided: %v", region) - } - - bucket := parameters["bucket"] - if bucket == nil || fmt.Sprint(bucket) == "" { - return nil, fmt.Errorf("No bucket parameter provided") - } - - encryptBool := false - encrypt := parameters["encrypt"] - switch encrypt := encrypt.(type) { - case string: - b, err := strconv.ParseBool(encrypt) - if err != nil { - return nil, fmt.Errorf("The encrypt parameter should be a boolean") - } - encryptBool = b - case bool: - encryptBool = encrypt - case nil: - // do nothing - default: - return nil, fmt.Errorf("The encrypt parameter should be a boolean") - } - - secureBool := true - secure := parameters["secure"] - switch secure := secure.(type) { - case string: - b, err := strconv.ParseBool(secure) - if err != nil { - return nil, fmt.Errorf("The secure parameter should be a boolean") - } - secureBool = b - case bool: - secureBool = secure - case nil: - // do nothing - default: - return nil, fmt.Errorf("The secure parameter should be a boolean") - } - - v4AuthBool := false - v4Auth := parameters["v4auth"] - switch v4Auth := v4Auth.(type) { - case string: - b, err := strconv.ParseBool(v4Auth) - if err != nil { - return nil, fmt.Errorf("The v4auth parameter should be a boolean") - } - v4AuthBool = b - case bool: - v4AuthBool = v4Auth - case nil: - // do nothing - default: - return nil, fmt.Errorf("The v4auth parameter should be a boolean") - } - - chunkSize := int64(defaultChunkSize) - chunkSizeParam := parameters["chunksize"] - switch v := chunkSizeParam.(type) { - case string: - vv, err := strconv.ParseInt(v, 0, 64) - if err != nil { - return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) - } - chunkSize = vv - case int64: - chunkSize = v - case int, uint, int32, uint32, uint64: - chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() - case nil: - // do nothing - default: - return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam) - } - - if chunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) - } - - rootDirectory := parameters["rootdirectory"] - if rootDirectory == nil { - rootDirectory = "" - } - - storageClass := s3.StandardStorage - storageClassParam := parameters["storageclass"] - if storageClassParam != nil { - storageClassString, ok := storageClassParam.(string) - if !ok { - return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []s3.StorageClass{s3.StandardStorage, s3.ReducedRedundancy}, storageClassParam) - } - // All valid storage class 
parameters are UPPERCASE, so be a bit more flexible here - storageClassCasted := s3.StorageClass(strings.ToUpper(storageClassString)) - if storageClassCasted != s3.StandardStorage && storageClassCasted != s3.ReducedRedundancy { - return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []s3.StorageClass{s3.StandardStorage, s3.ReducedRedundancy}, storageClassParam) - } - storageClass = storageClassCasted - } - - userAgent := parameters["useragent"] - if userAgent == nil { - userAgent = "" - } - - params := DriverParameters{ - fmt.Sprint(accessKey), - fmt.Sprint(secretKey), - fmt.Sprint(bucket), - region, - encryptBool, - secureBool, - v4AuthBool, - chunkSize, - fmt.Sprint(rootDirectory), - storageClass, - fmt.Sprint(userAgent), - } - - return New(params) -} - -// New constructs a new Driver with the given AWS credentials, region, encryption flag, and -// bucketName -func New(params DriverParameters) (*Driver, error) { - auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{}) - if err != nil { - return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err) - } - - if !params.Secure { - params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1) - } - - s3obj := s3.New(auth, params.Region) - - if params.UserAgent != "" { - s3obj.Client = &http.Client{ - Transport: transport.NewTransport(http.DefaultTransport, - transport.NewHeaderRequestModifier(http.Header{ - http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}, - }), - ), - } - } - - if params.V4Auth { - s3obj.Signature = aws.V4Signature - } else { - if params.Region.Name == "eu-central-1" { - return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication") - } - } - - bucket := s3obj.Bucket(params.Bucket) - - // TODO Currently multipart uploads have no timestamps, so this would be unwise - // if you initiated a new s3driver while another one is running on the same bucket. - // multis, _, err := bucket.ListMulti("", "") - // if err != nil { - // return nil, err - // } - - // for _, multi := range multis { - // err := multi.Abort() - // //TODO appropriate to do this error checking? - // if err != nil { - // return nil, err - // } - // } - - d := &driver{ - S3: s3obj, - Bucket: bucket, - ChunkSize: params.ChunkSize, - Encrypt: params.Encrypt, - RootDirectory: params.RootDirectory, - StorageClass: params.StorageClass, - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - content, err := d.Bucket.Get(d.s3Path(path)) - if err != nil { - return nil, parseError(path, err) - } - return content, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) -} - -// Reader retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. 
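// Editor's sketch: constructing the driver directly with New rather than via
// the factory. The field values are illustrative; empty credentials fall back
// to aws.GetAuth's environment/instance-role resolution described above, and
// fields not set (such as Encrypt) take their zero values.
func newDirectDriver() (*Driver, error) {
	return New(DriverParameters{
		AccessKey:     "", // resolved from the environment or IAM when empty
		SecretKey:     "",
		Bucket:        "my-registry-bucket", // hypothetical bucket
		Region:        aws.GetRegion("us-west-2"),
		Secure:        true,
		V4Auth:        true,
		ChunkSize:     defaultChunkSize,
		RootDirectory: "/registry",
		StorageClass:  s3.StandardStorage,
		UserAgent:     "distribution-example", // hypothetical UA string
	})
}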
-func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - headers := make(http.Header) - headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") - - resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers) - if err != nil { - if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - return nil, parseError(path, err) - } - return resp.Body, nil -} - -// Writer returns a FileWriter which will store the content written to it -// at the location designated by "path" after the call to Commit. -func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { - key := d.s3Path(path) - if !append { - // TODO (brianbland): cancel other uploads at this path - multi, err := d.Bucket.InitMulti(key, d.getContentType(), getPermissions(), d.getOptions()) - if err != nil { - return nil, err - } - return d.newWriter(key, multi, nil), nil - } - multis, _, err := d.Bucket.ListMulti(key, "") - if err != nil { - return nil, parseError(path, err) - } - for _, multi := range multis { - if key != multi.Key { - continue - } - parts, err := multi.ListParts() - if err != nil { - return nil, parseError(path, err) - } - var multiSize int64 - for _, part := range parts { - multiSize += part.Size - } - return d.newWriter(key, multi, parts), nil - } - return nil, storagedriver.PathNotFoundError{Path: path} -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) - if err != nil { - return nil, err - } - - fi := storagedriver.FileInfoFields{ - Path: path, - } - - if len(listResponse.Contents) == 1 { - if listResponse.Contents[0].Key != d.s3Path(path) { - fi.IsDir = true - } else { - fi.IsDir = false - fi.Size = listResponse.Contents[0].Size - - timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) - if err != nil { - return nil, err - } - fi.ModTime = timestamp - } - } else if len(listResponse.CommonPrefixes) == 1 { - fi.IsDir = true - } else { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, opath string) ([]string, error) { - path := opath - if path != "/" && path[len(path)-1] != '/' { - path = path + "/" - } - - // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". 
- // In those cases, there is no root prefix to replace and we must actually add a "/" to all - // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp - prefix := "" - if d.s3Path("") == "" { - prefix = "/" - } - - listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax) - if err != nil { - return nil, parseError(opath, err) - } - - files := []string{} - directories := []string{} - - for { - for _, key := range listResponse.Contents { - files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1)) - } - - for _, commonPrefix := range listResponse.CommonPrefixes { - directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) - } - - if listResponse.IsTruncated { - listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax) - if err != nil { - return nil, err - } - } else { - break - } - } - - if opath != "/" { - if len(files) == 0 && len(directories) == 0 { - // Treat empty response as missing directory, since we don't actually - // have directories in s3. - return nil, storagedriver.PathNotFoundError{Path: opath} - } - } - - return append(files, directories...), nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - /* This is terrible, but aws doesn't have an actual move. */ - _, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(), - s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath)) - if err != nil { - return parseError(sourcePath, err) - } - - return d.Delete(ctx, sourcePath) -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, path string) error { - listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax) - if err != nil || len(listResponse.Contents) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } - - s3Objects := make([]s3.Object, listMax) - - for len(listResponse.Contents) > 0 { - for index, key := range listResponse.Contents { - s3Objects[index].Key = key.Key - } - - err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]}) - if err != nil { - return nil - } - - listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax) - if err != nil { - return err - } - } - - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. -func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - methodString := "GET" - method, ok := options["method"] - if ok { - methodString, ok = method.(string) - if !ok || (methodString != "GET" && methodString != "HEAD") { - return "", storagedriver.ErrUnsupportedMethod{} - } - } - - expiresTime := time.Now().Add(20 * time.Minute) - expires, ok := options["expiry"] - if ok { - et, ok := expires.(time.Time) - if ok { - expiresTime = et - } - } - - return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil -} - -func (d *driver) s3Path(path string) string { - return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") -} - -// S3BucketKey returns the s3 bucket key for the given storage driver path. 
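// Editor's sketch: requesting a pre-signed URL from URLFor above. "method"
// and "expiry" are the only option keys the implementation consults; the
// blob path is hypothetical, and error handling is left to the caller.
func signedBlobURL(ctx context.Context, d storagedriver.StorageDriver) (string, error) {
	return d.URLFor(ctx, "/docker/registry/v2/blobs/example", map[string]interface{}{
		"method": "GET",
		"expiry": time.Now().Add(5 * time.Minute),
	})
}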
-func (d *Driver) S3BucketKey(path string) string { - return d.StorageDriver.(*driver).s3Path(path) -} - -func parseError(path string, err error) error { - if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" { - return storagedriver.PathNotFoundError{Path: path} - } - - return err -} - -func hasCode(err error, code string) bool { - s3err, ok := err.(*aws.Error) - return ok && s3err.Code == code -} - -func (d *driver) getOptions() s3.Options { - return s3.Options{ - SSE: d.Encrypt, - StorageClass: d.StorageClass, - } -} - -func getPermissions() s3.ACL { - return s3.Private -} - -func (d *driver) getContentType() string { - return "application/octet-stream" -} - -// writer attempts to upload parts to S3 in a buffered fashion where the last -// part is at least as large as the chunksize, so the multipart upload could be -// cleanly resumed in the future. This is violated if Close is called after less -// than a full chunk is written. -type writer struct { - driver *driver - key string - multi *s3.Multi - parts []s3.Part - size int64 - readyPart []byte - pendingPart []byte - closed bool - committed bool - cancelled bool -} - -func (d *driver) newWriter(key string, multi *s3.Multi, parts []s3.Part) storagedriver.FileWriter { - var size int64 - for _, part := range parts { - size += part.Size - } - return &writer{ - driver: d, - key: key, - multi: multi, - parts: parts, - size: size, - } -} - -func (w *writer) Write(p []byte) (int, error) { - if w.closed { - return 0, fmt.Errorf("already closed") - } else if w.committed { - return 0, fmt.Errorf("already committed") - } else if w.cancelled { - return 0, fmt.Errorf("already cancelled") - } - - // If the last written part is smaller than minChunkSize, we need to make a - // new multipart upload :sadface: - if len(w.parts) > 0 && int(w.parts[len(w.parts)-1].Size) < minChunkSize { - err := w.multi.Complete(w.parts) - if err != nil { - w.multi.Abort() - return 0, err - } - - multi, err := w.driver.Bucket.InitMulti(w.key, w.driver.getContentType(), getPermissions(), w.driver.getOptions()) - if err != nil { - return 0, err - } - w.multi = multi - - // If the entire written file is smaller than minChunkSize, we need to make - // a new part from scratch :double sad face: - if w.size < minChunkSize { - contents, err := w.driver.Bucket.Get(w.key) - if err != nil { - return 0, err - } - w.parts = nil - w.readyPart = contents - } else { - // Otherwise we can use the old file as the new first part - _, part, err := multi.PutPartCopy(1, s3.CopyOptions{}, w.driver.Bucket.Name+"/"+w.key) - if err != nil { - return 0, err - } - w.parts = []s3.Part{part} - } - } - - var n int - - for len(p) > 0 { - // If no parts are ready to write, fill up the first part - if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { - if len(p) >= neededBytes { - w.readyPart = append(w.readyPart, p[:neededBytes]...) - n += neededBytes - p = p[neededBytes:] - } else { - w.readyPart = append(w.readyPart, p...) - n += len(p) - p = nil - } - } - - if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { - if len(p) >= neededBytes { - w.pendingPart = append(w.pendingPart, p[:neededBytes]...) - n += neededBytes - p = p[neededBytes:] - err := w.flushPart() - if err != nil { - w.size += int64(n) - return n, err - } - } else { - w.pendingPart = append(w.pendingPart, p...) 
- n += len(p) - p = nil - } - } - } - w.size += int64(n) - return n, nil -} - -func (w *writer) Size() int64 { - return w.size -} - -func (w *writer) Close() error { - if w.closed { - return fmt.Errorf("already closed") - } - w.closed = true - return w.flushPart() -} - -func (w *writer) Cancel() error { - if w.closed { - return fmt.Errorf("already closed") - } else if w.committed { - return fmt.Errorf("already committed") - } - w.cancelled = true - err := w.multi.Abort() - return err -} - -func (w *writer) Commit() error { - if w.closed { - return fmt.Errorf("already closed") - } else if w.committed { - return fmt.Errorf("already committed") - } else if w.cancelled { - return fmt.Errorf("already cancelled") - } - err := w.flushPart() - if err != nil { - return err - } - w.committed = true - err = w.multi.Complete(w.parts) - if err != nil { - w.multi.Abort() - return err - } - return nil -} - -// flushPart flushes buffers to write a part to S3. -// Only called by Write (with both buffers full) and Close/Commit (always) -func (w *writer) flushPart() error { - if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { - // nothing to write - return nil - } - if len(w.pendingPart) < int(w.driver.ChunkSize) { - // closing with a small pending part - // combine ready and pending to avoid writing a small part - w.readyPart = append(w.readyPart, w.pendingPart...) - w.pendingPart = nil - } - - part, err := w.multi.PutPart(len(w.parts)+1, bytes.NewReader(w.readyPart)) - if err != nil { - return err - } - w.parts = append(w.parts, part) - w.readyPart = w.pendingPart - w.pendingPart = nil - return nil -} diff --git a/docs/storage/driver/s3-goamz/s3_test.go b/docs/storage/driver/s3-goamz/s3_test.go deleted file mode 100644 index 352ec3f5..00000000 --- a/docs/storage/driver/s3-goamz/s3_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package s3 - -import ( - "io/ioutil" - "os" - "strconv" - "testing" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - "github.com/docker/goamz/aws" - "github.com/docker/goamz/s3" - - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { check.TestingT(t) } - -var s3DriverConstructor func(rootDirectory string, storageClass s3.StorageClass) (*Driver, error) -var skipS3 func() string - -func init() { - accessKey := os.Getenv("AWS_ACCESS_KEY") - secretKey := os.Getenv("AWS_SECRET_KEY") - bucket := os.Getenv("S3_BUCKET") - encrypt := os.Getenv("S3_ENCRYPT") - secure := os.Getenv("S3_SECURE") - v4auth := os.Getenv("S3_USE_V4_AUTH") - region := os.Getenv("AWS_REGION") - root, err := ioutil.TempDir("", "driver-") - if err != nil { - panic(err) - } - defer os.Remove(root) - - s3DriverConstructor = func(rootDirectory string, storageClass s3.StorageClass) (*Driver, error) { - encryptBool := false - if encrypt != "" { - encryptBool, err = strconv.ParseBool(encrypt) - if err != nil { - return nil, err - } - } - - secureBool := true - if secure != "" { - secureBool, err = strconv.ParseBool(secure) - if err != nil { - return nil, err - } - } - - v4AuthBool := false - if v4auth != "" { - v4AuthBool, err = strconv.ParseBool(v4auth) - if err != nil { - return nil, err - } - } - - parameters := DriverParameters{ - accessKey, - secretKey, - bucket, - aws.GetRegion(region), - encryptBool, - secureBool, - v4AuthBool, - minChunkSize, - rootDirectory, - storageClass, - driverName + "-test", - } - - return New(parameters) - } - - // Skip S3 storage driver tests if environment variable parameters are not provided - skipS3 = func() string { - if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { - return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" - } - return "" - } - - testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { - return s3DriverConstructor(root, s3.StandardStorage) - }, skipS3) -} - -func TestEmptyRootList(t *testing.T) { - if skipS3() != "" { - t.Skip(skipS3()) - } - - validRoot, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(validRoot) - - rootedDriver, err := s3DriverConstructor(validRoot, s3.StandardStorage) - if err != nil { - t.Fatalf("unexpected error creating rooted driver: %v", err) - } - - emptyRootDriver, err := s3DriverConstructor("", s3.StandardStorage) - if err != nil { - t.Fatalf("unexpected error creating empty root driver: %v", err) - } - - slashRootDriver, err := s3DriverConstructor("/", s3.StandardStorage) - if err != nil { - t.Fatalf("unexpected error creating slash root driver: %v", err) - } - - filename := "/test" - contents := []byte("contents") - ctx := context.Background() - err = rootedDriver.PutContent(ctx, filename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer rootedDriver.Delete(ctx, filename) - - keys, err := emptyRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } - - keys, err = slashRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } -} - -func TestStorageClass(t *testing.T) { - if skipS3() != "" { - t.Skip(skipS3()) - } - - rootDir, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(rootDir) - - standardDriver, err := s3DriverConstructor(rootDir, 
s3.StandardStorage) - if err != nil { - t.Fatalf("unexpected error creating driver with standard storage: %v", err) - } - - rrDriver, err := s3DriverConstructor(rootDir, s3.ReducedRedundancy) - if err != nil { - t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err) - } - - standardFilename := "/test-standard" - rrFilename := "/test-rr" - contents := []byte("contents") - ctx := context.Background() - - err = standardDriver.PutContent(ctx, standardFilename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer standardDriver.Delete(ctx, standardFilename) - - err = rrDriver.PutContent(ctx, rrFilename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer rrDriver.Delete(ctx, rrFilename) - - standardDriverUnwrapped := standardDriver.Base.StorageDriver.(*driver) - resp, err := standardDriverUnwrapped.Bucket.GetResponse(standardDriverUnwrapped.s3Path(standardFilename)) - if err != nil { - t.Fatalf("unexpected error retrieving standard storage file: %v", err) - } - defer resp.Body.Close() - // Amazon only populates this header value for non-standard storage classes - if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != "" { - t.Fatalf("unexpected storage class for standard file: %v", storageClass) - } - - rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver) - resp, err = rrDriverUnwrapped.Bucket.GetResponse(rrDriverUnwrapped.s3Path(rrFilename)) - if err != nil { - t.Fatalf("unexpected error retrieving reduced-redundancy storage file: %v", err) - } - defer resp.Body.Close() - if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != string(s3.ReducedRedundancy) { - t.Fatalf("unexpected storage class for reduced-redundancy file: %v", storageClass) - } -} diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go deleted file mode 100644 index 548a17d8..00000000 --- a/docs/storage/driver/storagedriver.go +++ /dev/null @@ -1,165 +0,0 @@ -package driver - -import ( - "fmt" - "io" - "regexp" - "strconv" - "strings" - - "github.com/docker/distribution/context" -) - -// Version is a string representing the storage driver version, of the form -// Major.Minor. -// The registry must accept storage drivers with equal major version and greater -// minor version, but may not be compatible with older storage driver versions. -type Version string - -// Major returns the major (primary) component of a version. -func (version Version) Major() uint { - majorPart := strings.Split(string(version), ".")[0] - major, _ := strconv.ParseUint(majorPart, 10, 0) - return uint(major) -} - -// Minor returns the minor (secondary) component of a version. -func (version Version) Minor() uint { - minorPart := strings.Split(string(version), ".")[1] - minor, _ := strconv.ParseUint(minorPart, 10, 0) - return uint(minor) -} - -// CurrentVersion is the current storage driver Version. -const CurrentVersion Version = "0.1" - -// StorageDriver defines methods that a Storage Driver must implement for a -// filesystem-like key/value object storage. Storage Drivers are automatically -// registered via an internal registration mechanism, and generally created -// via the StorageDriverFactory interface (https://godoc.org/github.com/docker/distribution/registry/storage/driver/factory). 
-// Please see the aforementioned factory package for example code showing how to get an instance -// of a StorageDriver -type StorageDriver interface { - // Name returns the human-readable "name" of the driver, useful in error - // messages and logging. By convention, this will just be the registration - // name, but drivers may provide other information here. - Name() string - - // GetContent retrieves the content stored at "path" as a []byte. - // This should primarily be used for small objects. - GetContent(ctx context.Context, path string) ([]byte, error) - - // PutContent stores the []byte content at a location designated by "path". - // This should primarily be used for small objects. - PutContent(ctx context.Context, path string, content []byte) error - - // Reader retrieves an io.ReadCloser for the content stored at "path" - // with a given byte offset. - // May be used to resume reading a stream by providing a nonzero offset. - Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) - - // Writer returns a FileWriter which will store the content written to it - // at the location designated by "path" after the call to Commit. - Writer(ctx context.Context, path string, append bool) (FileWriter, error) - - // Stat retrieves the FileInfo for the given path, including the current - // size in bytes and the creation time. - Stat(ctx context.Context, path string) (FileInfo, error) - - // List returns a list of the objects that are direct descendants of the - //given path. - List(ctx context.Context, path string) ([]string, error) - - // Move moves an object stored at sourcePath to destPath, removing the - // original object. - // Note: This may be no more efficient than a copy followed by a delete for - // many implementations. - Move(ctx context.Context, sourcePath string, destPath string) error - - // Delete recursively deletes all objects stored at "path" and its subpaths. - Delete(ctx context.Context, path string) error - - // URLFor returns a URL which may be used to retrieve the content stored at - // the given path, possibly using the given options. - // May return an ErrUnsupportedMethod in certain StorageDriver - // implementations. - URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) -} - -// FileWriter provides an abstraction for an opened writable file-like object in -// the storage backend. The FileWriter must flush all content written to it on -// the call to Close, but is only required to make its content readable on a -// call to Commit. -type FileWriter interface { - io.WriteCloser - - // Size returns the number of bytes written to this FileWriter. - Size() int64 - - // Cancel removes any written content from this FileWriter. - Cancel() error - - // Commit flushes all content written to this FileWriter and makes it - // available for future calls to StorageDriver.GetContent and - // StorageDriver.Reader. - Commit() error -} - -// PathRegexp is the regular expression which each file path must match. A -// file path is absolute, beginning with a slash and containing a positive -// number of path components separated by slashes, where each component is -// restricted to alphanumeric characters or a period, underscore, or -// hyphen. -var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`) - -// ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method. 
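// Editor's sketch: a round trip through the StorageDriver contract using the
// in-memory driver from this tree. Errors are discarded only to keep the
// example short; assumes imports of fmt, the distribution context package,
// and registry/storage/driver/inmemory.
func roundTripExample() {
	ctx := context.Background()
	d := inmemory.New()
	_ = d.PutContent(ctx, "/a/b", []byte("hello"))
	content, _ := d.GetContent(ctx, "/a/b")
	fi, _ := d.Stat(ctx, "/a/b")
	keys, _ := d.List(ctx, "/a")
	fmt.Println(string(content), fi.Size(), fi.IsDir(), keys) // hello 5 false [/a/b]
	_ = d.Delete(ctx, "/a") // recursive: removes /a/b as well
}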
-type ErrUnsupportedMethod struct { - DriverName string -} - -func (err ErrUnsupportedMethod) Error() string { - return fmt.Sprintf("%s: unsupported method", err.DriverName) -} - -// PathNotFoundError is returned when operating on a nonexistent path. -type PathNotFoundError struct { - Path string - DriverName string -} - -func (err PathNotFoundError) Error() string { - return fmt.Sprintf("%s: Path not found: %s", err.DriverName, err.Path) -} - -// InvalidPathError is returned when the provided path is malformed. -type InvalidPathError struct { - Path string - DriverName string -} - -func (err InvalidPathError) Error() string { - return fmt.Sprintf("%s: invalid path: %s", err.DriverName, err.Path) -} - -// InvalidOffsetError is returned when attempting to read or write from an -// invalid offset. -type InvalidOffsetError struct { - Path string - Offset int64 - DriverName string -} - -func (err InvalidOffsetError) Error() string { - return fmt.Sprintf("%s: invalid offset: %d for path: %s", err.DriverName, err.Offset, err.Path) -} - -// Error is a catch-all error type which captures an error string and -// the driver type on which it occurred. -type Error struct { - DriverName string - Enclosed error -} - -func (err Error) Error() string { - return fmt.Sprintf("%s: %s", err.DriverName, err.Enclosed) -} diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go deleted file mode 100644 index 4191b8ba..00000000 --- a/docs/storage/driver/swift/swift.go +++ /dev/null @@ -1,837 +0,0 @@ -// Package swift provides a storagedriver.StorageDriver implementation to -// store blobs in Openstack Swift object storage. -// -// This package leverages the ncw/swift client library for interfacing with -// Swift. -// -// It supports both TempAuth authentication and Keystone authentication -// (up to version 3). -// -// As Swift has a limit on the size of a single uploaded object (by default -// this is 5GB), the driver makes use of the Swift Large Object Support -// (http://docs.openstack.org/developer/swift/overview_large_objects.html). -// Only one container is used for both manifests and data objects. Manifests -// are stored in the 'files' pseudo directory, data objects are stored under -// 'segments'. 
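// Editor's sketch: the large-object layout described above, made concrete. A
// manifest object under "<prefix>/files/<path>" carries an X-Object-Manifest
// header of the form "<container>/<prefix>/segments/...", and segment objects
// take zero-padded sequence numbers (see getSegmentPath below). The prefix
// value here is hypothetical.
func printSegmentNames() {
	segmentsPath := "registry/segments/abc/0123456789" // hypothetical prefix
	for n := 1; n <= 3; n++ {
		fmt.Printf("%s/%016d\n", segmentsPath, n)
	}
	// Output:
	// registry/segments/abc/0123456789/0000000000000001
	// registry/segments/abc/0123456789/0000000000000002
	// registry/segments/abc/0123456789/0000000000000003
}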
-package swift - -import ( - "bufio" - "bytes" - "crypto/rand" - "crypto/sha1" - "crypto/tls" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/mitchellh/mapstructure" - "github.com/ncw/swift" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" - "github.com/docker/distribution/version" -) - -const driverName = "swift" - -// defaultChunkSize defines the default size of a segment -const defaultChunkSize = 20 * 1024 * 1024 - -// minChunkSize defines the minimum size of a segment -const minChunkSize = 1 << 20 - -// contentType defines the Content-Type header associated with stored segments -const contentType = "application/octet-stream" - -// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded -var readAfterWriteTimeout = 15 * time.Second - -// readAfterWriteWait defines the time to sleep between two retries -var readAfterWriteWait = 200 * time.Millisecond - -// Parameters A struct that encapsulates all of the driver parameters after all values have been set -type Parameters struct { - Username string - Password string - AuthURL string - Tenant string - TenantID string - Domain string - DomainID string - TrustID string - Region string - AuthVersion int - Container string - Prefix string - EndpointType string - InsecureSkipVerify bool - ChunkSize int - SecretKey string - AccessKey string - TempURLContainerKey bool - TempURLMethods []string -} - -// swiftInfo maps the JSON structure returned by Swift /info endpoint -type swiftInfo struct { - Swift struct { - Version string `mapstructure:"version"` - } - Tempurl struct { - Methods []string `mapstructure:"methods"` - } -} - -func init() { - factory.Register(driverName, &swiftDriverFactory{}) -} - -// swiftDriverFactory implements the factory.StorageDriverFactory interface -type swiftDriverFactory struct{} - -func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - Conn swift.Connection - Container string - Prefix string - BulkDeleteSupport bool - ChunkSize int - SecretKey string - AccessKey string - TempURLContainerKey bool - TempURLMethods []string -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Openstack Swift -// Objects are stored at absolute keys in the provided container. 
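// Editor's sketch: constructing the driver through the factory using the
// lowercase keys that mapstructure decodes into Parameters above. username,
// password, authurl, and container are the keys FromParameters requires; all
// values here are hypothetical.
func newExampleSwiftDriver() (storagedriver.StorageDriver, error) {
	return factory.Create(driverName, map[string]interface{}{
		"username":  "demo",
		"password":  "secret",
		"authurl":   "https://keystone.example.com/v3",
		"container": "registry",
		"region":    "RegionOne",
	})
}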
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - username -// - password -// - authurl -// - container -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - params := Parameters{ - ChunkSize: defaultChunkSize, - InsecureSkipVerify: false, - } - - if err := mapstructure.Decode(parameters, ¶ms); err != nil { - return nil, err - } - - if params.Username == "" { - return nil, fmt.Errorf("No username parameter provided") - } - - if params.Password == "" { - return nil, fmt.Errorf("No password parameter provided") - } - - if params.AuthURL == "" { - return nil, fmt.Errorf("No authurl parameter provided") - } - - if params.Container == "" { - return nil, fmt.Errorf("No container parameter provided") - } - - if params.ChunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize) - } - - return New(params) -} - -// New constructs a new Driver with the given Openstack Swift credentials and container name -func New(params Parameters) (*Driver, error) { - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - MaxIdleConnsPerHost: 2048, - TLSClientConfig: &tls.Config{InsecureSkipVerify: params.InsecureSkipVerify}, - } - - ct := swift.Connection{ - UserName: params.Username, - ApiKey: params.Password, - AuthUrl: params.AuthURL, - Region: params.Region, - AuthVersion: params.AuthVersion, - UserAgent: "distribution/" + version.Version, - Tenant: params.Tenant, - TenantId: params.TenantID, - Domain: params.Domain, - DomainId: params.DomainID, - TrustId: params.TrustID, - EndpointType: swift.EndpointType(params.EndpointType), - Transport: transport, - ConnectTimeout: 60 * time.Second, - Timeout: 15 * 60 * time.Second, - } - err := ct.Authenticate() - if err != nil { - return nil, fmt.Errorf("Swift authentication failed: %s", err) - } - - if _, _, err := ct.Container(params.Container); err == swift.ContainerNotFound { - if err := ct.ContainerCreate(params.Container, nil); err != nil { - return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) - } - } else if err != nil { - return nil, fmt.Errorf("Failed to retrieve info about container %s (%s)", params.Container, err) - } - - d := &driver{ - Conn: ct, - Container: params.Container, - Prefix: params.Prefix, - ChunkSize: params.ChunkSize, - TempURLMethods: make([]string, 0), - AccessKey: params.AccessKey, - } - - info := swiftInfo{} - if config, err := d.Conn.QueryInfo(); err == nil { - _, d.BulkDeleteSupport = config["bulk_delete"] - - if err := mapstructure.Decode(config, &info); err == nil { - d.TempURLContainerKey = info.Swift.Version >= "2.3.0" - d.TempURLMethods = info.Tempurl.Methods - } - } else { - d.TempURLContainerKey = params.TempURLContainerKey - d.TempURLMethods = params.TempURLMethods - } - - if len(d.TempURLMethods) > 0 { - secretKey := params.SecretKey - if secretKey == "" { - secretKey, _ = generateSecret() - } - - // Since Swift 2.2.2, we can now set secret keys on containers - // in addition to the account secret keys. Use them in preference. 
- if d.TempURLContainerKey { - _, containerHeaders, err := d.Conn.Container(d.Container) - if err != nil { - return nil, fmt.Errorf("Failed to fetch container info %s (%s)", d.Container, err) - } - - d.SecretKey = containerHeaders["X-Container-Meta-Temp-Url-Key"] - if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) { - m := swift.Metadata{} - m["temp-url-key"] = secretKey - if d.Conn.ContainerUpdate(d.Container, m.ContainerHeaders()); err == nil { - d.SecretKey = secretKey - } - } - } else { - // Use the account secret key - _, accountHeaders, err := d.Conn.Account() - if err != nil { - return nil, fmt.Errorf("Failed to fetch account info (%s)", err) - } - - d.SecretKey = accountHeaders["X-Account-Meta-Temp-Url-Key"] - if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) { - m := swift.Metadata{} - m["temp-url-key"] = secretKey - if err := d.Conn.AccountUpdate(m.AccountHeaders()); err == nil { - d.SecretKey = secretKey - } - } - } - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path)) - if err == swift.ObjectNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return content, err -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, contentType) - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err -} - -// Reader retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - headers := make(swift.Headers) - headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-" - - file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) - if err == swift.ObjectNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - return file, err -} - -// Writer returns a FileWriter which will store the content written to it -// at the location designated by "path" after the call to Commit. 
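// Editor's sketch: how a caller might resume an interrupted upload through
// the FileWriter contract that Writer implements below, where append=true
// picks up the existing manifest and segments. The path and payload split
// are illustrative, and intermediate Write/Close errors are not checked.
func resumeUpload(ctx context.Context, d storagedriver.StorageDriver, first, rest []byte) error {
	w, err := d.Writer(ctx, "/repo/blob", false) // fresh upload
	if err != nil {
		return err
	}
	w.Write(first)
	w.Close() // flushes what was written so far without committing

	w, err = d.Writer(ctx, "/repo/blob", true) // append resumes from existing segments
	if err != nil {
		return err
	}
	w.Write(rest)
	return w.Commit() // the object becomes readable at /repo/blob
}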
-func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { - var ( - segments []swift.Object - segmentsPath string - err error - ) - - if !append { - segmentsPath, err = d.swiftSegmentPath(path) - if err != nil { - return nil, err - } - } else { - info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path)) - if err == swift.ObjectNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } else if err != nil { - return nil, err - } - manifest, ok := headers["X-Object-Manifest"] - if !ok { - segmentsPath, err = d.swiftSegmentPath(path) - if err != nil { - return nil, err - } - if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegmentPath(segmentsPath, len(segments))); err != nil { - return nil, err - } - segments = []swift.Object{info} - } else { - _, segmentsPath = parseManifest(manifest) - if segments, err = d.getAllSegments(segmentsPath); err != nil { - return nil, err - } - } - } - - return d.newWriter(path, segmentsPath, segments), nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - swiftPath := d.swiftPath(path) - opts := &swift.ObjectsOpts{ - Prefix: swiftPath, - Delimiter: '/', - } - - objects, err := d.Conn.ObjectsAll(d.Container, opts) - if err != nil { - if err == swift.ContainerNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return nil, err - } - - fi := storagedriver.FileInfoFields{ - Path: strings.TrimPrefix(strings.TrimSuffix(swiftPath, "/"), d.swiftPath("/")), - } - - for _, obj := range objects { - if obj.PseudoDirectory && obj.Name == swiftPath+"/" { - fi.IsDir = true - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil - } else if obj.Name == swiftPath { - // The file exists. But on Swift 1.12, the 'bytes' field is always 0 so - // we need to do a separate HEAD request. - break - } - } - - //Don't trust an empty `objects` slice. A container listing can be - //outdated. For files, we can make a HEAD request on the object which - //reports existence (at least) much more reliably. - info, _, err := d.Conn.Object(d.Container, swiftPath) - if err != nil { - if err == swift.ObjectNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return nil, err - } - fi.IsDir = false - fi.Size = info.Bytes - fi.ModTime = info.LastModified - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, path string) ([]string, error) { - var files []string - - prefix := d.swiftPath(path) - if prefix != "" { - prefix += "/" - } - - opts := &swift.ObjectsOpts{ - Prefix: prefix, - Delimiter: '/', - } - - objects, err := d.Conn.ObjectsAll(d.Container, opts) - for _, obj := range objects { - files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) - } - - if err == swift.ContainerNotFound || (len(objects) == 0 && path != "/") { - return files, storagedriver.PathNotFoundError{Path: path} - } - return files, err -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. 
-func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - _, headers, err := d.Conn.Object(d.Container, d.swiftPath(sourcePath)) - if err == nil { - if manifest, ok := headers["X-Object-Manifest"]; ok { - if err = d.createManifest(destPath, manifest); err != nil { - return err - } - err = d.Conn.ObjectDelete(d.Container, d.swiftPath(sourcePath)) - } else { - err = d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath)) - } - } - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: sourcePath} - } - return err -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, path string) error { - opts := swift.ObjectsOpts{ - Prefix: d.swiftPath(path) + "/", - } - - objects, err := d.Conn.ObjectsAll(d.Container, &opts) - if err != nil { - if err == swift.ContainerNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - - for _, obj := range objects { - if obj.PseudoDirectory { - continue - } - if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil { - manifest, ok := headers["X-Object-Manifest"] - if ok { - _, prefix := parseManifest(manifest) - segments, err := d.getAllSegments(prefix) - if err != nil { - return err - } - objects = append(objects, segments...) - } - } else { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: obj.Name} - } - return err - } - } - - if d.BulkDeleteSupport && len(objects) > 0 { - filenames := make([]string, len(objects)) - for i, obj := range objects { - filenames[i] = obj.Name - } - _, err = d.Conn.BulkDelete(d.Container, filenames) - // Don't fail on ObjectNotFound because eventual consistency - // makes this situation normal. - if err != nil && err != swift.Forbidden && err != swift.ObjectNotFound { - if err == swift.ContainerNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - } else { - for _, obj := range objects { - if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: obj.Name} - } - return err - } - } - } - - _, _, err = d.Conn.Object(d.Container, d.swiftPath(path)) - if err == nil { - if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - } else if err == swift.ObjectNotFound { - if len(objects) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } - } else { - return err - } - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. 
-func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - if d.SecretKey == "" { - return "", storagedriver.ErrUnsupportedMethod{} - } - - methodString := "GET" - method, ok := options["method"] - if ok { - if methodString, ok = method.(string); !ok { - return "", storagedriver.ErrUnsupportedMethod{} - } - } - - if methodString == "HEAD" { - // A "HEAD" request on a temporary URL is allowed if the - // signature was generated with "GET", "POST" or "PUT" - methodString = "GET" - } - - supported := false - for _, method := range d.TempURLMethods { - if method == methodString { - supported = true - break - } - } - - if !supported { - return "", storagedriver.ErrUnsupportedMethod{} - } - - expiresTime := time.Now().Add(20 * time.Minute) - expires, ok := options["expiry"] - if ok { - et, ok := expires.(time.Time) - if ok { - expiresTime = et - } - } - - tempURL := d.Conn.ObjectTempUrl(d.Container, d.swiftPath(path), d.SecretKey, methodString, expiresTime) - - if d.AccessKey != "" { - // On HP Cloud, the signature must be in the form of tenant_id:access_key:signature - url, _ := url.Parse(tempURL) - query := url.Query() - query.Set("temp_url_sig", fmt.Sprintf("%s:%s:%s", d.Conn.TenantId, d.AccessKey, query.Get("temp_url_sig"))) - url.RawQuery = query.Encode() - tempURL = url.String() - } - - return tempURL, nil -} - -func (d *driver) swiftPath(path string) string { - return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/") -} - -func (d *driver) swiftSegmentPath(path string) (string, error) { - checksum := sha1.New() - random := make([]byte, 32) - if _, err := rand.Read(random); err != nil { - return "", err - } - path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) - return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil -} - -func (d *driver) getAllSegments(path string) ([]swift.Object, error) { - //a simple container listing works 99.9% of the time - segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) - if err != nil { - if err == swift.ContainerNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return nil, err - } - - //build a lookup table by object name - hasObjectName := make(map[string]struct{}) - for _, segment := range segments { - hasObjectName[segment.Name] = struct{}{} - } - - //The container listing might be outdated (i.e. not contain all existing - //segment objects yet) because of temporary inconsistency (Swift is only - //eventually consistent!). Check its completeness. - segmentNumber := 0 - for { - segmentNumber++ - segmentPath := getSegmentPath(path, segmentNumber) - - if _, seen := hasObjectName[segmentPath]; seen { - continue - } - - //This segment is missing in the container listing. Use a more reliable - //request to check its existence. (HEAD requests on segments are - //guaranteed to return the correct metadata, except for the pathological - //case of an outage of large parts of the Swift cluster or its network, - //since every segment is only written once.) - segment, _, err := d.Conn.Object(d.Container, segmentPath) - switch err { - case nil: - //found new segment -> keep going, more might be missing - segments = append(segments, segment) - continue - case swift.ObjectNotFound: - //This segment is missing. Since we upload segments sequentially, - //there won't be any more segments after it. 
- return segments, nil - default: - return nil, err //unexpected error - } - } -} - -func (d *driver) createManifest(path string, segments string) error { - headers := make(swift.Headers) - headers["X-Object-Manifest"] = segments - manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", contentType, headers) - if err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - if err := manifest.Close(); err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - return nil -} - -func parseManifest(manifest string) (container string, prefix string) { - components := strings.SplitN(manifest, "/", 2) - container = components[0] - if len(components) > 1 { - prefix = components[1] - } - return container, prefix -} - -func generateSecret() (string, error) { - var secretBytes [32]byte - if _, err := rand.Read(secretBytes[:]); err != nil { - return "", fmt.Errorf("could not generate random bytes for Swift secret key: %v", err) - } - return hex.EncodeToString(secretBytes[:]), nil -} - -func getSegmentPath(segmentsPath string, partNumber int) string { - return fmt.Sprintf("%s/%016d", segmentsPath, partNumber) -} - -type writer struct { - driver *driver - path string - segmentsPath string - size int64 - bw *bufio.Writer - closed bool - committed bool - cancelled bool -} - -func (d *driver) newWriter(path, segmentsPath string, segments []swift.Object) storagedriver.FileWriter { - var size int64 - for _, segment := range segments { - size += segment.Bytes - } - return &writer{ - driver: d, - path: path, - segmentsPath: segmentsPath, - size: size, - bw: bufio.NewWriterSize(&segmentWriter{ - conn: d.Conn, - container: d.Container, - segmentsPath: segmentsPath, - segmentNumber: len(segments) + 1, - maxChunkSize: d.ChunkSize, - }, d.ChunkSize), - } -} - -func (w *writer) Write(p []byte) (int, error) { - if w.closed { - return 0, fmt.Errorf("already closed") - } else if w.committed { - return 0, fmt.Errorf("already committed") - } else if w.cancelled { - return 0, fmt.Errorf("already cancelled") - } - - n, err := w.bw.Write(p) - w.size += int64(n) - return n, err -} - -func (w *writer) Size() int64 { - return w.size -} - -func (w *writer) Close() error { - if w.closed { - return fmt.Errorf("already closed") - } - - if err := w.bw.Flush(); err != nil { - return err - } - - if !w.committed && !w.cancelled { - if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil { - return err - } - if err := w.waitForSegmentsToShowUp(); err != nil { - return err - } - } - w.closed = true - - return nil -} - -func (w *writer) Cancel() error { - if w.closed { - return fmt.Errorf("already closed") - } else if w.committed { - return fmt.Errorf("already committed") - } - w.cancelled = true - return w.driver.Delete(context.Background(), w.path) -} - -func (w *writer) Commit() error { - if w.closed { - return fmt.Errorf("already closed") - } else if w.committed { - return fmt.Errorf("already committed") - } else if w.cancelled { - return fmt.Errorf("already cancelled") - } - - if err := w.bw.Flush(); err != nil { - return err - } - - if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil { - return err - } - - w.committed = true - return w.waitForSegmentsToShowUp() -} - -func (w *writer) waitForSegmentsToShowUp() error { - var err error - waitingTime := readAfterWriteWait - endTime := 
time.Now().Add(readAfterWriteTimeout) - - for { - var info swift.Object - if info, _, err = w.driver.Conn.Object(w.driver.Container, w.driver.swiftPath(w.path)); err == nil { - if info.Bytes == w.size { - break - } - err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", w.path) - } - if time.Now().Add(waitingTime).After(endTime) { - break - } - time.Sleep(waitingTime) - waitingTime *= 2 - } - - return err -} - -type segmentWriter struct { - conn swift.Connection - container string - segmentsPath string - segmentNumber int - maxChunkSize int -} - -func (sw *segmentWriter) Write(p []byte) (int, error) { - n := 0 - for offset := 0; offset < len(p); offset += sw.maxChunkSize { - chunkSize := sw.maxChunkSize - if offset+chunkSize > len(p) { - chunkSize = len(p) - offset - } - _, err := sw.conn.ObjectPut(sw.container, getSegmentPath(sw.segmentsPath, sw.segmentNumber), bytes.NewReader(p[offset:offset+chunkSize]), false, "", contentType, nil) - if err != nil { - return n, err - } - - sw.segmentNumber++ - n += chunkSize - } - - return n, nil -} diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go deleted file mode 100644 index 8979bd33..00000000 --- a/docs/storage/driver/swift/swift_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package swift - -import ( - "io/ioutil" - "os" - "strconv" - "strings" - "testing" - - "github.com/ncw/swift/swifttest" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -var swiftDriverConstructor func(prefix string) (*Driver, error) - -func init() { - var ( - username string - password string - authURL string - tenant string - tenantID string - domain string - domainID string - trustID string - container string - region string - AuthVersion int - endpointType string - insecureSkipVerify bool - secretKey string - accessKey string - containerKey bool - tempURLMethods []string - - swiftServer *swifttest.SwiftServer - err error - ) - username = os.Getenv("SWIFT_USERNAME") - password = os.Getenv("SWIFT_PASSWORD") - authURL = os.Getenv("SWIFT_AUTH_URL") - tenant = os.Getenv("SWIFT_TENANT_NAME") - tenantID = os.Getenv("SWIFT_TENANT_ID") - domain = os.Getenv("SWIFT_DOMAIN_NAME") - domainID = os.Getenv("SWIFT_DOMAIN_ID") - trustID = os.Getenv("SWIFT_TRUST_ID") - container = os.Getenv("SWIFT_CONTAINER_NAME") - region = os.Getenv("SWIFT_REGION_NAME") - AuthVersion, _ = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION")) - endpointType = os.Getenv("SWIFT_ENDPOINT_TYPE") - insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) - secretKey = os.Getenv("SWIFT_SECRET_KEY") - accessKey = os.Getenv("SWIFT_ACCESS_KEY") - containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY")) - tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",") - - if username == "" || password == "" || authURL == "" || container == "" { - if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { - panic(err) - } - username = "swifttest" - password = "swifttest" - authURL = swiftServer.AuthURL - container = "test" - } - - prefix, err := ioutil.TempDir("", "driver-") - if err != nil { - panic(err) - } - defer os.Remove(prefix) - - swiftDriverConstructor = func(root string) (*Driver, error) { - parameters := Parameters{ - 
username, - password, - authURL, - tenant, - tenantID, - domain, - domainID, - trustID, - region, - AuthVersion, - container, - root, - endpointType, - insecureSkipVerify, - defaultChunkSize, - secretKey, - accessKey, - containerKey, - tempURLMethods, - } - - return New(parameters) - } - - driverConstructor := func() (storagedriver.StorageDriver, error) { - return swiftDriverConstructor(prefix) - } - - testsuites.RegisterSuite(driverConstructor, testsuites.NeverSkip) -} - -func TestEmptyRootList(t *testing.T) { - validRoot, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(validRoot) - - rootedDriver, err := swiftDriverConstructor(validRoot) - if err != nil { - t.Fatalf("unexpected error creating rooted driver: %v", err) - } - - emptyRootDriver, err := swiftDriverConstructor("") - if err != nil { - t.Fatalf("unexpected error creating empty root driver: %v", err) - } - - slashRootDriver, err := swiftDriverConstructor("/") - if err != nil { - t.Fatalf("unexpected error creating slash root driver: %v", err) - } - - filename := "/test" - contents := []byte("contents") - ctx := context.Background() - err = rootedDriver.PutContent(ctx, filename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - - keys, err := emptyRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } - - keys, err = slashRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } - - // Create an object with a path nested under the existing object - err = rootedDriver.PutContent(ctx, filename+"/file1", contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - - err = rootedDriver.Delete(ctx, filename) - if err != nil { - t.Fatalf("failed to delete: %v", err) - } - - keys, err = rootedDriver.List(ctx, "/") - if err != nil { - t.Fatalf("failed to list objects after deletion: %v", err) - } - - if len(keys) != 0 { - t.Fatal("delete did not remove nested objects") - } -} diff --git a/docs/storage/driver/testdriver/testdriver.go b/docs/storage/driver/testdriver/testdriver.go deleted file mode 100644 index 988e5d33..00000000 --- a/docs/storage/driver/testdriver/testdriver.go +++ /dev/null @@ -1,71 +0,0 @@ -package testdriver - -import ( - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/factory" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -const driverName = "testdriver" - -func init() { - factory.Register(driverName, &testDriverFactory{}) -} - -// testDriverFactory implements the factory.StorageDriverFactory interface. -type testDriverFactory struct{} - -func (factory *testDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return New(), nil -} - -// TestDriver is a StorageDriver for testing purposes. The Writer returned by this driver -// simulates the case where Write operations are buffered. This causes the value returned by Size to lag -// behind until Close (or Commit, or Cancel) is called. 
-type TestDriver struct { - storagedriver.StorageDriver -} - -type testFileWriter struct { - storagedriver.FileWriter - prevchunk []byte -} - -var _ storagedriver.StorageDriver = &TestDriver{} - -// New constructs a new StorageDriver for testing purposes. The Writer returned by this driver -// simulates the case where Write operations are buffered. This causes the value returned by Size to lag -// behind until Close (or Commit, or Cancel) is called. -func New() *TestDriver { - return &TestDriver{StorageDriver: inmemory.New()} -} - -// Writer returns a FileWriter which will store the content written to it -// at the location designated by "path" after the call to Commit. -func (td *TestDriver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { - fw, err := td.StorageDriver.Writer(ctx, path, append) - return &testFileWriter{FileWriter: fw}, err -} - -func (tfw *testFileWriter) Write(p []byte) (int, error) { - _, err := tfw.FileWriter.Write(tfw.prevchunk) - tfw.prevchunk = make([]byte, len(p)) - copy(tfw.prevchunk, p) - return len(p), err -} - -func (tfw *testFileWriter) Close() error { - tfw.Write(nil) - return tfw.FileWriter.Close() -} - -func (tfw *testFileWriter) Cancel() error { - tfw.Write(nil) - return tfw.FileWriter.Cancel() -} - -func (tfw *testFileWriter) Commit() error { - tfw.Write(nil) - return tfw.FileWriter.Commit() -} diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go deleted file mode 100644 index de8e3143..00000000 --- a/docs/storage/driver/testsuites/testsuites.go +++ /dev/null @@ -1,1229 +0,0 @@ -package testsuites - -import ( - "bytes" - "crypto/sha1" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "path" - "sort" - "strings" - "sync" - "testing" - "time" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "gopkg.in/check.v1" -) - -// Test hooks up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -// RegisterSuite registers an in-process storage driver test suite with -// the go test runner. -func RegisterSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { - check.Suite(&DriverSuite{ - Constructor: driverConstructor, - SkipCheck: skipCheck, - ctx: context.Background(), - }) -} - -// SkipCheck is a function used to determine if a test suite should be skipped. -// If a SkipCheck returns a non-empty skip reason, the suite is skipped with -// the given reason. -type SkipCheck func() (reason string) - -// NeverSkip is a default SkipCheck which never skips the suite. -var NeverSkip SkipCheck = func() string { return "" } - -// DriverConstructor is a function which returns a new -// storagedriver.StorageDriver. -type DriverConstructor func() (storagedriver.StorageDriver, error) - -// DriverTeardown is a function which cleans up a suite's -// storagedriver.StorageDriver. -type DriverTeardown func() error - -// DriverSuite is a gocheck test suite designed to test a -// storagedriver.StorageDriver. The intended way to create a DriverSuite is -// with RegisterSuite. -type DriverSuite struct { - Constructor DriverConstructor - Teardown DriverTeardown - SkipCheck - storagedriver.StorageDriver - ctx context.Context -} - -// SetUpSuite sets up the gocheck test suite. 
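The testFileWriter above holds each chunk back for one Write call, so the bytes visible to the wrapped FileWriter (and therefore Size) lag behind what the caller has written until Close, Commit, or Cancel flushes the final chunk. A minimal standalone sketch of that one-chunk-behind buffering, using only the standard library (laggingWriter and its names are illustrative, not part of the registry code):

    package main

    import (
        "bytes"
        "fmt"
    )

    // laggingWriter forwards the previously held chunk on each Write and
    // retains the newest one, so the destination is always one chunk behind.
    type laggingWriter struct {
        dst  *bytes.Buffer
        prev []byte
    }

    func (w *laggingWriter) Write(p []byte) (int, error) {
        if _, err := w.dst.Write(w.prev); err != nil {
            return 0, err
        }
        w.prev = append([]byte(nil), p...)
        return len(p), nil
    }

    // Flush pushes out the held chunk, as Close/Commit/Cancel do above.
    func (w *laggingWriter) Flush() error {
        _, err := w.dst.Write(w.prev)
        w.prev = nil
        return err
    }

    func main() {
        w := &laggingWriter{dst: &bytes.Buffer{}}
        w.Write([]byte("hello "))
        w.Write([]byte("world"))
        fmt.Println(w.dst.Len()) // 6: destination lags one chunk behind
        w.Flush()
        fmt.Println(w.dst.Len()) // 11: caught up after the flush
    }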
-func (suite *DriverSuite) SetUpSuite(c *check.C) { - if reason := suite.SkipCheck(); reason != "" { - c.Skip(reason) - } - d, err := suite.Constructor() - c.Assert(err, check.IsNil) - suite.StorageDriver = d -} - -// TearDownSuite tears down the gocheck test suite. -func (suite *DriverSuite) TearDownSuite(c *check.C) { - if suite.Teardown != nil { - err := suite.Teardown() - c.Assert(err, check.IsNil) - } -} - -// TearDownTest tears down the gocheck test. -// This causes the suite to abort if any files are left around in the storage -// driver. -func (suite *DriverSuite) TearDownTest(c *check.C) { - files, _ := suite.StorageDriver.List(suite.ctx, "/") - if len(files) > 0 { - c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files) - } -} - -// TestRootExists ensures that all storage drivers have a root path by default. -func (suite *DriverSuite) TestRootExists(c *check.C) { - _, err := suite.StorageDriver.List(suite.ctx, "/") - if err != nil { - c.Fatalf(`the root path "/" should always exist: %v`, err) - } -} - -// TestValidPaths checks that various valid file paths are accepted by the -// storage driver. -func (suite *DriverSuite) TestValidPaths(c *check.C) { - contents := randomContents(64) - validFiles := []string{ - "/a", - "/2", - "/aa", - "/a.a", - "/0-9/abcdefg", - "/abcdefg/z.75", - "/abc/1.2.3.4.5-6_zyx/123.z/4", - "/docker/docker-registry", - "/123.abc", - "/abc./abc", - "/.abc", - "/a--b", - "/a-.b", - "/_.abc", - "/Docker/docker-registry", - "/Abc/Cba"} - - for _, filename := range validFiles { - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - defer suite.deletePath(c, firstPart(filename)) - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, contents) - } -} - -func (suite *DriverSuite) deletePath(c *check.C, path string) { - for tries := 2; tries > 0; tries-- { - err := suite.StorageDriver.Delete(suite.ctx, path) - if _, ok := err.(storagedriver.PathNotFoundError); ok { - err = nil - } - c.Assert(err, check.IsNil) - paths, err := suite.StorageDriver.List(suite.ctx, path) - if len(paths) == 0 { - break - } - time.Sleep(time.Second * 2) - } -} - -// TestInvalidPaths checks that various invalid file paths are rejected by the -// storage driver. -func (suite *DriverSuite) TestInvalidPaths(c *check.C) { - contents := randomContents(64) - invalidFiles := []string{ - "", - "/", - "abc", - "123.abc", - "//bcd", - "/abc_123/"} - - for _, filename := range invalidFiles { - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - // only delete if file was successfully written - if err == nil { - defer suite.deletePath(c, firstPart(filename)) - } - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - } -} - -// TestWriteRead1 tests a simple write-read workflow. -func (suite *DriverSuite) TestWriteRead1(c *check.C) { - filename := randomPath(32) - contents := []byte("a") - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteRead2 tests a simple write-read workflow with unicode data. 
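The deletePath helper above retries the delete and re-lists the path, sleeping between attempts, because some backends are only eventually consistent. The same retry-until-gone loop in isolation; del and list are placeholders for a driver's Delete and List, and treating a list error as success mirrors the suite's tolerance of PathNotFoundError:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // deleteUntilGone deletes a path and polls the listing until no children
    // remain, retrying to absorb eventual consistency in the backend.
    func deleteUntilGone(del func(string) error, list func(string) ([]string, error),
        path string, tries int, wait time.Duration) error {
        for ; tries > 0; tries-- {
            if err := del(path); err != nil {
                return err
            }
            // A listing error here usually means the path itself is gone.
            if children, err := list(path); err != nil || len(children) == 0 {
                return nil
            }
            time.Sleep(wait)
        }
        return errors.New("path still listed after retries: " + path)
    }

    func main() {
        // Toy backend that "forgets" the children only on the second look.
        looks := 0
        list := func(string) ([]string, error) {
            looks++
            if looks < 2 {
                return []string{"/a/b"}, nil
            }
            return nil, nil
        }
        del := func(string) error { return nil }
        fmt.Println(deleteUntilGone(del, list, "/a", 2, 10*time.Millisecond)) // <nil>
    }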
-func (suite *DriverSuite) TestWriteRead2(c *check.C) { - filename := randomPath(32) - contents := []byte("\xc3\x9f") - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteRead3 tests a simple write-read workflow with a small string. -func (suite *DriverSuite) TestWriteRead3(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteRead4 tests a simple write-read workflow with 1MB of data. -func (suite *DriverSuite) TestWriteRead4(c *check.C) { - filename := randomPath(32) - contents := randomContents(1024 * 1024) - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage -// driver safely. -func (suite *DriverSuite) TestWriteReadNonUTF8(c *check.C) { - filename := randomPath(32) - contents := []byte{0x80, 0x80, 0x80, 0x80} - suite.writeReadCompare(c, filename, contents) -} - -// TestTruncate tests that putting smaller contents than an original file does -// remove the excess contents. -func (suite *DriverSuite) TestTruncate(c *check.C) { - filename := randomPath(32) - contents := randomContents(1024 * 1024) - suite.writeReadCompare(c, filename, contents) - - contents = randomContents(1024) - suite.writeReadCompare(c, filename, contents) -} - -// TestReadNonexistent tests reading content from an empty path. -func (suite *DriverSuite) TestReadNonexistent(c *check.C) { - filename := randomPath(32) - _, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestWriteReadStreams1 tests a simple write-read streaming workflow. -func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { - filename := randomPath(32) - contents := []byte("a") - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreams2 tests a simple write-read streaming workflow with -// unicode data. -func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { - filename := randomPath(32) - contents := []byte("\xc3\x9f") - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreams3 tests a simple write-read streaming workflow with a -// small amount of data. -func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB -// of data. -func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) { - filename := randomPath(32) - contents := randomContents(1024 * 1024) - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the -// storage driver safely. -func (suite *DriverSuite) TestWriteReadStreamsNonUTF8(c *check.C) { - filename := randomPath(32) - contents := []byte{0x80, 0x80, 0x80, 0x80} - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage -// driver safely. 
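The 5GB streaming test that follows computes a checksum of the upload without a second pass by tee-ing the source reader through a hash as it is copied to the writer. The pattern in isolation, with io.Discard standing in for the storage driver's FileWriter:

    package main

    import (
        "crypto/sha1"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        src := strings.NewReader("example payload")
        sum := sha1.New()

        // Everything read from tee is simultaneously written to the hash,
        // so the checksum is ready the moment the copy finishes.
        tee := io.TeeReader(src, sum)
        n, err := io.Copy(io.Discard, tee) // stand-in for the storage writer
        if err != nil {
            panic(err)
        }
        fmt.Printf("streamed %d bytes, sha1=%x\n", n, sum.Sum(nil))
    }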
-func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { - if testing.Short() { - c.Skip("Skipping test in short mode") - } - - filename := randomPath(32) - defer suite.deletePath(c, firstPart(filename)) - - checksum := sha1.New() - var fileSize int64 = 5 * 1024 * 1024 * 1024 - - contents := newRandReader(fileSize) - - writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) - c.Assert(err, check.IsNil) - written, err := io.Copy(writer, io.TeeReader(contents, checksum)) - c.Assert(err, check.IsNil) - c.Assert(written, check.Equals, fileSize) - - err = writer.Commit() - c.Assert(err, check.IsNil) - err = writer.Close() - c.Assert(err, check.IsNil) - - reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - writtenChecksum := sha1.New() - io.Copy(writtenChecksum, reader) - - c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil)) -} - -// TestReaderWithOffset tests that the appropriate data is streamed when -// reading with a given offset. -func (suite *DriverSuite) TestReaderWithOffset(c *check.C) { - filename := randomPath(32) - defer suite.deletePath(c, firstPart(filename)) - - chunkSize := int64(32) - - contentsChunk1 := randomContents(chunkSize) - contentsChunk2 := randomContents(chunkSize) - contentsChunk3 := randomContents(chunkSize) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) - c.Assert(err, check.IsNil) - - reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) - - reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err = ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...)) - - reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*2) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err = ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - c.Assert(readContents, check.DeepEquals, contentsChunk3) - - // Ensure we get an invalid offset error for negative offsets. - reader, err = suite.StorageDriver.Reader(suite.ctx, filename, -1) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) - c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) - c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) - c.Assert(reader, check.IsNil) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - // Read past the end of the content and make sure we get a reader that - // returns 0 bytes and io.EOF - reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3) - c.Assert(err, check.IsNil) - defer reader.Close() - - buf := make([]byte, chunkSize) - n, err := reader.Read(buf) - c.Assert(err, check.Equals, io.EOF) - c.Assert(n, check.Equals, 0) - - // Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF.
- reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3-1) - c.Assert(err, check.IsNil) - defer reader.Close() - - n, err = reader.Read(buf) - c.Assert(n, check.Equals, 1) - - // We don't care whether the io.EOF comes on this read or the first - // zero read, but the only error acceptable here is io.EOF. - if err != nil { - c.Assert(err, check.Equals, io.EOF) - } - - // Any more reads should result in zero bytes and io.EOF - n, err = reader.Read(buf) - c.Assert(n, check.Equals, 0) - c.Assert(err, check.Equals, io.EOF) -} - -// TestContinueStreamAppendLarge tests that a stream write can be appended to, -// using a large chunk size, without corrupting the data. -func (suite *DriverSuite) TestContinueStreamAppendLarge(c *check.C) { - suite.testContinueStreamAppend(c, int64(10*1024*1024)) -} - -// TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only -// with a tiny chunk size in order to test corner cases for some cloud storage drivers. -func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) { - suite.testContinueStreamAppend(c, int64(32)) -} - -func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) { - filename := randomPath(32) - defer suite.deletePath(c, firstPart(filename)) - - contentsChunk1 := randomContents(chunkSize) - contentsChunk2 := randomContents(chunkSize) - contentsChunk3 := randomContents(chunkSize) - - fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) - - writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) - c.Assert(err, check.IsNil) - nn, err := io.Copy(writer, bytes.NewReader(contentsChunk1)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contentsChunk1))) - - err = writer.Close() - c.Assert(err, check.IsNil) - - curSize := writer.Size() - c.Assert(curSize, check.Equals, int64(len(contentsChunk1))) - - writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) - c.Assert(err, check.IsNil) - c.Assert(writer.Size(), check.Equals, curSize) - - nn, err = io.Copy(writer, bytes.NewReader(contentsChunk2)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contentsChunk2))) - - err = writer.Close() - c.Assert(err, check.IsNil) - - curSize = writer.Size() - c.Assert(curSize, check.Equals, 2*chunkSize) - - writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) - c.Assert(err, check.IsNil) - c.Assert(writer.Size(), check.Equals, curSize) - - nn, err = io.Copy(writer, bytes.NewReader(fullContents[curSize:])) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(fullContents[curSize:]))) - - err = writer.Commit() - c.Assert(err, check.IsNil) - err = writer.Close() - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, fullContents) -} - -// TestReadNonexistentStream tests that reading a stream for a nonexistent path -// fails.
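testContinueStreamAppend above pins down the resume contract: reopening a Writer with append set to true must report the bytes already persisted through Size, and the caller continues from that offset before the final Commit. A sketch of the calling convention against the in-memory driver from this repository (error handling elided for brevity):

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "github.com/docker/distribution/context"
        "github.com/docker/distribution/registry/storage/driver/inmemory"
    )

    func main() {
        ctx := context.Background()
        d := inmemory.New()

        // First session: write part of the payload, Close without Commit.
        w, _ := d.Writer(ctx, "/upload", false)
        io.Copy(w, bytes.NewReader([]byte("hello ")))
        w.Close()

        // Second session: reopen with append=true and resume where Size says.
        w, _ = d.Writer(ctx, "/upload", true)
        fmt.Println("resuming at offset", w.Size()) // 6
        io.Copy(w, bytes.NewReader([]byte("world")))
        w.Commit()
        w.Close()

        got, _ := d.GetContent(ctx, "/upload")
        fmt.Println(string(got)) // "hello world"
    }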
-func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { - filename := randomPath(32) - - _, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.Reader(suite.ctx, filename, 64) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestList checks the returned list of keys after populating a directory tree. -func (suite *DriverSuite) TestList(c *check.C) { - rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) - defer suite.deletePath(c, rootDirectory) - - doesnotexist := path.Join(rootDirectory, "nonexistent") - _, err := suite.StorageDriver.List(suite.ctx, doesnotexist) - c.Assert(err, check.Equals, storagedriver.PathNotFoundError{ - Path: doesnotexist, - DriverName: suite.StorageDriver.Name(), - }) - - parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) - childFiles := make([]string, 50) - for i := 0; i < len(childFiles); i++ { - childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) - childFiles[i] = childFile - err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32)) - c.Assert(err, check.IsNil) - } - sort.Strings(childFiles) - - keys, err := suite.StorageDriver.List(suite.ctx, "/") - c.Assert(err, check.IsNil) - c.Assert(keys, check.DeepEquals, []string{rootDirectory}) - - keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory) - c.Assert(err, check.IsNil) - c.Assert(keys, check.DeepEquals, []string{parentDirectory}) - - keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory) - c.Assert(err, check.IsNil) - - sort.Strings(keys) - c.Assert(keys, check.DeepEquals, childFiles) - - // A few checks to add here (check out #819 for more discussion on this): - // 1. Ensure that all paths are absolute. - // 2. Ensure that listings only include direct children. - // 3. Ensure that we only respond to directory listings that end with a slash (maybe?). -} - -// TestMove checks that a moved object no longer exists at the source path and -// does exist at the destination. -func (suite *DriverSuite) TestMove(c *check.C) { - contents := randomContents(32) - sourcePath := randomPath(32) - destPath := randomPath(32) - - defer suite.deletePath(c, firstPart(sourcePath)) - defer suite.deletePath(c, firstPart(destPath)) - - err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, contents) - - _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestMoveOverwrite checks that a moved object no longer exists at the source -// path and overwrites the contents at the destination. 
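TestList above fixes two properties of List worth calling out: returned keys are absolute paths, and only the direct children of the queried directory appear, never grandchildren. Illustrated with the in-memory driver (error handling elided):

    package main

    import (
        "fmt"

        "github.com/docker/distribution/context"
        "github.com/docker/distribution/registry/storage/driver/inmemory"
    )

    func main() {
        ctx := context.Background()
        d := inmemory.New()

        d.PutContent(ctx, "/repo/a/file1", []byte("x"))
        d.PutContent(ctx, "/repo/a/file2", []byte("x"))
        d.PutContent(ctx, "/repo/b", []byte("x"))

        top, _ := d.List(ctx, "/")      // direct children of the root only: [/repo]
        repo, _ := d.List(ctx, "/repo") // absolute paths: [/repo/a /repo/b]
        fmt.Println(top, repo)
    }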
-func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { - sourcePath := randomPath(32) - destPath := randomPath(32) - sourceContents := randomContents(32) - destContents := randomContents(64) - - defer suite.deletePath(c, firstPart(sourcePath)) - defer suite.deletePath(c, firstPart(destPath)) - - err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, destPath, destContents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, sourceContents) - - _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestMoveNonexistent checks that moving a nonexistent key fails and does not -// delete the data at the destination path. -func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { - contents := randomContents(32) - sourcePath := randomPath(32) - destPath := randomPath(32) - - defer suite.deletePath(c, firstPart(destPath)) - - err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, contents) -} - -// TestMoveInvalid provides various checks for invalid moves. -func (suite *DriverSuite) TestMoveInvalid(c *check.C) { - contents := randomContents(32) - - // Create a regular file. - err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents) - c.Assert(err, check.IsNil) - defer suite.deletePath(c, "/notadir") - - // Now try to move a non-existent file under it. 
- err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar") - c.Assert(err, check.NotNil) // non-nil error -} - -// TestDelete checks that the delete operation removes data from the storage -// driver -func (suite *DriverSuite) TestDelete(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - - defer suite.deletePath(c, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Delete(suite.ctx, filename) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestURLFor checks that the URLFor method functions properly, but only if it -// is implemented -func (suite *DriverSuite) TestURLFor(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - - defer suite.deletePath(c, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil) - if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { - return - } - c.Assert(err, check.IsNil) - - response, err := http.Get(url) - c.Assert(err, check.IsNil) - defer response.Body.Close() - - read, err := ioutil.ReadAll(response.Body) - c.Assert(err, check.IsNil) - c.Assert(read, check.DeepEquals, contents) - - url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"}) - if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { - return - } - c.Assert(err, check.IsNil) - - response, err = http.Head(url) - c.Assert(response.StatusCode, check.Equals, 200) - c.Assert(response.ContentLength, check.Equals, int64(32)) -} - -// TestDeleteNonexistent checks that removing a nonexistent key fails. -func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { - filename := randomPath(32) - err := suite.StorageDriver.Delete(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestDeleteFolder checks that deleting a folder removes all child elements. 
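TestURLFor above demonstrates the optional-capability idiom: a driver with no way to mint direct URLs returns storagedriver.ErrUnsupportedMethod, and callers detect that with a type assertion and fall back to streaming the bytes themselves. A hedged sketch of such a caller; that the in-memory driver takes the fallback path here is an assumption used only for illustration:

    package main

    import (
        "fmt"
        "io"

        "github.com/docker/distribution/context"
        storagedriver "github.com/docker/distribution/registry/storage/driver"
        "github.com/docker/distribution/registry/storage/driver/inmemory"
    )

    // serveBlob prefers a redirect URL but degrades to a direct read when
    // the driver does not support URLFor.
    func serveBlob(ctx context.Context, d storagedriver.StorageDriver, path string) error {
        url, err := d.URLFor(ctx, path, nil)
        if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok {
            // No signed-URL support: stream the bytes ourselves instead.
            rc, err := d.Reader(ctx, path, 0)
            if err != nil {
                return err
            }
            defer rc.Close()
            _, err = io.Copy(io.Discard, rc) // stand-in for the HTTP response
            return err
        }
        if err != nil {
            return err
        }
        fmt.Println("redirect to", url)
        return nil
    }

    func main() {
        ctx := context.Background()
        d := inmemory.New()
        d.PutContent(ctx, "/blob", []byte("data"))
        fmt.Println(serveBlob(ctx, d, "/blob"))
    }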
-func (suite *DriverSuite) TestDeleteFolder(c *check.C) { - dirname := randomPath(32) - filename1 := randomPath(32) - filename2 := randomPath(32) - filename3 := randomPath(32) - contents := randomContents(32) - - defer suite.deletePath(c, firstPart(dirname)) - - err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1)) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Delete(suite.ctx, dirname) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestStatCall verifies the implementation of the storagedriver's Stat call. -func (suite *DriverSuite) TestStatCall(c *check.C) { - content := randomContents(4096) - dirPath := randomPath(32) - fileName := randomFilename(32) - filePath := path.Join(dirPath, fileName) - - defer suite.deletePath(c, firstPart(dirPath)) - - // Call on non-existent file/dir, check error.
- fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - c.Assert(fi, check.IsNil) - - fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - c.Assert(fi, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) - c.Assert(err, check.IsNil) - - // Call on regular file, check results - fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Path(), check.Equals, filePath) - c.Assert(fi.Size(), check.Equals, int64(len(content))) - c.Assert(fi.IsDir(), check.Equals, false) - createdTime := fi.ModTime() - - // Sleep and modify the file - time.Sleep(time.Second * 10) - content = randomContents(4096) - err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) - c.Assert(err, check.IsNil) - fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency) - - // Check if the modification time is after the creation time. - // In case of cloud storage services, storage frontend nodes might have - // time drift between them; the sleep before the update should absorb it. - modTime := fi.ModTime() - if !modTime.After(createdTime) { - c.Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime) - } - - // Call on directory (do not check ModTime as dirs don't need to support it) - fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Path(), check.Equals, dirPath) - c.Assert(fi.Size(), check.Equals, int64(0)) - c.Assert(fi.IsDir(), check.Equals, true) -} - -// TestPutContentMultipleTimes checks that the storage driver can overwrite content -// on subsequent puts. Validates that PutContent does not have to work -// with an offset like Writer does, and overwrites the file entirely -// rather than writing only to the [0,len(data)) range of the file. -func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) { - filename := randomPath(32) - contents := randomContents(4096) - - defer suite.deletePath(c, firstPart(filename)) - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - contents = randomContents(2048) // upload a different, smaller file - err = suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(readContents, check.DeepEquals, contents) -} - -// TestConcurrentStreamReads checks that multiple clients can safely read from -// the same file simultaneously with various offsets.
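The concurrent-read test described just above fans out goroutines that each open the object at a random offset and compare what they get against the source bytes. The coordination skeleton, independent of any driver (bytes.NewReader stands in for driver.Reader):

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "math/rand"
        "sync"
    )

    func main() {
        contents := make([]byte, 1<<20)
        rand.Read(contents)

        var wg sync.WaitGroup
        errs := make(chan error, 10)

        for i := 0; i < 10; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                // Each reader starts at its own random offset, as a real
                // client resuming a download might.
                off := rand.Int63n(int64(len(contents)))
                r := bytes.NewReader(contents[off:]) // stand-in for driver.Reader
                got, err := io.ReadAll(r)
                if err != nil || !bytes.Equal(got, contents[off:]) {
                    errs <- fmt.Errorf("mismatch at offset %d", off)
                }
            }()
        }
        wg.Wait()
        close(errs)
        fmt.Println("pending errors:", len(errs))
    }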
-func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { - var filesize int64 = 128 * 1024 * 1024 - - if testing.Short() { - filesize = 10 * 1024 * 1024 - c.Log("Reducing file size to 10MB for short mode") - } - - filename := randomPath(32) - contents := randomContents(filesize) - - defer suite.deletePath(c, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - var wg sync.WaitGroup - - readContents := func() { - defer wg.Done() - offset := rand.Int63n(int64(len(contents))) - reader, err := suite.StorageDriver.Reader(suite.ctx, filename, offset) - c.Assert(err, check.IsNil) - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - c.Assert(readContents, check.DeepEquals, contents[offset:]) - } - - wg.Add(10) - for i := 0; i < 10; i++ { - go readContents() - } - wg.Wait() -} - -// TestConcurrentFileStreams checks that multiple *os.File objects can be passed -// in to Writer concurrently without hanging. -func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { - numStreams := 32 - - if testing.Short() { - numStreams = 8 - c.Log("Reducing number of streams to 8 for short mode") - } - - var wg sync.WaitGroup - - testStream := func(size int64) { - defer wg.Done() - suite.testFileStreams(c, size) - } - - wg.Add(numStreams) - for i := numStreams; i > 0; i-- { - go testStream(int64(numStreams) * 1024 * 1024) - } - - wg.Wait() -} - -// TODO (brianbland): evaluate the relevancy of this test -// TestEventualConsistency checks that if stat says that a file is a certain size, then -// you can freely read from the file (this is the only guarantee that the driver needs to provide) -// func (suite *DriverSuite) TestEventualConsistency(c *check.C) { -// if testing.Short() { -// c.Skip("Skipping test in short mode") -// } -// -// filename := randomPath(32) -// defer suite.deletePath(c, firstPart(filename)) -// -// var offset int64 -// var misswrites int -// var chunkSize int64 = 32 -// -// for i := 0; i < 1024; i++ { -// contents := randomContents(chunkSize) -// read, err := suite.StorageDriver.Writer(suite.ctx, filename, offset, bytes.NewReader(contents)) -// c.Assert(err, check.IsNil) -// -// fi, err := suite.StorageDriver.Stat(suite.ctx, filename) -// c.Assert(err, check.IsNil) -// -// // We are most concerned with being able to read data as soon as Stat declares -// // it is uploaded. This is the strongest guarantee that some drivers (that guarantee -// // at best eventual consistency) absolutely need to provide. 
-// if fi.Size() == offset+chunkSize { -// reader, err := suite.StorageDriver.Reader(suite.ctx, filename, offset) -// c.Assert(err, check.IsNil) -// -// readContents, err := ioutil.ReadAll(reader) -// c.Assert(err, check.IsNil) -// -// c.Assert(readContents, check.DeepEquals, contents) -// -// reader.Close() -// offset += read -// } else { -// misswrites++ -// } -// } -// -// if misswrites > 0 { -// c.Log("There were " + string(misswrites) + " occurrences of a write not being instantly available.") -// } -// -// c.Assert(misswrites, check.Not(check.Equals), 1024) -// } - -// BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files -func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) { - suite.benchmarkPutGetFiles(c, 0) -} - -// BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files -func (suite *DriverSuite) BenchmarkPutGet1KBFiles(c *check.C) { - suite.benchmarkPutGetFiles(c, 1024) -} - -// BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files -func (suite *DriverSuite) BenchmarkPutGet1MBFiles(c *check.C) { - suite.benchmarkPutGetFiles(c, 1024*1024) -} - -// BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files -func (suite *DriverSuite) BenchmarkPutGet1GBFiles(c *check.C) { - suite.benchmarkPutGetFiles(c, 1024*1024*1024) -} - -func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) { - c.SetBytes(size) - parentDir := randomPath(8) - defer func() { - c.StopTimer() - suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - }() - - for i := 0; i < c.N; i++ { - filename := path.Join(parentDir, randomPath(32)) - err := suite.StorageDriver.PutContent(suite.ctx, filename, randomContents(size)) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - } -} - -// BenchmarkStreamEmptyFiles benchmarks Writer/Reader for 0B files -func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 0) -} - -// BenchmarkStream1KBFiles benchmarks Writer/Reader for 1KB files -func (suite *DriverSuite) BenchmarkStream1KBFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 1024) -} - -// BenchmarkStream1MBFiles benchmarks Writer/Reader for 1MB files -func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 1024*1024) -} - -// BenchmarkStream1GBFiles benchmarks Writer/Reader for 1GB files -func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 1024*1024*1024) -} - -func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) { - c.SetBytes(size) - parentDir := randomPath(8) - defer func() { - c.StopTimer() - suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - }() - - for i := 0; i < c.N; i++ { - filename := path.Join(parentDir, randomPath(32)) - writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) - c.Assert(err, check.IsNil) - written, err := io.Copy(writer, bytes.NewReader(randomContents(size))) - c.Assert(err, check.IsNil) - c.Assert(written, check.Equals, size) - - err = writer.Commit() - c.Assert(err, check.IsNil) - err = writer.Close() - c.Assert(err, check.IsNil) - - rc, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - rc.Close() - } -} - -// BenchmarkList5Files benchmarks List for 5 small files -func (suite *DriverSuite) BenchmarkList5Files(c *check.C) { - suite.benchmarkListFiles(c, 5) -} - -// BenchmarkList50Files benchmarks List for 50 small files -func 
(suite *DriverSuite) BenchmarkList50Files(c *check.C) { - suite.benchmarkListFiles(c, 50) -} - -func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) { - parentDir := randomPath(8) - defer func() { - c.StopTimer() - suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - }() - - for i := int64(0); i < numFiles; i++ { - err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) - c.Assert(err, check.IsNil) - } - - c.ResetTimer() - for i := 0; i < c.N; i++ { - files, err := suite.StorageDriver.List(suite.ctx, parentDir) - c.Assert(err, check.IsNil) - c.Assert(int64(len(files)), check.Equals, numFiles) - } -} - -// BenchmarkDelete5Files benchmarks Delete for 5 small files -func (suite *DriverSuite) BenchmarkDelete5Files(c *check.C) { - suite.benchmarkDeleteFiles(c, 5) -} - -// BenchmarkDelete50Files benchmarks Delete for 50 small files -func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { - suite.benchmarkDeleteFiles(c, 50) -} - -func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { - for i := 0; i < c.N; i++ { - parentDir := randomPath(8) - defer suite.deletePath(c, firstPart(parentDir)) - - c.StopTimer() - for j := int64(0); j < numFiles; j++ { - err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) - c.Assert(err, check.IsNil) - } - c.StartTimer() - - // This is the operation we're benchmarking - err := suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - c.Assert(err, check.IsNil) - } -} - -func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { - tf, err := ioutil.TempFile("", "tf") - c.Assert(err, check.IsNil) - defer os.Remove(tf.Name()) - defer tf.Close() - - filename := randomPath(32) - defer suite.deletePath(c, firstPart(filename)) - - contents := randomContents(size) - - _, err = tf.Write(contents) - c.Assert(err, check.IsNil) - - tf.Sync() - tf.Seek(0, os.SEEK_SET) - - writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) - c.Assert(err, check.IsNil) - nn, err := io.Copy(writer, tf) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, size) - - err = writer.Commit() - c.Assert(err, check.IsNil) - err = writer.Close() - c.Assert(err, check.IsNil) - - reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) -} - -func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { - defer suite.deletePath(c, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) -} - -func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { - defer suite.deletePath(c, firstPart(filename)) - - writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) - c.Assert(err, check.IsNil) - nn, err := io.Copy(writer, bytes.NewReader(contents)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contents))) - - err = writer.Commit() - c.Assert(err, check.IsNil) - err = writer.Close() - c.Assert(err, check.IsNil) - - reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - 
readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) -} - -var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") -var separatorChars = []byte("._-") - -func randomPath(length int64) string { - path := "/" - for int64(len(path)) < length { - chunkLength := rand.Int63n(length-int64(len(path))) + 1 - chunk := randomFilename(chunkLength) - path += chunk - remaining := length - int64(len(path)) - if remaining == 1 { - path += randomFilename(1) - } else if remaining > 1 { - path += "/" - } - } - return path -} - -func randomFilename(length int64) string { - b := make([]byte, length) - wasSeparator := true - for i := range b { - if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { - b[i] = separatorChars[rand.Intn(len(separatorChars))] - wasSeparator = true - } else { - b[i] = filenameChars[rand.Intn(len(filenameChars))] - wasSeparator = false - } - } - return string(b) -} - -// randomBytes pre-allocates all of the memory sizes needed for the test. If -// anything panics while accessing randomBytes, just make this number bigger. -var randomBytes = make([]byte, 128<<20) - -func init() { - // increase the random bytes to the required maximum - for i := range randomBytes { - randomBytes[i] = byte(rand.Intn(2 << 8)) - } -} - -func randomContents(length int64) []byte { - return randomBytes[:length] -} - -type randReader struct { - r int64 - m sync.Mutex -} - -func (rr *randReader) Read(p []byte) (n int, err error) { - rr.m.Lock() - defer rr.m.Unlock() - - toread := int64(len(p)) - if toread > rr.r { - toread = rr.r - } - n = copy(p, randomContents(toread)) - rr.r -= int64(n) - - if rr.r <= 0 { - err = io.EOF - } - - return -} - -func newRandReader(n int64) *randReader { - return &randReader{r: n} -} - -func firstPart(filePath string) string { - if filePath == "" { - return "/" - } - for { - if filePath[len(filePath)-1] == '/' { - filePath = filePath[:len(filePath)-1] - } - - dir, file := path.Split(filePath) - if dir == "" && file == "" { - return "/" - } - if dir == "/" || dir == "" { - return "/" + file - } - if file == "" { - return dir - } - filePath = dir - } -} diff --git a/docs/storage/filereader.go b/docs/storage/filereader.go deleted file mode 100644 index 3b06c817..00000000 --- a/docs/storage/filereader.go +++ /dev/null @@ -1,177 +0,0 @@ -package storage - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// TODO(stevvooe): Set an optimal buffer size here. We'll have to -// understand the latency characteristics of the underlying network to -// set this correctly, so we may want to leave it to the driver. For -// out of process drivers, we'll have to optimize this buffer size for -// local communication. -const fileReaderBufferSize = 4 << 20 - -// remoteFileReader provides a read seeker interface to files stored in -// storagedriver. Used to implement part of layer interface and will be used -// to implement read side of LayerUpload. -type fileReader struct { - driver storagedriver.StorageDriver - - ctx context.Context - - // identifying fields - path string - size int64 // size is the total size, must be set. 
- - // mutable fields - rc io.ReadCloser // remote read closer - brd *bufio.Reader // internal buffered io - offset int64 // offset is the current read offset - err error // terminal error, if set, reader is closed -} - -// newFileReader initializes a file reader for the remote file. The reader -// takes on the size and path that must be determined externally with a stat -// call. The reader operates optimistically, assuming that the file is already -// there. -func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) { - return &fileReader{ - ctx: ctx, - driver: driver, - path: path, - size: size, - }, nil -} - -func (fr *fileReader) Read(p []byte) (n int, err error) { - if fr.err != nil { - return 0, fr.err - } - - rd, err := fr.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - fr.offset += int64(n) - - // Simulate an io.EOF error if we reach the file size. - if err == nil && fr.offset >= fr.size { - err = io.EOF - } - - return n, err -} - -func (fr *fileReader) Seek(offset int64, whence int) (int64, error) { - if fr.err != nil { - return 0, fr.err - } - - var err error - newOffset := fr.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset = fr.size + int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - if newOffset < 0 { - err = fmt.Errorf("cannot seek to negative position") - } else { - if fr.offset != newOffset { - fr.reset() - } - - // No problems, set the offset. - fr.offset = newOffset - } - - return fr.offset, err -} - -func (fr *fileReader) Close() error { - return fr.closeWithErr(fmt.Errorf("fileReader: closed")) -} - -// reader prepares the current reader at the current offset, ensuring it is -// buffered and ready to go. -func (fr *fileReader) reader() (io.Reader, error) { - if fr.err != nil { - return nil, fr.err - } - - if fr.rc != nil { - return fr.brd, nil - } - - // If we don't have a reader, open one up. - rc, err := fr.driver.Reader(fr.ctx, fr.path, fr.offset) - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // NOTE(stevvooe): If the path is not found, we simply return a - // reader that returns io.EOF. However, we do not set fr.rc, - // allowing future attempts at getting a reader to possibly - // succeed if the file turns up later. - return ioutil.NopCloser(bytes.NewReader([]byte{})), nil - default: - return nil, err - } - } - - fr.rc = rc - - if fr.brd == nil { - fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize) - } else { - fr.brd.Reset(fr.rc) - } - - return fr.brd, nil -} - -// reset discards the current reader, forcing the read method to open up a new -// connection and rebuild the buffered reader. This should be called when the -// offset and the reader will become out of sync, such as during a seek -// operation.
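The fileReader above opens its backend connection lazily and tears it down whenever a seek moves the offset, so the next Read dials a fresh ranged request at the right position. The essential shape of that pattern, reduced to the standard library (openAt is a hypothetical ranged-open callback, not part of the registry code):

    package main

    import (
        "bufio"
        "bytes"
        "fmt"
        "io"
    )

    // lazyReader dials openAt(offset) only when a read actually happens,
    // and drops the connection whenever SeekTo moves the offset.
    type lazyReader struct {
        openAt func(off int64) (io.ReadCloser, error)
        rc     io.ReadCloser
        brd    *bufio.Reader
        offset int64
    }

    func (l *lazyReader) Read(p []byte) (int, error) {
        if l.rc == nil {
            rc, err := l.openAt(l.offset)
            if err != nil {
                return 0, err
            }
            l.rc = rc
            l.brd = bufio.NewReader(rc)
        }
        n, err := l.brd.Read(p)
        l.offset += int64(n)
        return n, err
    }

    func (l *lazyReader) SeekTo(off int64) {
        if off != l.offset && l.rc != nil {
            l.rc.Close() // stale position: force a reopen on the next Read
            l.rc = nil
        }
        l.offset = off
    }

    func main() {
        data := []byte("0123456789")
        open := func(off int64) (io.ReadCloser, error) {
            return io.NopCloser(bytes.NewReader(data[off:])), nil
        }
        r := &lazyReader{openAt: open}
        buf := make([]byte, 3)
        r.Read(buf)
        fmt.Println(string(buf)) // 012
        r.SeekTo(7)
        r.Read(buf)
        fmt.Println(string(buf)) // 789
    }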
-func (fr *fileReader) reset() { - if fr.err != nil { - return - } - if fr.rc != nil { - fr.rc.Close() - fr.rc = nil - } -} - -func (fr *fileReader) closeWithErr(err error) error { - if fr.err != nil { - return fr.err - } - - fr.err = err - - // close and release reader chain - if fr.rc != nil { - fr.rc.Close() - } - - fr.rc = nil - fr.brd = nil - - return fr.err -} diff --git a/docs/storage/filereader_test.go b/docs/storage/filereader_test.go deleted file mode 100644 index f43873b3..00000000 --- a/docs/storage/filereader_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package storage - -import ( - "bytes" - "crypto/rand" - "io" - mrand "math/rand" - "os" - "testing" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver/inmemory" ) - -func TestSimpleRead(t *testing.T) { - ctx := context.Background() - content := make([]byte, 1<<20) - n, err := rand.Read(content) - if err != nil { - t.Fatalf("unexpected error building random data: %v", err) - } - - if n != len(content) { - t.Fatalf("random read didn't fill buffer") - } - - dgst, err := digest.FromReader(bytes.NewReader(content)) - if err != nil { - t.Fatalf("unexpected error digesting random content: %v", err) - } - - driver := inmemory.New() - path := "/random" - - if err := driver.PutContent(ctx, path, content); err != nil { - t.Fatalf("error putting random content: %v", err) - } - - fr, err := newFileReader(ctx, driver, path, int64(len(content))) - if err != nil { - t.Fatalf("error allocating file reader: %v", err) - } - - verifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - t.Fatalf("error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify read data") - } -} - -func TestFileReaderSeek(t *testing.T) { - driver := inmemory.New() - pattern := "01234567890ab" // prime length block - repetitions := 1024 - path := "/patterned" - content := bytes.Repeat([]byte(pattern), repetitions) - ctx := context.Background() - - if err := driver.PutContent(ctx, path, content); err != nil { - t.Fatalf("error putting patterned content: %v", err) - } - - fr, err := newFileReader(ctx, driver, path, int64(len(content))) - - if err != nil { - t.Fatalf("unexpected error creating file reader: %v", err) - } - - // Seek all over the place, in blocks of pattern size and make sure we get - // the right data.
- for _, repetition := range mrand.Perm(repetitions - 1) { - targetOffset := int64(len(pattern) * repetition) - // Seek to a multiple of pattern size and read pattern size bytes - offset, err := fr.Seek(targetOffset, os.SEEK_SET) - if err != nil { - t.Fatalf("unexpected error seeking: %v", err) - } - - if offset != targetOffset { - t.Fatalf("did not seek to correct offset: %d != %d", offset, targetOffset) - } - - p := make([]byte, len(pattern)) - - n, err := fr.Read(p) - if err != nil { - t.Fatalf("error reading pattern: %v", err) - } - - if n != len(pattern) { - t.Fatalf("incorrect read length: %d != %d", n, len(pattern)) - } - - if string(p) != pattern { - t.Fatalf("incorrect read content: %q != %q", p, pattern) - } - - // Check offset - current, err := fr.Seek(0, os.SEEK_CUR) - if err != nil { - t.Fatalf("error checking current offset: %v", err) - } - - if current != targetOffset+int64(len(pattern)) { - t.Fatalf("unexpected offset after read: %v", err) - } - } - - start, err := fr.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatalf("error seeking to start: %v", err) - } - - if start != 0 { - t.Fatalf("expected to seek to start: %v != 0", start) - } - - end, err := fr.Seek(0, os.SEEK_END) - if err != nil { - t.Fatalf("error checking current offset: %v", err) - } - - if end != int64(len(content)) { - t.Fatalf("expected to seek to end: %v != %v", end, len(content)) - } - - // 4. Seek before start, ensure error. - - // seek before start - before, err := fr.Seek(-1, os.SEEK_SET) - if err == nil { - t.Fatalf("error expected, returned offset=%v", before) - } - - // 5. Seek after end; ensure it succeeds (reads there return io.EOF). - after, err := fr.Seek(1, os.SEEK_END) - if err != nil { - t.Fatalf("unexpected error, returned offset=%v", after) - } - - p := make([]byte, 16) - n, err := fr.Read(p) - - if n != 0 { - t.Fatalf("bytes read %d != %d", n, 0) - } - - if err != io.EOF { - t.Fatalf("expected io.EOF, got %v", err) - } -} - -// TestFileReaderNonExistentFile ensures the reader behaves as expected with a -// missing or zero-length remote file. While the file may not exist, the -// reader should not error out on creation and should return 0-bytes from the -// read method, with an io.EOF error. -func TestFileReaderNonExistentFile(t *testing.T) { - driver := inmemory.New() - fr, err := newFileReader(context.Background(), driver, "/doesnotexist", 10) - if err != nil { - t.Fatalf("unexpected error initializing reader: %v", err) - } - - var buf [1024]byte - - n, err := fr.Read(buf[:]) - if n != 0 { - t.Fatalf("non-zero byte read reported: %d != 0", n) - } - - if err != io.EOF { - t.Fatalf("read on missing file should return io.EOF, got %v", err) - } -} - -// TestFileReaderErrors covers the various error return types for different -// conditions that can arise when reading a layer. -func TestFileReaderErrors(t *testing.T) { - // TODO(stevvooe): We need to cover error return types, driven by the - // errors returned via the HTTP API. For now, here is an incomplete list: - // - // 1. Layer Not Found: returned when layer is not found or access is - // denied. - // 2. Layer Unavailable: returned when link references are unresolved, - // but layer is known to the registry. - // 3. Layer Invalid: This may be split into more errors, but should be - // returned when name or tarsum does not reference a valid layer. We - // may also need something to communicate layer verification errors - // for the inline tarsum check. - // 4. Timeout: timeouts to backend.
Need to better understand these - // failure cases and how the storage driver propagates these errors - // up the stack. -} diff --git a/docs/storage/garbagecollect.go b/docs/storage/garbagecollect.go deleted file mode 100644 index bc340416..00000000 --- a/docs/storage/garbagecollect.go +++ /dev/null @@ -1,133 +0,0 @@ -package storage - -import ( - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/driver" -) - -func emit(format string, a ...interface{}) { - fmt.Printf(format+"\n", a...) -} - -// MarkAndSweep performs a mark and sweep of registry data -func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, dryRun bool) error { - repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator) - if !ok { - return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator") - } - - // mark - markSet := make(map[digest.Digest]struct{}) - err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error { - if dryRun { - emit(repoName) - } - - var err error - named, err := reference.ParseNamed(repoName) - if err != nil { - return fmt.Errorf("failed to parse repo name %s: %v", repoName, err) - } - repository, err := registry.Repository(ctx, named) - if err != nil { - return fmt.Errorf("failed to construct repository: %v", err) - } - - manifestService, err := repository.Manifests(ctx) - if err != nil { - return fmt.Errorf("failed to construct manifest service: %v", err) - } - - manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator) - if !ok { - return fmt.Errorf("unable to convert ManifestService into ManifestEnumerator") - } - - err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { - // Mark the manifest's blob - if dryRun { - emit("%s: marking manifest %s ", repoName, dgst) - } - markSet[dgst] = struct{}{} - - manifest, err := manifestService.Get(ctx, dgst) - if err != nil { - return fmt.Errorf("failed to retrieve manifest for digest %v: %v", dgst, err) - } - - descriptors := manifest.References() - for _, descriptor := range descriptors { - markSet[descriptor.Digest] = struct{}{} - if dryRun { - emit("%s: marking blob %s", repoName, descriptor.Digest) - } - } - - switch manifest.(type) { - case *schema2.DeserializedManifest: - config := manifest.(*schema2.DeserializedManifest).Config - if dryRun { - emit("%s: marking configuration %s", repoName, config.Digest) - } - markSet[config.Digest] = struct{}{} - break - } - - return nil - }) - - if err != nil { - // In certain situations such as unfinished uploads, deleting all - // tags in S3 or removing the _manifests folder manually, this - // error may be of type PathNotFound. - // - // In these cases we can continue marking other manifests safely. - if _, ok := err.(driver.PathNotFoundError); ok { - return nil - } - } - - return err - }) - - if err != nil { - return fmt.Errorf("failed to mark: %v\n", err) - } - - // sweep - blobService := registry.Blobs() - deleteSet := make(map[digest.Digest]struct{}) - err = blobService.Enumerate(ctx, func(dgst digest.Digest) error { - // check if digest is in markSet. If not, delete it! 
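Stripped of the registry types, MarkAndSweep is two set passes: mark every digest that any manifest still references, then sweep (the membership check just below is that step) every enumerated blob that was never marked. A toy version over plain strings:

    package main

    import "fmt"

    // markAndSweep returns the blobs not reachable from any manifest.
    func markAndSweep(allBlobs []string, referenced map[string][]string) []string {
        // Mark: everything any manifest still references survives.
        mark := make(map[string]struct{})
        for _, refs := range referenced {
            for _, dgst := range refs {
                mark[dgst] = struct{}{}
            }
        }
        // Sweep: anything enumerated in storage but never marked is garbage.
        var garbage []string
        for _, dgst := range allBlobs {
            if _, ok := mark[dgst]; !ok {
                garbage = append(garbage, dgst)
            }
        }
        return garbage
    }

    func main() {
        blobs := []string{"sha256:a", "sha256:b", "sha256:c"}
        refs := map[string][]string{"repo/manifest1": {"sha256:a", "sha256:b"}}
        fmt.Println(markAndSweep(blobs, refs)) // [sha256:c]
    }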
- if _, ok := markSet[dgst]; !ok { - deleteSet[dgst] = struct{}{} - } - return nil - }) - if err != nil { - return fmt.Errorf("error enumerating blobs: %v", err) - } - if dryRun { - emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet)) - } - // Construct vacuum - vacuum := NewVacuum(ctx, storageDriver) - for dgst := range deleteSet { - if dryRun { - emit("blob eligible for deletion: %s", dgst) - continue - } - err = vacuum.RemoveBlob(string(dgst)) - if err != nil { - return fmt.Errorf("failed to delete blob %s: %v", dgst, err) - } - } - - return err -} diff --git a/docs/storage/garbagecollect_test.go b/docs/storage/garbagecollect_test.go deleted file mode 100644 index 86fc175a..00000000 --- a/docs/storage/garbagecollect_test.go +++ /dev/null @@ -1,376 +0,0 @@ -package storage - -import ( - "io" - "path" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/testutil" - "github.com/docker/libtrust" -) - -type image struct { - manifest distribution.Manifest - manifestDigest digest.Digest - layers map[digest.Digest]io.ReadSeeker -} - -func createRegistry(t *testing.T, driver driver.StorageDriver) distribution.Namespace { - ctx := context.Background() - k, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - registry, err := NewRegistry(ctx, driver, EnableDelete, Schema1SigningKey(k)) - if err != nil { - t.Fatalf("Failed to construct namespace: %v", err) - } - return registry -} - -func makeRepository(t *testing.T, registry distribution.Namespace, name string) distribution.Repository { - ctx := context.Background() - - // Initialize a dummy repository - named, err := reference.ParseNamed(name) - if err != nil { - t.Fatalf("Failed to parse name %s: %v", name, err) - } - - repo, err := registry.Repository(ctx, named) - if err != nil { - t.Fatalf("Failed to construct repository: %v", err) - } - return repo -} - -func makeManifestService(t *testing.T, repository distribution.Repository) distribution.ManifestService { - ctx := context.Background() - - manifestService, err := repository.Manifests(ctx) - if err != nil { - t.Fatalf("Failed to construct manifest store: %v", err) - } - return manifestService -} - -func allBlobs(t *testing.T, registry distribution.Namespace) map[digest.Digest]struct{} { - ctx := context.Background() - blobService := registry.Blobs() - allBlobsMap := make(map[digest.Digest]struct{}) - err := blobService.Enumerate(ctx, func(dgst digest.Digest) error { - allBlobsMap[dgst] = struct{}{} - return nil - }) - if err != nil { - t.Fatalf("Error getting all blobs: %v", err) - } - return allBlobsMap -} - -func uploadImage(t *testing.T, repository distribution.Repository, im image) digest.Digest { - // upload layers - err := testutil.UploadBlobs(repository, im.layers) - if err != nil { - t.Fatalf("layer upload failed: %v", err) - } - - // upload manifest - ctx := context.Background() - manifestService := makeManifestService(t, repository) - manifestDigest, err := manifestService.Put(ctx, im.manifest) - if err != nil { - t.Fatalf("manifest upload failed: %v", err) - } - - return manifestDigest -} - -func uploadRandomSchema1Image(t *testing.T, repository distribution.Repository) image { - randomLayers, err := testutil.CreateRandomLayers(2) - if err != 
nil { - t.Fatalf("%v", err) - } - - digests := []digest.Digest{} - for digest := range randomLayers { - digests = append(digests, digest) - } - - manifest, err := testutil.MakeSchema1Manifest(digests) - if err != nil { - t.Fatalf("%v", err) - } - - manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers}) - return image{ - manifest: manifest, - manifestDigest: manifestDigest, - layers: randomLayers, - } -} - -func uploadRandomSchema2Image(t *testing.T, repository distribution.Repository) image { - randomLayers, err := testutil.CreateRandomLayers(2) - if err != nil { - t.Fatalf("%v", err) - } - - digests := []digest.Digest{} - for digest := range randomLayers { - digests = append(digests, digest) - } - - manifest, err := testutil.MakeSchema2Manifest(repository, digests) - if err != nil { - t.Fatalf("%v", err) - } - - manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers}) - return image{ - manifest: manifest, - manifestDigest: manifestDigest, - layers: randomLayers, - } -} - -func TestNoDeletionNoEffect(t *testing.T) { - ctx := context.Background() - inmemoryDriver := inmemory.New() - - registry := createRegistry(t, inmemoryDriver) - repo := makeRepository(t, registry, "palailogos") - manifestService, err := repo.Manifests(ctx) - if err != nil { - t.Fatalf("unexpected error getting manifest service: %v", err) - } - - image1 := uploadRandomSchema1Image(t, repo) - image2 := uploadRandomSchema1Image(t, repo) - uploadRandomSchema2Image(t, repo) - - // construct manifestlist for fun. - blobstatter := registry.BlobStatter() - manifestList, err := testutil.MakeManifestList(blobstatter, []digest.Digest{ - image1.manifestDigest, image2.manifestDigest}) - if err != nil { - t.Fatalf("Failed to make manifest list: %v", err) - } - - _, err = manifestService.Put(ctx, manifestList) - if err != nil { - t.Fatalf("Failed to add manifest list: %v", err) - } - - before := allBlobs(t, registry) - - // Run GC - err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) - if err != nil { - t.Fatalf("Failed mark and sweep: %v", err) - } - - after := allBlobs(t, registry) - if len(before) != len(after) { - t.Fatalf("Garbage collection affected storage: %d != %d", len(before), len(after)) - } -} - -func TestGCWithMissingManifests(t *testing.T) { - ctx := context.Background() - d := inmemory.New() - - registry := createRegistry(t, d) - repo := makeRepository(t, registry, "testrepo") - uploadRandomSchema1Image(t, repo) - - // Simulate a missing _manifests directory - revPath, err := pathFor(manifestRevisionsPathSpec{"testrepo"}) - if err != nil { - t.Fatal(err) - } - - _manifestsPath := path.Dir(revPath) - err = d.Delete(ctx, _manifestsPath) - if err != nil { - t.Fatal(err) - } - - err = MarkAndSweep(context.Background(), d, registry, false) - if err != nil { - t.Fatalf("Failed mark and sweep: %v", err) - } - - blobs := allBlobs(t, registry) - if len(blobs) > 0 { - t.Errorf("unexpected blobs after gc") - } -} - -func TestDeletionHasEffect(t *testing.T) { - ctx := context.Background() - inmemoryDriver := inmemory.New() - - registry := createRegistry(t, inmemoryDriver) - repo := makeRepository(t, registry, "komnenos") - manifests, err := repo.Manifests(ctx) - if err != nil { - t.Fatalf("unexpected error getting manifest service: %v", err) - } - - image1 := uploadRandomSchema1Image(t, repo) - image2 := uploadRandomSchema1Image(t, repo) - image3 := uploadRandomSchema2Image(t, repo) - - manifests.Delete(ctx, image2.manifestDigest) - manifests.Delete(ctx, image3.manifestDigest) - - // Run GC - err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) - if err != nil { - t.Fatalf("Failed mark 
and sweep: %v", err) - } - - blobs := allBlobs(t, registry) - - // check that the image1 manifest and all the layers are still in blobs - if _, ok := blobs[image1.manifestDigest]; !ok { - t.Fatalf("First manifest is missing") - } - - for layer := range image1.layers { - if _, ok := blobs[layer]; !ok { - t.Fatalf("manifest 1 layer is missing: %v", layer) - } - } - - // check that image2 and image3 layers are not still around - for layer := range image2.layers { - if _, ok := blobs[layer]; ok { - t.Fatalf("manifest 2 layer is present: %v", layer) - } - } - - for layer := range image3.layers { - if _, ok := blobs[layer]; ok { - t.Fatalf("manifest 3 layer is present: %v", layer) - } - } -} - -func getAnyKey(digests map[digest.Digest]io.ReadSeeker) (d digest.Digest) { - for d = range digests { - break - } - return -} - -func getKeys(digests map[digest.Digest]io.ReadSeeker) (ds []digest.Digest) { - for d := range digests { - ds = append(ds, d) - } - return -} - -func TestDeletionWithSharedLayer(t *testing.T) { - ctx := context.Background() - inmemoryDriver := inmemory.New() - - registry := createRegistry(t, inmemoryDriver) - repo := makeRepository(t, registry, "tzimiskes") - - // Create random layers - randomLayers1, err := testutil.CreateRandomLayers(3) - if err != nil { - t.Fatalf("failed to make layers: %v", err) - } - - randomLayers2, err := testutil.CreateRandomLayers(3) - if err != nil { - t.Fatalf("failed to make layers: %v", err) - } - - // Upload all layers - err = testutil.UploadBlobs(repo, randomLayers1) - if err != nil { - t.Fatalf("failed to upload layers: %v", err) - } - - err = testutil.UploadBlobs(repo, randomLayers2) - if err != nil { - t.Fatalf("failed to upload layers: %v", err) - } - - // Construct manifests - manifest1, err := testutil.MakeSchema1Manifest(getKeys(randomLayers1)) - if err != nil { - t.Fatalf("failed to make manifest: %v", err) - } - - sharedKey := getAnyKey(randomLayers1) - manifest2, err := testutil.MakeSchema2Manifest(repo, append(getKeys(randomLayers2), sharedKey)) - if err != nil { - t.Fatalf("failed to make manifest: %v", err) - } - - manifestService := makeManifestService(t, repo) - - // Upload manifests - _, err = manifestService.Put(ctx, manifest1) - if err != nil { - t.Fatalf("manifest upload failed: %v", err) - } - - manifestDigest2, err := manifestService.Put(ctx, manifest2) - if err != nil { - t.Fatalf("manifest upload failed: %v", err) - } - - // delete - err = manifestService.Delete(ctx, manifestDigest2) - if err != nil { - t.Fatalf("manifest deletion failed: %v", err) - } - - // check that all of the layers from manifest 1 are still there - blobs := allBlobs(t, registry) - for dgst := range randomLayers1 { - if _, ok := blobs[dgst]; !ok { - t.Fatalf("random layer 1 blob missing: %v", dgst) - } - } -} - -func TestOrphanBlobDeleted(t *testing.T) { - inmemoryDriver := inmemory.New() - - registry := createRegistry(t, inmemoryDriver) - repo := makeRepository(t, registry, "michael_z_doukas") - - digests, err := testutil.CreateRandomLayers(1) - if err != nil { - t.Fatalf("Failed to create random layers: %v", err) - } - - if err = testutil.UploadBlobs(repo, digests); err != nil { - t.Fatalf("Failed to upload blob: %v", err) - } - - // formality to create the necessary directories - uploadRandomSchema2Image(t, repo) - - // Run GC - err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) - if err != nil { - t.Fatalf("Failed mark and sweep: %v", err) - } - - blobs := allBlobs(t, registry) - - // check that orphan blob layers are not still around - for dgst := range digests { - if _, ok := blobs[dgst]; ok { - t.Fatalf("Orphan layer is present: %v", dgst) - } - } -}
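The tests above exercise MarkAndSweep end to end. As a rough usage sketch (assuming the in-memory driver and the NewRegistry options shown in createRegistry above; this is not part of the patch), a dry run only reports what would be deleted:

package main

import (
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	driver := inmemory.New()

	// The registry must be constructed over the same driver that
	// MarkAndSweep walks, or the mark phase sees no repositories.
	registry, err := storage.NewRegistry(ctx, driver, storage.EnableDelete)
	if err != nil {
		panic(err)
	}

	// dryRun=true: emit what would be marked and swept, delete nothing.
	if err := storage.MarkAndSweep(ctx, driver, registry, true); err != nil {
		panic(err)
	}
}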
diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go deleted file mode 100644 index d254bbb8..00000000 --- a/docs/storage/linkedblobstore.go +++ /dev/null @@ -1,472 +0,0 @@ -package storage - -import ( - "fmt" - "net/http" - "path" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/uuid" -) - -// linkPathFunc describes a function that can resolve a link based on the -// repository name and digest. -type linkPathFunc func(name string, dgst digest.Digest) (string, error) - -// linkedBlobStore provides a full BlobService that namespaces the blobs to a -// given repository. Effectively, it manages the links in a given repository -// that grant access to the global blob store. -type linkedBlobStore struct { - *blobStore - registry *registry - blobServer distribution.BlobServer - blobAccessController distribution.BlobDescriptorService - repository distribution.Repository - ctx context.Context // only to be used where context can't come through method args - deleteEnabled bool - resumableDigestEnabled bool - - // linkPathFns specifies one or more path functions allowing one to - // control the repository blob link set to which the blob store - // dispatches. This is required because manifest and layer blobs have not - // yet been fully merged. At some point, this functionality should be - // removed and the blob links folder should be merged. The first entry is - // treated as the "canonical" link location and will be used for writes. - linkPathFns []linkPathFunc - - // linkDirectoryPathSpec locates the root directories in which one might find links - linkDirectoryPathSpec pathSpec -} - -var _ distribution.BlobStore = &linkedBlobStore{} - -func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return lbs.blobAccessController.Stat(ctx, dgst) -} - -func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - canonical, err := lbs.Stat(ctx, dgst) // access check - if err != nil { - return nil, err - } - - return lbs.blobStore.Get(ctx, canonical.Digest) -} - -func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - canonical, err := lbs.Stat(ctx, dgst) // access check - if err != nil { - return nil, err - } - - return lbs.blobStore.Open(ctx, canonical.Digest) -} - -func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - canonical, err := lbs.Stat(ctx, dgst) // access check - if err != nil { - return err - } - - if canonical.MediaType != "" { - // Set the repository local content type. - w.Header().Set("Content-Type", canonical.MediaType) - } - - return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest) -} - -func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - dgst := digest.FromBytes(p) - // Place the data in the blob store first.
- desc, err := lbs.blobStore.Put(ctx, mediaType, p) - if err != nil { - context.GetLogger(ctx).Errorf("error putting into main store: %v", err) - return distribution.Descriptor{}, err - } - - if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil { - return distribution.Descriptor{}, err - } - - // TODO(stevvooe): Write out mediatype if incoming differs from what is - // returned by Put above. Note that we should allow updates for a given - // repository. - - return desc, lbs.linkBlob(ctx, desc) -} - -// createOptions is a collection of blob creation modifiers relevant to general -// blob storage intended to be configured by the BlobCreateOption.Apply method. -type createOptions struct { - Mount struct { - ShouldMount bool - From reference.Canonical - } -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*createOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -// Create begins a blob write session, returning a handle. -func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - context.GetLogger(ctx).Debug("(*linkedBlobStore).Create") - - var opts createOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - if opts.Mount.ShouldMount { - desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest()) - if err == nil { - // Mount successful, no need to initiate an upload session - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - } - } - - uuid := uuid.Generate().String() - startedAt := time.Now().UTC() - - path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Named().Name(), - id: uuid, - }) - - if err != nil { - return nil, err - } - - startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Named().Name(), - id: uuid, - }) - - if err != nil { - return nil, err - } - - // Write a startedat file for this upload - if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { - return nil, err - } - - return lbs.newBlobUpload(ctx, uuid, path, startedAt, false) -} - -func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") - - startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Named().Name(), - id: id, - }) - - if err != nil { - return nil, err - } - - startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath) - if err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return nil, distribution.ErrBlobUploadUnknown - default: - return nil, err - } - } - - startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) - if err != nil { - return nil, err - } - - path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Named().Name(), - id: id, - }) - - if err != nil { - return nil, err - } - - return lbs.newBlobUpload(ctx, id, path, startedAt, true) -}
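Create and Resume above back the distribution.BlobWriter session API. A minimal client-side sketch of the happy path (uploadBlob is a hypothetical helper, not from this patch; the same flow appears in the manifest store test later in this file set):

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// uploadBlob sketches the session flow: Create -> Write -> Commit.
func uploadBlob(ctx context.Context, repo distribution.Repository, content []byte) (distribution.Descriptor, error) {
	bw, err := repo.Blobs(ctx).Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	if _, err := bw.Write(content); err != nil {
		bw.Cancel(ctx) // abandon the upload session
		return distribution.Descriptor{}, err
	}

	// Commit verifies the provisional descriptor's digest against the
	// written bytes, then links the blob into the repository.
	return bw.Commit(ctx, distribution.Descriptor{Digest: digest.FromBytes(content)})
}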
-func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { - if !lbs.deleteEnabled { - return distribution.ErrUnsupported - } - - // Ensure the blob is available for deletion - _, err := lbs.blobAccessController.Stat(ctx, dgst) - if err != nil { - return err - } - - err = lbs.blobAccessController.Clear(ctx, dgst) - if err != nil { - return err - } - - return nil -} - -func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest.Digest) error) error { - rootPath, err := pathFor(lbs.linkDirectoryPathSpec) - if err != nil { - return err - } - err = Walk(ctx, lbs.blobStore.driver, rootPath, func(fileInfo driver.FileInfo) error { - // exit early if directory... - if fileInfo.IsDir() { - return nil - } - filePath := fileInfo.Path() - - // check if it's a link - _, fileName := path.Split(filePath) - if fileName != "link" { - return nil - } - - // read the digest found in link - digest, err := lbs.blobStore.readlink(ctx, filePath) - if err != nil { - return err - } - - // ensure this conforms to the linkPathFns - _, err = lbs.Stat(ctx, digest) - if err != nil { - // we expect this error to occur so we move on - if err == distribution.ErrBlobUnknown { - return nil - } - return err - } - - err = ingestor(digest) - if err != nil { - return err - } - - return nil - }) - - if err != nil { - return err - } - - return nil -} - -func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) { - repo, err := lbs.registry.Repository(ctx, sourceRepo) - if err != nil { - return distribution.Descriptor{}, err - } - stat, err := repo.Blobs(ctx).Stat(ctx, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - - desc := distribution.Descriptor{ - Size: stat.Size, - - // NOTE(stevvooe): The central blob store firewalls media types from - // other users. The caller should look this up and override the value - // for the specific repository. - MediaType: "application/octet-stream", - Digest: dgst, - } - return desc, lbs.linkBlob(ctx, desc) -} - -// newBlobUpload allocates a new upload controller with the given state. -func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time, appendMode bool) (distribution.BlobWriter, error) { - fw, err := lbs.driver.Writer(ctx, path, appendMode) - if err != nil { - return nil, err - } - - bw := &blobWriter{ - ctx: ctx, - blobStore: lbs, - id: uuid, - startedAt: startedAt, - digester: digest.Canonical.New(), - fileWriter: fw, - driver: lbs.driver, - path: path, - resumableDigestEnabled: lbs.resumableDigestEnabled, - } - - return bw, nil -} - -// linkBlob links a valid, written blob into the registry under the named -// repository for the upload controller. -func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error { - dgsts := append([]digest.Digest{canonical.Digest}, aliases...) - - // TODO(stevvooe): Need to write out mediatype for only canonical hash - // since we don't care about the aliases. They are generally unused except - // for tarsum but those versions don't care about mediatype. - - // Don't make duplicate links.
- seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) - - // only use the first link - linkPathFn := lbs.linkPathFns[0] - - for _, dgst := range dgsts { - if _, seen := seenDigests[dgst]; seen { - continue - } - seenDigests[dgst] = struct{}{} - - blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) - if err != nil { - return err - } - - if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil { - return err - } - } - - return nil -} - -type linkedBlobStatter struct { - *blobStore - repository distribution.Repository - - // linkPathFns specifies one or more path functions allowing one to - // control the repository blob link set to which the blob store - // dispatches. This is required because manifest and layer blobs have not - // yet been fully merged. At some point, this functionality should be - // removed and the blob links folder should be merged. The first entry is - // treated as the "canonical" link location and will be used for writes. - linkPathFns []linkPathFunc -} - -var _ distribution.BlobDescriptorService = &linkedBlobStatter{} - -func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - var ( - found bool - target digest.Digest - ) - - // try the many link path functions until we get success or an error that - // is not PathNotFoundError. - for _, linkPathFn := range lbs.linkPathFns { - var err error - target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn) - - if err == nil { - found = true - break // success! - } - - switch err := err.(type) { - case driver.PathNotFoundError: - // do nothing, just move to the next linkPathFn - default: - return distribution.Descriptor{}, err - } - } - - if !found { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - if target != dgst { - // Track when we are doing cross-digest domain lookups. i.e., sha512 to sha256. - context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) - } - - // TODO(stevvooe): Look up repository local mediatype and replace that on - // the returned descriptor. - - return lbs.blobStore.statter.Stat(ctx, target) -} - -func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { - // clear any possible existence of a link described in linkPathFns - for _, linkPathFn := range lbs.linkPathFns { - blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) - if err != nil { - return err - } - - err = lbs.blobStore.driver.Delete(ctx, blobLinkPath) - if err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - continue // just ignore this error and continue - default: - return err - } - } - } - - return nil -} - -// resolveWithLinkFunc allows us to read a link to a resource with different -// linkPathFuncs to let us try a few different paths before returning not -// found. -func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { - blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) - if err != nil { - return "", err - } - - return lbs.blobStore.readlink(ctx, blobLinkPath) -} - -func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - // The canonical descriptor for a blob is set at the commit phase of upload - return nil -} - -// blobLinkPath provides the path to the blob link, also known as layers. -func blobLinkPath(name string, dgst digest.Digest) (string, error) { - return pathFor(layerLinkPathSpec{name: name, digest: dgst}) -} - -// manifestRevisionLinkPath provides the path to the manifest revision link. -func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) { - return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst}) -}
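WithMountFrom above drives the cross-repository mount path in Create: when the mount succeeds, Create returns distribution.ErrBlobMounted instead of an upload session. A hedged sketch of the calling convention (mountOrUpload is an illustrative name, not from this patch):

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/storage"
)

// mountOrUpload attempts a cross-repo mount before falling back to upload.
func mountOrUpload(ctx context.Context, repo distribution.Repository, from reference.Canonical) (distribution.BlobWriter, error) {
	bw, err := repo.Blobs(ctx).Create(ctx, storage.WithMountFrom(from))
	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
		// Already linked into this repository; ebm.Descriptor describes it.
		_ = ebm.Descriptor
		return nil, nil
	}
	// Fall back to a normal upload session (or propagate the error).
	return bw, err
}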
diff --git a/docs/storage/manifestlisthandler.go b/docs/storage/manifestlisthandler.go deleted file mode 100644 index 42027d13..00000000 --- a/docs/storage/manifestlisthandler.go +++ /dev/null @@ -1,96 +0,0 @@ -package storage - -import ( - "encoding/json" - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/manifestlist" -) - -// manifestListHandler is a ManifestHandler that covers schema2 manifest lists. -type manifestListHandler struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context -} - -var _ ManifestHandler = &manifestListHandler{} - -func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") - - var m manifestlist.DeserializedManifestList - if err := json.Unmarshal(content, &m); err != nil { - return nil, err - } - - return &m, nil -} - -func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") - - m, ok := manifestList.(*manifestlist.DeserializedManifestList) - if !ok { - return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList) - } - - if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { - return "", err - } - - mt, payload, err := m.Payload() - if err != nil { - return "", err - } - - revision, err := ms.blobStore.Put(ctx, mt, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return "", err - } - - // Link the revision into the repository. - if err := ms.blobStore.linkBlob(ctx, revision); err != nil { - return "", err - } - - return revision.Digest, nil -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. As a policy, the registry only tries to -// store valid content, leaving trust policies of that content up to -// consumers. -func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error { - var errs distribution.ErrManifestVerification - - if !skipDependencyVerification { - // This manifest service is different from the blob service - // returned by Blob. It uses a linked blob store to ensure that - // only manifests are accessible. - manifestService, err := ms.repository.Manifests(ctx) - if err != nil { - return err - } - - for _, manifestDescriptor := range mnfst.References() { - exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest) - if err != nil && err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - if err != nil || !exists { - // On error here, we always append unknown blob errors.
- errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest}) - } - } - } - if len(errs) != 0 { - return errs - } - - return nil -} diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go deleted file mode 100644 index 9e8065bb..00000000 --- a/docs/storage/manifeststore.go +++ /dev/null @@ -1,141 +0,0 @@ -package storage - -import ( - "encoding/json" - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" -) - -// A ManifestHandler gets and puts manifests of a particular type. -type ManifestHandler interface { - // Unmarshal unmarshals the manifest from a byte slice. - Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) - - // Put creates or updates the given manifest returning the manifest digest. - Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) -} - -// SkipLayerVerification allows a manifest to be Put before its -// layers are on the filesystem. -func SkipLayerVerification() distribution.ManifestServiceOption { - return skipLayerOption{} -} - -type skipLayerOption struct{} - -func (o skipLayerOption) Apply(m distribution.ManifestService) error { - if ms, ok := m.(*manifestStore); ok { - ms.skipDependencyVerification = true - return nil - } - return fmt.Errorf("skip layer verification only valid for manifestStore") -} - -type manifestStore struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context - - skipDependencyVerification bool - - schema1Handler ManifestHandler - schema2Handler ManifestHandler - manifestListHandler ManifestHandler -} - -var _ distribution.ManifestService = &manifestStore{} - -func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") - - _, err := ms.blobStore.Stat(ms.ctx, dgst) - if err != nil { - if err == distribution.ErrBlobUnknown { - return false, nil - } - - return false, err - } - - return true, nil -} - -func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") - - // TODO(stevvooe): Need to check descriptor from above to ensure that the - // mediatype is as we expect for the manifest store.
- - content, err := ms.blobStore.Get(ctx, dgst) - if err != nil { - if err == distribution.ErrBlobUnknown { - return nil, distribution.ErrManifestUnknownRevision{ - Name: ms.repository.Named().Name(), - Revision: dgst, - } - } - - return nil, err - } - - var versioned manifest.Versioned - if err = json.Unmarshal(content, &versioned); err != nil { - return nil, err - } - - switch versioned.SchemaVersion { - case 1: - return ms.schema1Handler.Unmarshal(ctx, dgst, content) - case 2: - // This can be an image manifest or a manifest list - switch versioned.MediaType { - case schema2.MediaTypeManifest: - return ms.schema2Handler.Unmarshal(ctx, dgst, content) - case manifestlist.MediaTypeManifestList: - return ms.manifestListHandler.Unmarshal(ctx, dgst, content) - default: - return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)} - } - } - - return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion) -} - -func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") - - switch manifest.(type) { - case *schema1.SignedManifest: - return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification) - case *schema2.DeserializedManifest: - return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification) - case *manifestlist.DeserializedManifestList: - return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification) - } - - return "", fmt.Errorf("unrecognized manifest type %T", manifest) -} - -// Delete removes the revision of the specified manifest. -func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") - return ms.blobStore.Delete(ctx, dgst) -} - -func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error { - err := ms.blobStore.Enumerate(ctx, func(dgst digest.Digest) error { - err := ingester(dgst) - if err != nil { - return err - } - return nil - }) - return err -} diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go deleted file mode 100644 index cbd30c04..00000000 --- a/docs/storage/manifeststore_test.go +++ /dev/null @@ -1,391 +0,0 @@ -package storage - -import ( - "bytes" - "io" - "reflect" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/testutil" - "github.com/docker/libtrust" -) - -type manifestStoreTestEnv struct { - ctx context.Context - driver driver.StorageDriver - registry distribution.Namespace - repository distribution.Repository - name reference.Named - tag string -} - -func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string, options ...RegistryOption) *manifestStoreTestEnv { - ctx := context.Background() - driver := inmemory.New() - registry, err := NewRegistry(ctx, driver, options...) 
- if err != nil { - t.Fatalf("error creating registry: %v", err) - } - - repo, err := registry.Repository(ctx, name) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - - return &manifestStoreTestEnv{ - ctx: ctx, - driver: driver, - registry: registry, - repository: repo, - name: name, - tag: tag, - } -} - -func TestManifestStorage(t *testing.T) { - k, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect, Schema1SigningKey(k)) -} - -func testManifestStorage(t *testing.T, options ...RegistryOption) { - repoName, _ := reference.ParseNamed("foo/bar") - env := newManifestStoreTestEnv(t, repoName, "thetag", options...) - ctx := context.Background() - ms, err := env.repository.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - - m := schema1.Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: env.name.Name(), - Tag: env.tag, - } - - // Build up some test layers and add them to the manifest, saving the - // readseekers for upload later. - testLayers := map[digest.Digest]io.ReadSeeker{} - for i := 0; i < 2; i++ { - rs, ds, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("unexpected error generating test layer file: %v", err) - } - dgst := digest.Digest(ds) - - testLayers[dgst] = rs - m.FSLayers = append(m.FSLayers, schema1.FSLayer{ - BlobSum: dgst, - }) - m.History = append(m.History, schema1.History{ - V1Compatibility: "", - }) - - } - - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key: %v", err) - } - - sm, merr := schema1.Sign(&m, pk) - if merr != nil { - t.Fatalf("error signing manifest: %v", merr) - } - - _, err = ms.Put(ctx, sm) - if err == nil { - t.Fatalf("expected errors putting manifest with full verification") - } - - switch err := err.(type) { - case distribution.ErrManifestVerification: - if len(err) != 2 { - t.Fatalf("expected 2 verification errors: %#v", err) - } - - for _, err := range err { - if _, ok := err.(distribution.ErrManifestBlobUnknown); !ok { - t.Fatalf("unexpected error type: %v", err) - } - } - default: - t.Fatalf("unexpected error verifying manifest: %v", err) - } - - // Now, upload the layers that were missing!
- for dgst, rs := range testLayers { - wr, err := env.repository.Blobs(env.ctx).Create(env.ctx) - if err != nil { - t.Fatalf("unexpected error creating test upload: %v", err) - } - - if _, err := io.Copy(wr, rs); err != nil { - t.Fatalf("unexpected error copying to upload: %v", err) - } - - if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil { - t.Fatalf("unexpected error finishing upload: %v", err) - } - } - - var manifestDigest digest.Digest - if manifestDigest, err = ms.Put(ctx, sm); err != nil { - t.Fatalf("unexpected error putting manifest: %v", err) - } - - exists, err := ms.Exists(ctx, manifestDigest) - if err != nil { - t.Fatalf("unexpected error checking manifest existence: %#v", err) - } - - if !exists { - t.Fatalf("manifest should exist") - } - - fromStore, err := ms.Get(ctx, manifestDigest) - if err != nil { - t.Fatalf("unexpected error fetching manifest: %v", err) - } - - fetchedManifest, ok := fromStore.(*schema1.SignedManifest) - if !ok { - t.Fatalf("unexpected manifest type from signedstore") - } - - if !bytes.Equal(fetchedManifest.Canonical, sm.Canonical) { - t.Fatalf("fetched payload does not match original payload: %q != %q", fetchedManifest.Canonical, sm.Canonical) - } - - _, pl, err := fetchedManifest.Payload() - if err != nil { - t.Fatalf("error getting payload %#v", err) - } - - fetchedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") - if err != nil { - t.Fatalf("unexpected error parsing jws: %v", err) - } - - payload, err := fetchedJWS.Payload() - if err != nil { - t.Fatalf("unexpected error extracting payload: %v", err) - } - - // Now that we have a payload, take a moment to check that the manifest is - // returned by the payload digest. - - dgst := digest.FromBytes(payload) - exists, err = ms.Exists(ctx, dgst) - if err != nil { - t.Fatalf("error checking manifest existence by digest: %v", err) - } - - if !exists { - t.Fatalf("manifest %s should exist", dgst) - } - - fetchedByDigest, err := ms.Get(ctx, dgst) - if err != nil { - t.Fatalf("unexpected error fetching manifest by digest: %v", err) - } - - byDigestManifest, ok := fetchedByDigest.(*schema1.SignedManifest) - if !ok { - t.Fatalf("unexpected manifest type from signedstore") - } - - if !bytes.Equal(byDigestManifest.Canonical, fetchedManifest.Canonical) { - t.Fatalf("fetched manifest not equal: %q != %q", byDigestManifest.Canonical, fetchedManifest.Canonical) - } - - sigs, err := fetchedJWS.Signatures() - if err != nil { - t.Fatalf("unable to extract signatures: %v", err) - } - - if len(sigs) != 1 { - t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1) - } - - // Now, push the same manifest with a different key - pk2, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key: %v", err) - } - - sm2, err := schema1.Sign(&m, pk2) - if err != nil { - t.Fatalf("unexpected error signing manifest: %v", err) - } - _, pl, err = sm2.Payload() - if err != nil { - t.Fatalf("error getting payload %#v", err) - } - - jws2, err := libtrust.ParsePrettySignature(pl, "signatures") - if err != nil { - t.Fatalf("error parsing signature: %v", err) - } - - sigs2, err := jws2.Signatures() - if err != nil { - t.Fatalf("unable to extract signatures: %v", err) - } - - if len(sigs2) != 1 { - t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1) - } - - if manifestDigest, err = ms.Put(ctx, sm2); err != nil { - t.Fatalf("unexpected error putting manifest: %v", err) - } - - fromStore, err = ms.Get(ctx, 
manifestDigest) - if err != nil { - t.Fatalf("unexpected error fetching manifest: %v", err) - } - - fetched, ok := fromStore.(*schema1.SignedManifest) - if !ok { - t.Fatalf("unexpected type from signed manifeststore: %T", fetched) - } - - if _, err := schema1.Verify(fetched); err != nil { - t.Fatalf("unexpected error verifying manifest: %v", err) - } - - _, pl, err = fetched.Payload() - if err != nil { - t.Fatalf("error getting payload %#v", err) - } - - receivedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") - if err != nil { - t.Fatalf("unexpected error parsing jws: %v", err) - } - - receivedPayload, err := receivedJWS.Payload() - if err != nil { - t.Fatalf("unexpected error extracting received payload: %v", err) - } - - if !bytes.Equal(receivedPayload, payload) { - t.Fatalf("payloads are not equal") - } - - // Test deleting manifests - err = ms.Delete(ctx, dgst) - if err != nil { - t.Fatalf("unexpected error deleting manifest by digest: %v", err) - } - - exists, err = ms.Exists(ctx, dgst) - if err != nil { - t.Fatalf("Error querying manifest existence") - } - if exists { - t.Errorf("Deleted manifest should not exist") - } - - deletedManifest, err := ms.Get(ctx, dgst) - if err == nil { - t.Errorf("Unexpected success getting deleted manifest") - } - switch err.(type) { - case distribution.ErrManifestUnknownRevision: - default: - t.Errorf("Unexpected error getting deleted manifest: %s", reflect.ValueOf(err).Type()) - } - - if deletedManifest != nil { - t.Errorf("Deleted manifest get returned non-nil") - } - - // Re-upload should restore manifest to a good state - _, err = ms.Put(ctx, sm) - if err != nil { - t.Errorf("Error re-uploading deleted manifest") - } - - exists, err = ms.Exists(ctx, dgst) - if err != nil { - t.Fatalf("Error querying manifest existence") - } - if !exists { - t.Errorf("Restored manifest should exist") - } - - deletedManifest, err = ms.Get(ctx, dgst) - if err != nil { - t.Errorf("Unexpected error getting manifest") - } - if deletedManifest == nil { - t.Errorf("Restored manifest get returned nil") - } - - r, err := NewRegistry(ctx, env.driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) - if err != nil { - t.Fatalf("error creating registry: %v", err) - } - repo, err := r.Repository(ctx, env.name) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - ms, err = repo.Manifests(ctx) - if err != nil { - t.Fatal(err) - } - err = ms.Delete(ctx, dgst) - if err == nil { - t.Errorf("Unexpected success deleting while disabled") - } -} - -// TestLinkPathFuncs ensures that the link path functions' behavior is locked -// down and implemented as expected. -func TestLinkPathFuncs(t *testing.T) { - for _, testcase := range []struct { - repo string - digest digest.Digest - linkPathFn linkPathFunc - expected string - }{ - { - repo: "foo/bar", - digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - linkPathFn: blobLinkPath, - expected: "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", - }, - { - repo: "foo/bar", - digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - linkPathFn: manifestRevisionLinkPath, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", - }, - } { - p, err := testcase.linkPathFn(testcase.repo, testcase.digest) - if err != nil { - t.Fatalf("unexpected error calling linkPathFn(%q, %q): %v", testcase.repo, testcase.digest, err) - } - - if p != testcase.expected { - t.Fatalf("incorrect path returned: %q != %q", p, testcase.expected) - } - } - -}
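An aside on why the round trip in the test above works: the manifest store is content addressed. For schema1 the address is the digest of the canonical JWS payload, which is why the test digests fetchedJWS.Payload() rather than the signed bytes. A compressed sketch of that lookup (ms and ctx as in the test; canonicalPayload is an illustrative name for those payload bytes; error handling elided):

// Sketch: manifests are retrievable by the digest of their canonical payload.
dgst := digest.FromBytes(canonicalPayload)

if ok, _ := ms.Exists(ctx, dgst); ok {
	m, _ := ms.Get(ctx, dgst) // same manifest, addressed purely by content
	_ = m
}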
diff --git a/docs/storage/paths.go b/docs/storage/paths.go deleted file mode 100644 index 1b142b88..00000000 --- a/docs/storage/paths.go +++ /dev/null @@ -1,490 +0,0 @@ -package storage - -import ( - "fmt" - "path" - "strings" - - "github.com/docker/distribution/digest" -) - -const ( - storagePathVersion = "v2" // fixed storage layout version - storagePathRoot = "/docker/registry/" // all driver paths have a prefix - - // TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we thought - // storage path root would be configurable for all drivers through this - // package. In reality, we've found it simpler to do this on a per driver - // basis. -) - -// pathFor maps paths based on "object names" and their ids. The "object -// names" mapped are internal to the storage system. -// -// The path layout in the storage backend is roughly as follows: -// -// <root>/v2 -// -> repositories/ -// -> <name>/ -// -> _manifests/ -// revisions -// -> <manifest digest path> -// -> link -// tags/<tag> -// -> current/link -// -> index -// -> <algorithm>/<hex digest>/link -// -> _layers/ -// <layer links to blob store> -// -> _uploads/<id> -// data -// startedat -// hashstates/<algorithm>/<offset> -// -> blob/<algorithm> -// <split directory content addressable storage> -// -// The storage backend layout is broken up into a content-addressable blob -// store and repositories. The content-addressable blob store holds most data -// throughout the backend, keyed by algorithm and digests of the underlying -// content. Access to the blob store is controlled through links from the -// repository to blobstore. -// -// A repository is made up of layers, manifests and tags. The layers component -// is just a directory of layers which are "linked" into a repository. A layer -// can only be accessed through a qualified repository name if it is linked in -// the repository. Uploads of layers are managed in the uploads directory, -// which is keyed by upload id. When all data for an upload is received, the -// data is moved into the blob store and the upload directory is deleted. -// Abandoned uploads can be garbage collected by reading the startedat file -// and removing uploads that have been active for longer than a certain time. -// -// The third component of the repository directory is the manifests store, -// which is made up of a revision store and tag store. Manifests are stored in -// the blob store and linked into the revision store. -// While the registry can save all revisions of a manifest, no relationship is -// implied as to the ordering of changes to a manifest.
The tag store provides -// support for name, tag lookups of manifests, using "current/link" under a -// named tag directory. An index is maintained to support deletions of all -// revisions of a given manifest tag. -// -// We cover the path formats implemented by this path mapper below. -// -// Manifests: -// -// manifestRevisionsPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/ -// manifestRevisionPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/ -// manifestRevisionLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link -// -// Tags: -// -// manifestTagsPathSpec: <root>/v2/repositories/<name>/_manifests/tags/ -// manifestTagPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/ -// manifestTagCurrentPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link -// manifestTagIndexPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/ -// manifestTagIndexEntryPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/ -// manifestTagIndexEntryLinkPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link -// -// Blobs: -// -// layerLinkPathSpec: <root>/v2/repositories/<name>/_layers/<algorithm>/<hex digest>/link -// -// Uploads: -// -// uploadDataPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/data -// uploadStartedAtPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/startedat -// uploadHashStatePathSpec: <root>/v2/repositories/<name>/_uploads/<id>/hashstates/<algorithm>/<offset> -// -// Blob Store: -// -// blobsPathSpec: <root>/v2/blobs/ -// blobPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest> -// blobDataPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data -// blobMediaTypePathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data -// -// For more information on the semantic meaning of each path and their -// contents, please see the path spec documentation. -func pathFor(spec pathSpec) (string, error) { - - // Switch on the path object type and return the appropriate path. At - // first glance, one may wonder why we don't use an interface to - // accomplish this. By keeping the formatting separate from the pathSpec, we - // keep the path generation componentized. These specs could be - // passed to a completely different mapper implementation and generate a - // different set of paths. - // - // For example, imagine migrating from one backend to the other: one could - // build a filesystem walker that converts a string path in one version, - // to an intermediate path object, that can be consumed and mapped by the - // other version.
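Concretely, a sketch of the mapping (package-internal, since pathFor is unexported; the expected strings come from TestPathMapper and TestLinkPathFuncs later in this patch):

// Sketch: resolving storage paths from pathSpecs.
tagLink, _ := pathFor(manifestTagCurrentPathSpec{name: "foo/bar", tag: "thetag"})
// tagLink == "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/current/link"

blobData, _ := pathFor(blobDataPathSpec{digest: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"})
// blobData == "/docker/registry/v2/blobs/sha256/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data"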
- - rootPrefix := []string{storagePathRoot, storagePathVersion} - repoPrefix := append(rootPrefix, "repositories") - - switch v := spec.(type) { - - case manifestRevisionsPathSpec: - return path.Join(append(repoPrefix, v.name, "_manifests", "revisions")...), nil - - case manifestRevisionPathSpec: - components, err := digestPathComponents(v.revision, false) - if err != nil { - return "", err - } - - return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil - case manifestRevisionLinkPathSpec: - root, err := pathFor(manifestRevisionPathSpec{ - name: v.name, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "link"), nil - case manifestTagsPathSpec: - return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil - case manifestTagPathSpec: - root, err := pathFor(manifestTagsPathSpec{ - name: v.name, - }) - - if err != nil { - return "", err - } - - return path.Join(root, v.tag), nil - case manifestTagCurrentPathSpec: - root, err := pathFor(manifestTagPathSpec{ - name: v.name, - tag: v.tag, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "current", "link"), nil - case manifestTagIndexPathSpec: - root, err := pathFor(manifestTagPathSpec{ - name: v.name, - tag: v.tag, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "index"), nil - case manifestTagIndexEntryLinkPathSpec: - root, err := pathFor(manifestTagIndexEntryPathSpec{ - name: v.name, - tag: v.tag, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "link"), nil - case manifestTagIndexEntryPathSpec: - root, err := pathFor(manifestTagIndexPathSpec{ - name: v.name, - tag: v.tag, - }) - - if err != nil { - return "", err - } - - components, err := digestPathComponents(v.revision, false) - if err != nil { - return "", err - } - - return path.Join(root, path.Join(components...)), nil - case layerLinkPathSpec: - components, err := digestPathComponents(v.digest, false) - if err != nil { - return "", err - } - - // TODO(stevvooe): Right now, all blobs are linked under "_layers". If - // we have future migrations, we may want to rename this to "_blobs". - // A migration strategy would simply leave existing items in place and - // write the new paths, commit a file then delete the old files. - - blobLinkPathComponents := append(repoPrefix, v.name, "_layers") - - return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil - case blobsPathSpec: - blobsPathPrefix := append(rootPrefix, "blobs") - return path.Join(blobsPathPrefix...), nil - case blobPathSpec: - components, err := digestPathComponents(v.digest, true) - if err != nil { - return "", err - } - - blobPathPrefix := append(rootPrefix, "blobs") - return path.Join(append(blobPathPrefix, components...)...), nil - case blobDataPathSpec: - components, err := digestPathComponents(v.digest, true) - if err != nil { - return "", err - } - - components = append(components, "data") - blobPathPrefix := append(rootPrefix, "blobs") - return path.Join(append(blobPathPrefix, components...)...), nil - - case uploadDataPathSpec: - return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil - case uploadStartedAtPathSpec: - return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil - case uploadHashStatePathSpec: - offset := fmt.Sprintf("%d", v.offset) - if v.list { - offset = "" // Limit to the prefix for listing offsets. 
- } - return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil - case repositoriesRootPathSpec: - return path.Join(repoPrefix...), nil - default: - // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). - return "", fmt.Errorf("unknown path spec: %#v", v) - } -} - -// pathSpec is a type to mark structs as path specs. There is no -// implementation because we'd like to keep the specs and the mappers -// decoupled. -type pathSpec interface { - pathSpec() -} - -// manifestRevisionsPathSpec describes the directory path for -// a repository's manifest revisions. -type manifestRevisionsPathSpec struct { - name string -} - -func (manifestRevisionsPathSpec) pathSpec() {} - -// manifestRevisionPathSpec describes the components of the directory path for -// a manifest revision. -type manifestRevisionPathSpec struct { - name string - revision digest.Digest -} - -func (manifestRevisionPathSpec) pathSpec() {} - -// manifestRevisionLinkPathSpec describes the path components required to look -// up the data link for a revision of a manifest. If this file is not present, -// the manifest blob is not available in the given repo. The contents of this -// file should just be the digest. -type manifestRevisionLinkPathSpec struct { - name string - revision digest.Digest -} - -func (manifestRevisionLinkPathSpec) pathSpec() {} - -// manifestTagsPathSpec describes the path elements required to point to the -// manifest tags directory. -type manifestTagsPathSpec struct { - name string -} - -func (manifestTagsPathSpec) pathSpec() {} - -// manifestTagPathSpec describes the path elements required to point to the -// manifest tag links files under a repository. These contain a blob id that -// can be used to look up the data and signatures. -type manifestTagPathSpec struct { - name string - tag string -} - -func (manifestTagPathSpec) pathSpec() {} - -// manifestTagCurrentPathSpec describes the link to the current revision for a -// given tag. -type manifestTagCurrentPathSpec struct { - name string - tag string -} - -func (manifestTagCurrentPathSpec) pathSpec() {} - -// manifestTagIndexPathSpec describes the link to the index of revisions -// with the given tag. -type manifestTagIndexPathSpec struct { - name string - tag string -} - -func (manifestTagIndexPathSpec) pathSpec() {} - -// manifestTagIndexEntryPathSpec contains the entries of the index by revision. -type manifestTagIndexEntryPathSpec struct { - name string - tag string - revision digest.Digest -} - -func (manifestTagIndexEntryPathSpec) pathSpec() {} - -// manifestTagIndexEntryLinkPathSpec describes the link to a revision of a -// manifest with a given tag within the index. -type manifestTagIndexEntryLinkPathSpec struct { - name string - tag string - revision digest.Digest -} - -func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} - -// blobLinkPathSpec specifies a path for a blob link, which is a file with a -// blob id. The blob link will contain a content addressable blob id reference -// into the blob store. The format of the contents is as follows: -// -// <algorithm>:<hex digest> -// -// The following example of the file contents is more illustrative: -// -// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36 -// -// This indicates that there is a blob with the id/digest, calculated via -// sha256 that can be fetched from the blob store.
-type layerLinkPathSpec struct { - name string - digest digest.Digest -} - -func (layerLinkPathSpec) pathSpec() {} - -// blobAlgorithmReplacer does some very simple path sanitization for user -// input. Paths should be "safe" before getting this far due to strict digest -// requirements but we can add further path conversion here, if needed. -var blobAlgorithmReplacer = strings.NewReplacer( - "+", "/", - ".", "/", - ";", "/", -) - -// blobsPathSpec contains the path for the blobs directory. -type blobsPathSpec struct{} - -func (blobsPathSpec) pathSpec() {} - -// blobPathSpec contains the path for the registry global blob store. -type blobPathSpec struct { - digest digest.Digest -} - -func (blobPathSpec) pathSpec() {} - -// blobDataPathSpec contains the path for the registry global blob store. For -// now, this contains layer data, exclusively. -type blobDataPathSpec struct { - digest digest.Digest -} - -func (blobDataPathSpec) pathSpec() {} - -// uploadDataPathSpec defines the path parameters of the data file for -// uploads. -type uploadDataPathSpec struct { - name string - id string -} - -func (uploadDataPathSpec) pathSpec() {} - -// uploadStartedAtPathSpec defines the path parameters for the file that stores the -// start time of an upload. If it is missing, the upload is considered -// unknown. Admittedly, the presence of this file is an ugly hack to make sure -// we have a way to clean up old or stalled uploads that doesn't rely on driver -// FileInfo behavior. If we come up with a more clever way to do this, we -// should remove this file immediately and rely on the startedAt field from -// the client to enforce time out policies. -type uploadStartedAtPathSpec struct { - name string - id string -} - -func (uploadStartedAtPathSpec) pathSpec() {} - -// uploadHashStatePathSpec defines the path parameters for the file that stores -// the hash function state of an upload at a specific byte offset. If `list` is -// set, then the path mapper will generate a list prefix for all hash state -// offsets for the upload identified by the name, id, and alg. -type uploadHashStatePathSpec struct { - name string - id string - alg digest.Algorithm - offset int64 - list bool -} - -func (uploadHashStatePathSpec) pathSpec() {} - -// repositoriesRootPathSpec returns the root of repositories -type repositoriesRootPathSpec struct { -} - -func (repositoriesRootPathSpec) pathSpec() {} - -// digestPathComponents provides a consistent path breakdown for a given -// digest. For a generic digest, it will be as follows: -// -// <algorithm>/<hex digest> -// -// If multilevel is true, the first two bytes of the digest will separate -// groups of digest folders.
-// It will be as follows:
-//
-//	<algorithm>/<first two characters of hex digest>/<full hex digest>
-//
-func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
-	if err := dgst.Validate(); err != nil {
-		return nil, err
-	}
-
-	algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm()))
-	hex := dgst.Hex()
-	prefix := []string{algorithm}
-
-	var suffix []string
-
-	if multilevel {
-		suffix = append(suffix, hex[:2])
-	}
-
-	suffix = append(suffix, hex)
-
-	return append(prefix, suffix...), nil
-}
-
-// digestFromPath reconstructs a digest from a path.
-func digestFromPath(digestPath string) (digest.Digest, error) {
-
-	digestPath = strings.TrimSuffix(digestPath, "/data")
-	dir, hex := path.Split(digestPath)
-	dir = path.Dir(dir)
-	dir, next := path.Split(dir)
-
-	// next is either the algorithm OR the first two characters in the hex string
-	var algo string
-	if next == hex[:2] {
-		algo = path.Base(dir)
-	} else {
-		algo = next
-	}
-
-	dgst := digest.NewDigestFromHex(algo, hex)
-	return dgst, dgst.Validate()
-}
diff --git a/docs/storage/paths_test.go b/docs/storage/paths_test.go
deleted file mode 100644
index f739552a..00000000
--- a/docs/storage/paths_test.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package storage
-
-import (
-	"testing"
-
-	"github.com/docker/distribution/digest"
-)
-
-func TestPathMapper(t *testing.T) {
-	for _, testcase := range []struct {
-		spec     pathSpec
-		expected string
-		err      error
-	}{
-		{
-			spec: manifestRevisionPathSpec{
-				name:     "foo/bar",
-				revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
-			},
-			expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
-		},
-		{
-			spec: manifestRevisionLinkPathSpec{
-				name:     "foo/bar",
-				revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
-			},
-			expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link",
-		},
-		{
-			spec: manifestTagsPathSpec{
-				name: "foo/bar",
-			},
-			expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags",
-		},
-		{
-			spec: manifestTagPathSpec{
-				name: "foo/bar",
-				tag:  "thetag",
-			},
-			expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag",
-		},
-		{
-			spec: manifestTagCurrentPathSpec{
-				name: "foo/bar",
-				tag:  "thetag",
-			},
-			expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/current/link",
-		},
-		{
-			spec: manifestTagIndexPathSpec{
-				name: "foo/bar",
-				tag:  "thetag",
-			},
-			expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index",
-		},
-		{
-			spec: manifestTagIndexEntryPathSpec{
-				name:     "foo/bar",
-				tag:      "thetag",
-				revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
-			},
-			expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
-		},
-		{
-			spec: manifestTagIndexEntryLinkPathSpec{
-				name:     "foo/bar",
-				tag:      "thetag",
-				revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
-			},
-			expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link",
-		},
-
-		{
-			spec: uploadDataPathSpec{
-				name: "foo/bar",
-				id:   "asdf-asdf-asdf-adsf",
-			},
-			expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data",
-		},
-		{
-			spec: uploadStartedAtPathSpec{
-				name: "foo/bar",
"asdf-asdf-asdf-adsf", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", - }, - } { - p, err := pathFor(testcase.spec) - if err != nil { - t.Fatalf("unexpected generating path (%T): %v", testcase.spec, err) - } - - if p != testcase.expected { - t.Fatalf("unexpected path generated (%T): %q != %q", testcase.spec, p, testcase.expected) - } - } - - // Add a few test cases to ensure we cover some errors - - // Specify a path that requires a revision and get a digest validation error. - badpath, err := pathFor(manifestRevisionPathSpec{ - name: "foo/bar", - }) - - if err == nil { - t.Fatalf("expected an error when mapping an invalid revision: %s", badpath) - } - -} - -func TestDigestFromPath(t *testing.T) { - for _, testcase := range []struct { - path string - expected digest.Digest - multilevel bool - err error - }{ - { - path: "/docker/registry/v2/blobs/sha256/99/9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86/data", - multilevel: true, - expected: "sha256:9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86", - err: nil, - }, - } { - result, err := digestFromPath(testcase.path) - if err != testcase.err { - t.Fatalf("Unexpected error value %v when we wanted %v", err, testcase.err) - } - - if result != testcase.expected { - t.Fatalf("Unexpected result value %v when we wanted %v", result, testcase.expected) - - } - } -} diff --git a/docs/storage/purgeuploads.go b/docs/storage/purgeuploads.go deleted file mode 100644 index 7576b189..00000000 --- a/docs/storage/purgeuploads.go +++ /dev/null @@ -1,139 +0,0 @@ -package storage - -import ( - "path" - "strings" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/context" - storageDriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/uuid" -) - -// uploadData stored the location of temporary files created during a layer upload -// along with the date the upload was started -type uploadData struct { - containingDir string - startedAt time.Time -} - -func newUploadData() uploadData { - return uploadData{ - containingDir: "", - // default to far in future to protect against missing startedat - startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), - } -} - -// PurgeUploads deletes files from the upload directory -// created before olderThan. The list of files deleted and errors -// encountered are returned -func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { - log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) - uploadData, errors := getOutstandingUploads(ctx, driver) - var deleted []string - for _, uploadData := range uploadData { - if uploadData.startedAt.Before(olderThan) { - var err error - log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", - uploadData.containingDir, uploadData.startedAt, olderThan) - if actuallyDelete { - err = driver.Delete(ctx, uploadData.containingDir) - } - if err == nil { - deleted = append(deleted, uploadData.containingDir) - } else { - errors = append(errors, err) - } - } - } - - log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) - return deleted, errors -} - -// getOutstandingUploads walks the upload directory, collecting files -// which could be eligible for deletion. 
-// The only reliable way to classify the age of a file is with the date stored
-// in the startedAt file, so gather files by UUID with a date from startedAt.
-func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) {
-	var errors []error
-	uploads := make(map[string]uploadData, 0)
-
-	inUploadDir := false
-	root, err := pathFor(repositoriesRootPathSpec{})
-	if err != nil {
-		return uploads, append(errors, err)
-	}
-
-	err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error {
-		filePath := fileInfo.Path()
-		_, file := path.Split(filePath)
-		if file[0] == '_' {
-			// Reserved directory
-			inUploadDir = (file == "_uploads")
-
-			if fileInfo.IsDir() && !inUploadDir {
-				return ErrSkipDir
-			}
-
-		}
-
-		uuid, isContainingDir := uUIDFromPath(filePath)
-		if uuid == "" {
-			// Cannot reliably delete
-			return nil
-		}
-		ud, ok := uploads[uuid]
-		if !ok {
-			ud = newUploadData()
-		}
-		if isContainingDir {
-			ud.containingDir = filePath
-		}
-		if file == "startedat" {
-			if t, err := readStartedAtFile(driver, filePath); err == nil {
-				ud.startedAt = t
-			} else {
-				errors = pushError(errors, filePath, err)
-			}
-
-		}
-
-		uploads[uuid] = ud
-		return nil
-	})
-
-	if err != nil {
-		errors = pushError(errors, root, err)
-	}
-	return uploads, errors
-}
-
-// uUIDFromPath extracts the upload UUID from a given path.
-// If the UUID is the last path component, this is the containing
-// directory for all upload files.
-func uUIDFromPath(path string) (string, bool) {
-	components := strings.Split(path, "/")
-	for i := len(components) - 1; i >= 0; i-- {
-		if u, err := uuid.Parse(components[i]); err == nil {
-			return u.String(), i == len(components)-1
-		}
-	}
-	return "", false
-}
-
-// readStartedAtFile reads the date from an upload's startedAt file.
-func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) {
-	// todo:(richardscothern) - pass in a context
-	startedAtBytes, err := driver.GetContent(context.Background(), path)
-	if err != nil {
-		return time.Now(), err
-	}
-	startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
-	if err != nil {
-		return time.Now(), err
-	}
-	return startedAt, nil
-}
diff --git a/docs/storage/purgeuploads_test.go b/docs/storage/purgeuploads_test.go
deleted file mode 100644
index 3b70f723..00000000
--- a/docs/storage/purgeuploads_test.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package storage
-
-import (
-	"path"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/registry/storage/driver"
-	"github.com/docker/distribution/registry/storage/driver/inmemory"
-	"github.com/docker/distribution/uuid"
-)
-
-func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) {
-	d := inmemory.New()
-	ctx := context.Background()
-	for i := 0; i < numUploads; i++ {
-		addUploads(ctx, t, d, uuid.Generate().String(), repoName, startedAt)
-	}
-	return d, ctx
-}
-
-func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) {
-	dataPath, err := pathFor(uploadDataPathSpec{name: repo, id: uploadID})
-	if err != nil {
-		t.Fatalf("Unable to resolve path")
-	}
-	if err := d.PutContent(ctx, dataPath, []byte("")); err != nil {
-		t.Fatalf("Unable to write data file")
-	}
-
-	startedAtPath, err := pathFor(uploadStartedAtPathSpec{name: repo, id: uploadID})
-	if err != nil {
-		t.Fatalf("Unable to resolve path")
-	}
-
-	if err := d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
-		t.Fatalf("Unable to write startedAt file")
-	}
-
-}
-
-func TestPurgeGather(t *testing.T) {
-	uploadCount := 5
-	fs, ctx := testUploadFS(t, uploadCount, "test-repo", time.Now())
-	uploadData, errs := getOutstandingUploads(ctx, fs)
-	if len(errs) != 0 {
-		t.Errorf("Unexpected errors: %q", errs)
-	}
-	if len(uploadData) != uploadCount {
-		t.Errorf("Unexpected upload file count: %d != %d", uploadCount, len(uploadData))
-	}
-}
-
-func TestPurgeNone(t *testing.T) {
-	fs, ctx := testUploadFS(t, 10, "test-repo", time.Now())
-	oneHourAgo := time.Now().Add(-1 * time.Hour)
-	deleted, errs := PurgeUploads(ctx, fs, oneHourAgo, true)
-	if len(errs) != 0 {
-		t.Error("Unexpected errors", errs)
-	}
-	if len(deleted) != 0 {
-		t.Errorf("Unexpectedly deleted files for time: %s", oneHourAgo)
-	}
-}
-
-func TestPurgeAll(t *testing.T) {
-	uploadCount := 10
-	oneHourAgo := time.Now().Add(-1 * time.Hour)
-	fs, ctx := testUploadFS(t, uploadCount, "test-repo", oneHourAgo)
-
-	// Ensure more than one repo is purged
-	addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo2", oneHourAgo)
-	uploadCount++
-
-	deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
-	if len(errs) != 0 {
-		t.Error("Unexpected errors:", errs)
-	}
-	fileCount := uploadCount
-	if len(deleted) != fileCount {
-		t.Errorf("Unexpectedly deleted file count %d != %d",
-			len(deleted), fileCount)
-	}
-}
-
-func TestPurgeSome(t *testing.T) {
-	oldUploadCount := 5
-	oneHourAgo := time.Now().Add(-1 * time.Hour)
-	fs, ctx := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo)
-
-	newUploadCount := 4
-
-	for i := 0; i < newUploadCount; i++ {
-		addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo", time.Now().Add(1*time.Hour))
-	}
-
-	deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
-	if len(errs) != 0 {
-		t.Error("Unexpected errors:", errs)
-	}
-	if len(deleted) != oldUploadCount {
-		t.Errorf("Unexpectedly deleted file count %d != %d",
-			len(deleted), oldUploadCount)
-	}
-}
-
-func TestPurgeOnlyUploads(t *testing.T) {
-	oldUploadCount := 5
-	oneHourAgo := time.Now().Add(-1 * time.Hour)
-	fs, ctx := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo)
-
-	// Create a directory tree outside _uploads and ensure
-	// these files aren't deleted.
-	dataPath, err := pathFor(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()})
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
-	nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1)
-	if strings.Index(nonUploadPath, "_upload") != -1 {
-		t.Fatalf("Non-upload path not created correctly")
-	}
-
-	nonUploadFile := path.Join(nonUploadPath, "file")
-	if err = fs.PutContent(ctx, nonUploadFile, []byte("")); err != nil {
-		t.Fatalf("Unable to write data file")
-	}
-
-	deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
-	if len(errs) != 0 {
-		t.Error("Unexpected errors", errs)
-	}
-	for _, file := range deleted {
-		if strings.Index(file, "_upload") == -1 {
-			t.Errorf("Non-upload file deleted")
-		}
-	}
-}
-
-func TestPurgeMissingStartedAt(t *testing.T) {
-	oneHourAgo := time.Now().Add(-1 * time.Hour)
-	fs, ctx := testUploadFS(t, 1, "test-repo", oneHourAgo)
-
-	err := Walk(ctx, fs, "/", func(fileInfo driver.FileInfo) error {
-		filePath := fileInfo.Path()
-		_, file := path.Split(filePath)
-
-		if file == "startedat" {
-			if err := fs.Delete(ctx, filePath); err != nil {
-				t.Fatalf("Unable to delete startedat file: %s", filePath)
-			}
-		}
-		return nil
-	})
-	if err != nil {
-		t.Fatalf("Unexpected error during Walk: %s ", err.Error())
-	}
-	deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
-	if len(errs) > 0 {
-		t.Errorf("Unexpected errors")
-	}
-	if len(deleted) > 0 {
-		t.Errorf("Files unexpectedly deleted: %s", deleted)
-	}
-}
diff --git a/docs/storage/registry.go b/docs/storage/registry.go
deleted file mode 100644
index 94034b26..00000000
--- a/docs/storage/registry.go
+++ /dev/null
@@ -1,279 +0,0 @@
-package storage
-
-import (
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/reference"
-	"github.com/docker/distribution/registry/storage/cache"
-	storagedriver "github.com/docker/distribution/registry/storage/driver"
-	"github.com/docker/libtrust"
-)
-
-// registry is the top-level implementation of Registry for use in the storage
-// package. All instances should descend from this object.
-type registry struct {
-	blobStore                    *blobStore
-	blobServer                   *blobServer
-	statter                      *blobStatter // global statter service.
-	blobDescriptorCacheProvider  cache.BlobDescriptorCacheProvider
-	deleteEnabled                bool
-	resumableDigestEnabled       bool
-	schema1SigningKey            libtrust.PrivateKey
-	blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory
-}
-
-// RegistryOption is the type used for functional options for NewRegistry.
-type RegistryOption func(*registry) error
-
-// EnableRedirect is a functional option for NewRegistry. It causes the backend
-// blob server to attempt using (StorageDriver).URLFor to serve all blobs.
-func EnableRedirect(registry *registry) error {
-	registry.blobServer.redirect = true
-	return nil
-}
-
-// EnableDelete is a functional option for NewRegistry. It enables deletion on
-// the registry.
-func EnableDelete(registry *registry) error {
-	registry.deleteEnabled = true
-	return nil
-}
-
-// DisableDigestResumption is a functional option for NewRegistry. It should be
-// used if the registry is acting as a caching proxy.
-func DisableDigestResumption(registry *registry) error {
-	registry.resumableDigestEnabled = false
-	return nil
-}
-
-// Schema1SigningKey returns a functional option for NewRegistry. It sets the
-// key for signing all schema1 manifests.
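-//
-// (Editorial sketch, not part of the original file: RegistryOption values
-// compose at construction time, e.g.
-//
-//	reg, err := NewRegistry(ctx, driver, EnableDelete, Schema1SigningKey(key))
-//
-// where ctx, driver and key are caller-supplied.)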
-func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption {
-	return func(registry *registry) error {
-		registry.schema1SigningKey = key
-		return nil
-	}
-}
-
-// BlobDescriptorServiceFactory returns a functional option for NewRegistry.
-// It sets the factory used to create BlobDescriptorService middleware.
-func BlobDescriptorServiceFactory(factory distribution.BlobDescriptorServiceFactory) RegistryOption {
-	return func(registry *registry) error {
-		registry.blobDescriptorServiceFactory = factory
-		return nil
-	}
-}
-
-// BlobDescriptorCacheProvider returns a functional option for
-// NewRegistry. It creates a cached blob statter for use by the
-// registry.
-func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption {
-	// TODO(aaronl): The duplication of statter across several objects is
-	// ugly, and prevents us from using interface types in the registry
-	// struct. Ideally, blobStore and blobServer should be lazily
-	// initialized, and use the current value of
-	// blobDescriptorCacheProvider.
-	return func(registry *registry) error {
-		if blobDescriptorCacheProvider != nil {
-			statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter)
-			registry.blobStore.statter = statter
-			registry.blobServer.statter = statter
-			registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider
-		}
-		return nil
-	}
-}
-
-// NewRegistry creates a new registry instance from the provided driver. The
-// resulting registry may be shared by multiple goroutines but is cheap to
-// allocate. If the EnableRedirect option is specified, the backend blob
-// server will attempt to use (StorageDriver).URLFor to serve all blobs.
-func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) {
-	// create global statter
-	statter := &blobStatter{
-		driver: driver,
-	}
-
-	bs := &blobStore{
-		driver:  driver,
-		statter: statter,
-	}
-
-	registry := &registry{
-		blobStore: bs,
-		blobServer: &blobServer{
-			driver:  driver,
-			statter: statter,
-			pathFn:  bs.path,
-		},
-		statter:                statter,
-		resumableDigestEnabled: true,
-	}
-
-	for _, option := range options {
-		if err := option(registry); err != nil {
-			return nil, err
-		}
-	}
-
-	return registry, nil
-}
-
-// Scope returns the namespace scope for a registry. The registry
-// will only serve repositories contained within this scope.
-func (reg *registry) Scope() distribution.Scope {
-	return distribution.GlobalScope
-}
-
-// Repository returns an instance of the repository tied to the registry.
-// Instances should not be shared between goroutines but are cheap to
-// allocate. In general, they should be request scoped.
-func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) {
-	var descriptorCache distribution.BlobDescriptorService
-	if reg.blobDescriptorCacheProvider != nil {
-		var err error
-		descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name())
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return &repository{
-		ctx:             ctx,
-		registry:        reg,
-		name:            canonicalName,
-		descriptorCache: descriptorCache,
-	}, nil
-}
-
-func (reg *registry) Blobs() distribution.BlobEnumerator {
-	return reg.blobStore
-}
-
-func (reg *registry) BlobStatter() distribution.BlobStatter {
-	return reg.statter
-}
-
-// repository provides name-scoped access to various services.
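-//
-// (Editorial sketch, not part of the original file: repositories are meant to
-// be request scoped; a handler would typically do
-//
-//	named, _ := reference.ParseNamed("foo/bar")
-//	repo, _ := reg.Repository(ctx, named)
-//	manifests, _ := repo.Manifests(ctx)
-//
-// with reg obtained from NewRegistry above and errors checked in real code.)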
-type repository struct {
-	*registry
-	ctx             context.Context
-	name            reference.Named
-	descriptorCache distribution.BlobDescriptorService
-}
-
-// Named returns the name of the repository.
-func (repo *repository) Named() reference.Named {
-	return repo.name
-}
-
-func (repo *repository) Tags(ctx context.Context) distribution.TagService {
-	tags := &tagStore{
-		repository: repo,
-		blobStore:  repo.registry.blobStore,
-	}
-
-	return tags
-}
-
-// Manifests returns an instance of ManifestService. Instantiation is cheap and
-// may be context sensitive in the future. The instance should be used
-// similarly to a request-local value.
-func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
-	manifestLinkPathFns := []linkPathFunc{
-		// NOTE(stevvooe): Need to search through multiple locations since
-		// 2.1.0 unintentionally linked into _layers.
-		manifestRevisionLinkPath,
-		blobLinkPath,
-	}
-
-	manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()}
-
-	var statter distribution.BlobDescriptorService = &linkedBlobStatter{
-		blobStore:   repo.blobStore,
-		repository:  repo,
-		linkPathFns: manifestLinkPathFns,
-	}
-
-	if repo.registry.blobDescriptorServiceFactory != nil {
-		statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter)
-	}
-
-	blobStore := &linkedBlobStore{
-		ctx:                  ctx,
-		blobStore:            repo.blobStore,
-		repository:           repo,
-		deleteEnabled:        repo.registry.deleteEnabled,
-		blobAccessController: statter,
-
-		// TODO(stevvooe): linkPath limits this blob store to only
-		// manifests. This instance cannot be used for blob checks.
-		linkPathFns:           manifestLinkPathFns,
-		linkDirectoryPathSpec: manifestDirectoryPathSpec,
-	}
-
-	ms := &manifestStore{
-		ctx:        ctx,
-		repository: repo,
-		blobStore:  blobStore,
-		schema1Handler: &signedManifestHandler{
-			ctx:        ctx,
-			repository: repo,
-			blobStore:  blobStore,
-		},
-		schema2Handler: &schema2ManifestHandler{
-			ctx:        ctx,
-			repository: repo,
-			blobStore:  blobStore,
-		},
-		manifestListHandler: &manifestListHandler{
-			ctx:        ctx,
-			repository: repo,
-			blobStore:  blobStore,
-		},
-	}
-
-	// Apply options
-	for _, option := range options {
-		err := option.Apply(ms)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return ms, nil
-}
-
-// Blobs returns an instance of the BlobStore. Instantiation is cheap and
-// may be context sensitive in the future. The instance should be used
-// similarly to a request-local value.
-func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore {
-	var statter distribution.BlobDescriptorService = &linkedBlobStatter{
-		blobStore:   repo.blobStore,
-		repository:  repo,
-		linkPathFns: []linkPathFunc{blobLinkPath},
-	}
-
-	if repo.descriptorCache != nil {
-		statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter)
-	}
-
-	if repo.registry.blobDescriptorServiceFactory != nil {
-		statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter)
-	}
-
-	return &linkedBlobStore{
-		registry:             repo.registry,
-		blobStore:            repo.blobStore,
-		blobServer:           repo.blobServer,
-		blobAccessController: statter,
-		repository:           repo,
-		ctx:                  ctx,
-
-		// TODO(stevvooe): linkPath limits this blob store to only layers.
-		// This instance cannot be used for manifest checks.
-		linkPathFns:            []linkPathFunc{blobLinkPath},
-		deleteEnabled:          repo.registry.deleteEnabled,
-		resumableDigestEnabled: repo.resumableDigestEnabled,
-	}
-}
diff --git a/docs/storage/schema2manifesthandler.go b/docs/storage/schema2manifesthandler.go
deleted file mode 100644
index 6456efa4..00000000
--- a/docs/storage/schema2manifesthandler.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package storage
-
-import (
-	"errors"
-	"fmt"
-	"net/url"
-
-	"encoding/json"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest/schema2"
-)
-
-var (
-	errUnexpectedURL = errors.New("unexpected URL on layer")
-	errMissingURL    = errors.New("missing URL on layer")
-	errInvalidURL    = errors.New("invalid URL on layer")
-)
-
-// schema2ManifestHandler is a ManifestHandler that covers schema2 manifests.
-type schema2ManifestHandler struct {
-	repository *repository
-	blobStore  *linkedBlobStore
-	ctx        context.Context
-}
-
-var _ ManifestHandler = &schema2ManifestHandler{}
-
-func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
-	context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal")
-
-	var m schema2.DeserializedManifest
-	if err := json.Unmarshal(content, &m); err != nil {
-		return nil, err
-	}
-
-	return &m, nil
-}
-
-func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
-	context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put")
-
-	m, ok := manifest.(*schema2.DeserializedManifest)
-	if !ok {
-		return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest)
-	}
-
-	if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil {
-		return "", err
-	}
-
-	mt, payload, err := m.Payload()
-	if err != nil {
-		return "", err
-	}
-
-	revision, err := ms.blobStore.Put(ctx, mt, payload)
-	if err != nil {
-		context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
-		return "", err
-	}
-
-	// Link the revision into the repository.
-	if err := ms.blobStore.linkBlob(ctx, revision); err != nil {
-		return "", err
-	}
-
-	return revision.Digest, nil
-}
-
-// verifyManifest ensures that the manifest content is valid from the
-// perspective of the registry. As a policy, the registry only tries to store
-// valid content, leaving trust policies of that content up to consumers.
-func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error {
-	var errs distribution.ErrManifestVerification
-
-	if !skipDependencyVerification {
-		target := mnfst.Target()
-		_, err := ms.repository.Blobs(ctx).Stat(ctx, target.Digest)
-		if err != nil {
-			if err != distribution.ErrBlobUnknown {
-				errs = append(errs, err)
-			}
-
-			// On error here, we always append unknown blob errors.
-			errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: target.Digest})
-		}
-
-		for _, fsLayer := range mnfst.References() {
-			var err error
-			if fsLayer.MediaType != schema2.MediaTypeForeignLayer {
-				if len(fsLayer.URLs) == 0 {
-					_, err = ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest)
-				} else {
-					err = errUnexpectedURL
-				}
-			} else {
-				// Clients download this layer from an external URL, so do not check for
-				// its presence.
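-				//
-				// (Editorial note, not part of the original file: per the
-				// checks below, a foreign layer must carry at least one URL,
-				// and every URL must parse as an absolute http or https URL
-				// without a fragment; e.g. "https://foo/bar" passes, while
-				// "file:///local/file" and "http://foo/bar#baz" are rejected
-				// as errInvalidURL, mirroring the test cases further below.)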
-				if len(fsLayer.URLs) == 0 {
-					err = errMissingURL
-				}
-				for _, u := range fsLayer.URLs {
-					var pu *url.URL
-					pu, err = url.Parse(u)
-					if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" {
-						err = errInvalidURL
-					}
-				}
-			}
-			if err != nil {
-				if err != distribution.ErrBlobUnknown {
-					errs = append(errs, err)
-				}
-
-				// On error here, we always append unknown blob errors.
-				errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest})
-			}
-		}
-	}
-	if len(errs) != 0 {
-		return errs
-	}
-
-	return nil
-}
diff --git a/docs/storage/schema2manifesthandler_test.go b/docs/storage/schema2manifesthandler_test.go
deleted file mode 100644
index c2f61edf..00000000
--- a/docs/storage/schema2manifesthandler_test.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package storage
-
-import (
-	"testing"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/manifest"
-	"github.com/docker/distribution/manifest/schema2"
-	"github.com/docker/distribution/registry/storage/driver/inmemory"
-)
-
-func TestVerifyManifestForeignLayer(t *testing.T) {
-	ctx := context.Background()
-	inmemoryDriver := inmemory.New()
-	registry := createRegistry(t, inmemoryDriver)
-	repo := makeRepository(t, registry, "test")
-	manifestService := makeManifestService(t, repo)
-
-	config, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeConfig, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	layer, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeLayer, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	foreignLayer := distribution.Descriptor{
-		Digest:    "sha256:463435349086340864309863409683460843608348608934092322395278926a",
-		Size:      6323,
-		MediaType: schema2.MediaTypeForeignLayer,
-	}
-
-	template := schema2.Manifest{
-		Versioned: manifest.Versioned{
-			SchemaVersion: 2,
-			MediaType:     schema2.MediaTypeManifest,
-		},
-		Config: config,
-	}
-
-	type testcase struct {
-		BaseLayer distribution.Descriptor
-		URLs      []string
-		Err       error
-	}
-
-	cases := []testcase{
-		{
-			foreignLayer,
-			nil,
-			errMissingURL,
-		},
-		{
-			layer,
-			[]string{"http://foo/bar"},
-			errUnexpectedURL,
-		},
-		{
-			foreignLayer,
-			[]string{"file:///local/file"},
-			errInvalidURL,
-		},
-		{
-			foreignLayer,
-			[]string{"http://foo/bar#baz"},
-			errInvalidURL,
-		},
-		{
-			foreignLayer,
-			[]string{""},
-			errInvalidURL,
-		},
-		{
-			foreignLayer,
-			[]string{"https://foo/bar", ""},
-			errInvalidURL,
-		},
-		{
-			foreignLayer,
-			[]string{"http://foo/bar"},
-			nil,
-		},
-		{
-			foreignLayer,
-			[]string{"https://foo/bar"},
-			nil,
-		},
-	}
-
-	for _, c := range cases {
-		m := template
-		l := c.BaseLayer
-		l.URLs = c.URLs
-		m.Layers = []distribution.Descriptor{l}
-		dm, err := schema2.FromStruct(m)
-		if err != nil {
-			t.Error(err)
-			continue
-		}
-
-		_, err = manifestService.Put(ctx, dm)
-		if verr, ok := err.(distribution.ErrManifestVerification); ok {
-			// Extract the first error
-			if len(verr) == 2 {
-				if _, ok = verr[1].(distribution.ErrManifestBlobUnknown); ok {
-					err = verr[0]
-				}
-			}
-		}
-		if err != c.Err {
-			t.Errorf("%#v: expected %v, got %v", l, c.Err, err)
-		}
-	}
-}
diff --git a/docs/storage/signedmanifesthandler.go b/docs/storage/signedmanifesthandler.go
deleted file mode 100644
index df6369f3..00000000
--- a/docs/storage/signedmanifesthandler.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package storage
-
-import (
-	"encoding/json"
-	"fmt"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/reference" - "github.com/docker/libtrust" -) - -// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It -// can unmarshal and put schema1 manifests that have been signed by libtrust. -type signedManifestHandler struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context -} - -var _ ManifestHandler = &signedManifestHandler{} - -func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") - - var ( - signatures [][]byte - err error - ) - - jsig, err := libtrust.NewJSONSignature(content, signatures...) - if err != nil { - return nil, err - } - - if ms.repository.schema1SigningKey != nil { - if err := jsig.Sign(ms.repository.schema1SigningKey); err != nil { - return nil, err - } - } - - // Extract the pretty JWS - raw, err := jsig.PrettySignature("signatures") - if err != nil { - return nil, err - } - - var sm schema1.SignedManifest - if err := json.Unmarshal(raw, &sm); err != nil { - return nil, err - } - return &sm, nil -} - -func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") - - sm, ok := manifest.(*schema1.SignedManifest) - if !ok { - return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest) - } - - if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil { - return "", err - } - - mt := schema1.MediaTypeManifest - payload := sm.Canonical - - revision, err := ms.blobStore.Put(ctx, mt, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return "", err - } - - // Link the revision into the repository. - if err := ms.blobStore.linkBlob(ctx, revision); err != nil { - return "", err - } - - return revision.Digest, nil -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. It ensures that the signature is valid for the -// enclosed payload. As a policy, the registry only tries to store valid -// content, leaving trust policies of that content up to consumers. 
-func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error {
-	var errs distribution.ErrManifestVerification
-
-	if len(mnfst.Name) > reference.NameTotalLengthMax {
-		errs = append(errs,
-			distribution.ErrManifestNameInvalid{
-				Name:   mnfst.Name,
-				Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax),
-			})
-	}
-
-	if !reference.NameRegexp.MatchString(mnfst.Name) {
-		errs = append(errs,
-			distribution.ErrManifestNameInvalid{
-				Name:   mnfst.Name,
-				Reason: fmt.Errorf("invalid manifest name format"),
-			})
-	}
-
-	if len(mnfst.History) != len(mnfst.FSLayers) {
-		errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d",
-			len(mnfst.History), len(mnfst.FSLayers)))
-	}
-
-	if _, err := schema1.Verify(&mnfst); err != nil {
-		switch err {
-		case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent:
-			errs = append(errs, distribution.ErrManifestUnverified{})
-		default:
-			if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust
-				errs = append(errs, distribution.ErrManifestUnverified{})
-			} else {
-				errs = append(errs, err)
-			}
-		}
-	}
-
-	if !skipDependencyVerification {
-		for _, fsLayer := range mnfst.References() {
-			_, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest)
-			if err != nil {
-				if err != distribution.ErrBlobUnknown {
-					errs = append(errs, err)
-				}
-
-				// On error here, we always append unknown blob errors.
-				errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest})
-			}
-		}
-	}
-	if len(errs) != 0 {
-		return errs
-	}
-
-	return nil
-}
diff --git a/docs/storage/tagstore.go b/docs/storage/tagstore.go
deleted file mode 100644
index 4386ffca..00000000
--- a/docs/storage/tagstore.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package storage
-
-import (
-	"path"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/digest"
-	storagedriver "github.com/docker/distribution/registry/storage/driver"
-)
-
-var _ distribution.TagService = &tagStore{}
-
-// tagStore provides methods to manage manifest tags in a backend storage driver.
-// This implementation uses the same on-disk layout as the (now deleted) tag
-// store. This provides backward compatibility with current registry deployments,
-// which only make use of the Digest field of the returned distribution.Descriptor;
-// it does not enable full round-tripping of Descriptor objects.
-type tagStore struct {
-	repository *repository
-	blobStore  *blobStore
-}
-
-// All returns all tags
-func (ts *tagStore) All(ctx context.Context) ([]string, error) {
-	var tags []string
-
-	pathSpec, err := pathFor(manifestTagPathSpec{
-		name: ts.repository.Named().Name(),
-	})
-	if err != nil {
-		return tags, err
-	}
-
-	entries, err := ts.blobStore.driver.List(ctx, pathSpec)
-	if err != nil {
-		switch err := err.(type) {
-		case storagedriver.PathNotFoundError:
-			return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Named().Name()}
-		default:
-			return tags, err
-		}
-	}
-
-	for _, entry := range entries {
-		_, filename := path.Split(entry)
-		tags = append(tags, filename)
-	}
-
-	return tags, nil
-}
-
-// exists returns true if the specified manifest tag exists in the repository.
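-//
-// (Editorial sketch, not part of the original file: exists and the exported
-// methods below back distribution.TagService; a typical caller does
-//
-//	tags := repo.Tags(ctx)
-//	if err := tags.Tag(ctx, "latest", desc); err != nil { /* handle */ }
-//	current, err := tags.Get(ctx, "latest")
-//
-// as exercised by the tests further below.)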
-func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) {
-	tagPath, err := pathFor(manifestTagCurrentPathSpec{
-		name: ts.repository.Named().Name(),
-		tag:  tag,
-	})
-
-	if err != nil {
-		return false, err
-	}
-
-	exists, err := exists(ctx, ts.blobStore.driver, tagPath)
-	if err != nil {
-		return false, err
-	}
-
-	return exists, nil
-}
-
-// Tag tags the digest with the given tag, updating the store to point at
-// the current tag. The digest must point to a manifest.
-func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
-	currentPath, err := pathFor(manifestTagCurrentPathSpec{
-		name: ts.repository.Named().Name(),
-		tag:  tag,
-	})
-
-	if err != nil {
-		return err
-	}
-
-	lbs := ts.linkedBlobStore(ctx, tag)
-
-	// Link into the index
-	if err := lbs.linkBlob(ctx, desc); err != nil {
-		return err
-	}
-
-	// Overwrite the current link
-	return ts.blobStore.link(ctx, currentPath, desc.Digest)
-}
-
-// Get resolves the current revision for name and tag.
-func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
-	currentPath, err := pathFor(manifestTagCurrentPathSpec{
-		name: ts.repository.Named().Name(),
-		tag:  tag,
-	})
-
-	if err != nil {
-		return distribution.Descriptor{}, err
-	}
-
-	revision, err := ts.blobStore.readlink(ctx, currentPath)
-	if err != nil {
-		switch err.(type) {
-		case storagedriver.PathNotFoundError:
-			return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag}
-		}
-
-		return distribution.Descriptor{}, err
-	}
-
-	return distribution.Descriptor{Digest: revision}, nil
-}
-
-// Untag removes the tag association
-func (ts *tagStore) Untag(ctx context.Context, tag string) error {
-	tagPath, err := pathFor(manifestTagPathSpec{
-		name: ts.repository.Named().Name(),
-		tag:  tag,
-	})
-
-	switch err.(type) {
-	case storagedriver.PathNotFoundError:
-		return distribution.ErrTagUnknown{Tag: tag}
-	case nil:
-		break
-	default:
-		return err
-	}
-
-	return ts.blobStore.driver.Delete(ctx, tagPath)
-}
-
-// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one
-// to index manifest blobs by tag name. While the tag store doesn't map
-// precisely to the linked blob store, using this ensures the links are
-// managed via the same code path.
-func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore {
-	return &linkedBlobStore{
-		blobStore:  ts.blobStore,
-		repository: ts.repository,
-		ctx:        ctx,
-		linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) {
-			return pathFor(manifestTagIndexEntryLinkPathSpec{
-				name:     name,
-				tag:      tag,
-				revision: dgst,
-			})
-
-		}},
-	}
-}
-
-// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by
-// digest, tag entries which point to it need to be recovered to avoid dangling tags.
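-//
-// (Editorial sketch, not part of the original file: a deletion path might use
-// Lookup to untag all references to a manifest being removed:
-//
-//	tags, err := ts.Lookup(ctx, desc)
-//	for _, t := range tags {
-//		_ = ts.Untag(ctx, t)
-//	}
-//
-// where desc describes the deleted manifest and errors are checked in real
-// code.)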
-func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) {
-	allTags, err := ts.All(ctx)
-	switch err.(type) {
-	case distribution.ErrRepositoryUnknown:
-		// This tag store has been initialized but not yet populated
-		break
-	case nil:
-		break
-	default:
-		return nil, err
-	}
-
-	var tags []string
-	for _, tag := range allTags {
-		tagLinkPathSpec := manifestTagCurrentPathSpec{
-			name: ts.repository.Named().Name(),
-			tag:  tag,
-		}
-
-		tagLinkPath, err := pathFor(tagLinkPathSpec)
-		if err != nil {
-			return nil, err
-		}
-
-		tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath)
-		if err != nil {
-			return nil, err
-		}
-
-		if tagDigest == desc.Digest {
-			tags = append(tags, tag)
-		}
-	}
-
-	return tags, nil
-}
diff --git a/docs/storage/tagstore_test.go b/docs/storage/tagstore_test.go
deleted file mode 100644
index 554a46bf..00000000
--- a/docs/storage/tagstore_test.go
+++ /dev/null
@@ -1,209 +0,0 @@
-package storage
-
-import (
-	"testing"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/reference"
-	"github.com/docker/distribution/registry/storage/driver/inmemory"
-)
-
-type tagsTestEnv struct {
-	ts  distribution.TagService
-	ctx context.Context
-}
-
-func testTagStore(t *testing.T) *tagsTestEnv {
-	ctx := context.Background()
-	d := inmemory.New()
-	reg, err := NewRegistry(ctx, d)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	repoRef, _ := reference.ParseNamed("a/b")
-	repo, err := reg.Repository(ctx, repoRef)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	return &tagsTestEnv{
-		ctx: ctx,
-		ts:  repo.Tags(ctx),
-	}
-}
-
-func TestTagStoreTag(t *testing.T) {
-	env := testTagStore(t)
-	tags := env.ts
-	ctx := env.ctx
-
-	d := distribution.Descriptor{}
-	err := tags.Tag(ctx, "latest", d)
-	if err == nil {
-		t.Errorf("expected error putting malformed descriptor")
-	}
-
-	d.Digest = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
-	err = tags.Tag(ctx, "latest", d)
-	if err != nil {
-		t.Error(err)
-	}
-
-	d1, err := tags.Get(ctx, "latest")
-	if err != nil {
-		t.Error(err)
-	}
-
-	if d1.Digest != d.Digest {
-		t.Error("put and get digest differ")
-	}
-
-	// Overwrite existing
-	d.Digest = "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
-	err = tags.Tag(ctx, "latest", d)
-	if err != nil {
-		t.Error(err)
-	}
-
-	d1, err = tags.Get(ctx, "latest")
-	if err != nil {
-		t.Error(err)
-	}
-
-	if d1.Digest != d.Digest {
-		t.Error("put and get digest differ")
-	}
-}
-
-func TestTagStoreUnTag(t *testing.T) {
-	env := testTagStore(t)
-	tags := env.ts
-	ctx := env.ctx
-	desc := distribution.Descriptor{Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"}
-
-	err := tags.Untag(ctx, "latest")
-	if err == nil {
-		t.Errorf("Expected error untagging non-existent tag")
-	}
-
-	err = tags.Tag(ctx, "latest", desc)
-	if err != nil {
-		t.Error(err)
-	}
-
-	err = tags.Untag(ctx, "latest")
-	if err != nil {
-		t.Error(err)
-	}
-
-	errExpect := distribution.ErrTagUnknown{Tag: "latest"}.Error()
-	_, err = tags.Get(ctx, "latest")
-	if err == nil || err.Error() != errExpect {
-		t.Error("Expected error getting untagged tag")
-	}
-}
-
-func TestTagStoreAll(t *testing.T) {
-	env := testTagStore(t)
-	tagStore := env.ts
-	ctx := env.ctx
-
-	alpha := "abcdefghijklmnopqrstuvwxyz"
-	for i := 0; i < len(alpha); i++ {
-		tag := alpha[i]
-		desc := distribution.Descriptor{Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"}
-		err := tagStore.Tag(ctx, string(tag), desc)
-		if err != nil {
-			t.Error(err)
-		}
-	}
-
-	all, err := tagStore.All(ctx)
-	if err != nil {
-		t.Error(err)
-	}
-	if len(all) != len(alpha) {
-		t.Errorf("Unexpected count returned from enumerate")
-	}
-
-	for i, c := range all {
-		if c != string(alpha[i]) {
-			t.Errorf("unexpected tag in enumerate %s", c)
-		}
-	}
-
-	removed := "a"
-	err = tagStore.Untag(ctx, removed)
-	if err != nil {
-		t.Error(err)
-	}
-
-	all, err = tagStore.All(ctx)
-	if err != nil {
-		t.Error(err)
-	}
-	for _, tag := range all {
-		if tag == removed {
-			t.Errorf("unexpected tag in enumerate %s", removed)
-		}
-	}
-
-}
-
-func TestTagLookup(t *testing.T) {
-	env := testTagStore(t)
-	tagStore := env.ts
-	ctx := env.ctx
-
-	descA := distribution.Descriptor{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}
-	desc0 := distribution.Descriptor{Digest: "sha256:0000000000000000000000000000000000000000000000000000000000000000"}
-
-	tags, err := tagStore.Lookup(ctx, descA)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(tags) != 0 {
-		t.Fatalf("Lookup returned > 0 tags from empty store")
-	}
-
-	err = tagStore.Tag(ctx, "a", descA)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = tagStore.Tag(ctx, "b", descA)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = tagStore.Tag(ctx, "0", desc0)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = tagStore.Tag(ctx, "1", desc0)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	tags, err = tagStore.Lookup(ctx, descA)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if len(tags) != 2 {
-		t.Errorf("Lookup of descA returned %d tags, expected 2", len(tags))
-	}
-
-	tags, err = tagStore.Lookup(ctx, desc0)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if len(tags) != 2 {
-		t.Errorf("Lookup of desc0 returned %d tags, expected 2", len(tags))
-	}
-
-}
diff --git a/docs/storage/util.go b/docs/storage/util.go
deleted file mode 100644
index 773d7ba0..00000000
--- a/docs/storage/util.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package storage
-
-import (
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/registry/storage/driver"
-)
-
-// exists provides a utility method to test whether or not a path exists in
-// the given driver.
-func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) {
-	if _, err := drv.Stat(ctx, path); err != nil {
-		switch err := err.(type) {
-		case driver.PathNotFoundError:
-			return false, nil
-		default:
-			return false, err
-		}
-	}
-
-	return true, nil
-}
diff --git a/docs/storage/vacuum.go b/docs/storage/vacuum.go
deleted file mode 100644
index 3bdfebf2..00000000
--- a/docs/storage/vacuum.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package storage
-
-import (
-	"path"
-
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/registry/storage/driver"
-)
-
-// vacuum contains functions for cleaning up repositories and blobs.
-// These functions will only reliably work on strongly consistent
-// storage systems.
-// https://en.wikipedia.org/wiki/Consistency_model
-
-// NewVacuum creates a new Vacuum
-func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum {
-	return Vacuum{
-		ctx:    ctx,
-		driver: driver,
-	}
-}
-
-// Vacuum removes content from the filesystem
-type Vacuum struct {
-	driver driver.StorageDriver
-	ctx    context.Context
-}
-
-// RemoveBlob removes a blob from the filesystem
-func (v Vacuum) RemoveBlob(dgst string) error {
-	d, err := digest.ParseDigest(dgst)
-	if err != nil {
-		return err
-	}
-
-	blobPath, err := pathFor(blobPathSpec{digest: d})
-	if err != nil {
-		return err
-	}
-
-	context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath)
-
-	err = v.driver.Delete(v.ctx, blobPath)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// RemoveRepository removes a repository directory from the
-// filesystem
-func (v Vacuum) RemoveRepository(repoName string) error {
-	rootForRepository, err := pathFor(repositoriesRootPathSpec{})
-	if err != nil {
-		return err
-	}
-	repoDir := path.Join(rootForRepository, repoName)
-	context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir)
-	err = v.driver.Delete(v.ctx, repoDir)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
diff --git a/docs/storage/walk.go b/docs/storage/walk.go
deleted file mode 100644
index d979796e..00000000
--- a/docs/storage/walk.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package storage
-
-import (
-	"errors"
-	"fmt"
-	"sort"
-
-	"github.com/docker/distribution/context"
-	storageDriver "github.com/docker/distribution/registry/storage/driver"
-)
-
-// ErrSkipDir is used as a return value from a WalkFn to indicate that
-// the directory named in the call is to be skipped. It is not returned
-// as an error by any function.
-var ErrSkipDir = errors.New("skip this directory")
-
-// WalkFn is called once per file by Walk.
-// If the returned error is ErrSkipDir and fileInfo refers
-// to a directory, the directory will not be entered and Walk
-// will continue the traversal. Otherwise Walk will return the
-// error and stop the traversal.
-type WalkFn func(fileInfo storageDriver.FileInfo) error
-
-// Walk traverses a filesystem defined within driver, starting
-// from the given path, calling f on each file
-func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error {
-	children, err := driver.List(ctx, from)
-	if err != nil {
-		return err
-	}
-	sort.Stable(sort.StringSlice(children))
-	for _, child := range children {
-		// TODO(stevvooe): Calling driver.Stat for every entry is quite
-		// expensive when running against backends with a slow Stat
-		// implementation, such as s3. This is very likely a serious
-		// performance bottleneck.
-		fileInfo, err := driver.Stat(ctx, child)
-		if err != nil {
-			return err
-		}
-		err = f(fileInfo)
-		skipDir := (err == ErrSkipDir)
-		if err != nil && !skipDir {
-			return err
-		}
-
-		if fileInfo.IsDir() && !skipDir {
-			if err := Walk(ctx, driver, child, f); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-// pushError formats an error type given a path and an error
-// and pushes it to a slice of errors
-func pushError(errors []error, path string, err error) []error {
-	return append(errors, fmt.Errorf("%s: %s", path, err))
-}
diff --git a/docs/storage/walk_test.go b/docs/storage/walk_test.go
deleted file mode 100644
index 3d7a4b1b..00000000
--- a/docs/storage/walk_test.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package storage
-
-import (
-	"fmt"
-	"sort"
-	"testing"
-
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/registry/storage/driver"
-	"github.com/docker/distribution/registry/storage/driver/inmemory"
-)
-
-func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Context) {
-	d := inmemory.New()
-	ctx := context.Background()
-
-	expected := map[string]string{
-		"/a":       "dir",
-		"/a/b":     "dir",
-		"/a/b/c":   "dir",
-		"/a/b/c/d": "file",
-		"/a/b/c/e": "file",
-		"/a/b/f":   "dir",
-		"/a/b/f/g": "file",
-		"/a/b/f/h": "file",
-		"/a/b/f/i": "file",
-		"/z":       "dir",
-		"/z/y":     "file",
-	}
-
-	for p, typ := range expected {
-		if typ != "file" {
-			continue
-		}
-
-		if err := d.PutContent(ctx, p, []byte(p)); err != nil {
-			t.Fatalf("unable to put content into fixture: %v", err)
-		}
-	}
-
-	return d, expected, ctx
-}
-
-func TestWalkErrors(t *testing.T) {
-	d, expected, ctx := testFS(t)
-	fileCount := len(expected)
-	err := Walk(ctx, d, "", func(fileInfo driver.FileInfo) error {
-		return nil
-	})
-	if err == nil {
-		t.Error("Expected invalid root err")
-	}
-
-	errEarlyExpected := fmt.Errorf("Early termination")
-
-	err = Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error {
-		// error on the 2nd file
-		if fileInfo.Path() == "/a/b" {
-			return errEarlyExpected
-		}
-
-		delete(expected, fileInfo.Path())
-		return nil
-	})
-	if len(expected) != fileCount-1 {
-		t.Error("Walk failed to terminate with error")
-	}
-	if err != errEarlyExpected {
-		if err == nil {
-			t.Fatalf("expected an error due to early termination")
-		} else {
-			t.Error(err.Error())
-		}
-	}
-
-	err = Walk(ctx, d, "/nonexistent", func(fileInfo driver.FileInfo) error {
-		return nil
-	})
-	if err == nil {
-		t.Errorf("Expected missing file err")
-	}
-
-}
-
-func TestWalk(t *testing.T) {
-	d, expected, ctx := testFS(t)
-	var traversed []string
-	err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error {
-		filePath := fileInfo.Path()
-		filetype, ok := expected[filePath]
-		if !ok {
-			t.Fatalf("Unexpected file in walk: %q", filePath)
-		}
-
-		if fileInfo.IsDir() {
-			if filetype != "dir" {
-				t.Errorf("Unexpected file type: %q", filePath)
-			}
-		} else {
-			if filetype != "file" {
-				t.Errorf("Unexpected file type: %q", filePath)
-			}
-
-			// each file has its own path as the contents. If the length
-			// doesn't match the path length, fail.
-			if fileInfo.Size() != int64(len(fileInfo.Path())) {
-				t.Fatalf("unexpected size for %q: %v != %v",
-					fileInfo.Path(), fileInfo.Size(), len(fileInfo.Path()))
-			}
-		}
-		delete(expected, filePath)
-		traversed = append(traversed, filePath)
-		return nil
-	})
-	if len(expected) > 0 {
-		t.Errorf("Missed files in walk: %q", expected)
-	}
-
-	if !sort.StringsAreSorted(traversed) {
-		t.Errorf("result should be sorted: %v", traversed)
-	}
-
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
-}
-
-func TestWalkSkipDir(t *testing.T) {
-	d, expected, ctx := testFS(t)
-	err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error {
-		filePath := fileInfo.Path()
-		if filePath == "/a/b" {
-			// skip processing the subtree rooted at /a/b
-			return ErrSkipDir
-		}
-		delete(expected, filePath)
-		return nil
-	})
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
-	if _, ok := expected["/a/b/c"]; !ok {
-		t.Errorf("/a/b/c not skipped")
-	}
-	if _, ok := expected["/a/b/c/d"]; !ok {
-		t.Errorf("/a/b/c/d not skipped")
-	}
-	if _, ok := expected["/a/b/c/e"]; !ok {
-		t.Errorf("/a/b/c/e not skipped")
-	}
-
-}
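-
-// (Editorial sketch, not part of the original file: a typical Walk caller
-// mirrors getOutstandingUploads in purgeuploads.go above, pruning reserved
-// directories while visiting everything else:
-//
-//	err := Walk(ctx, d, "/", func(fi driver.FileInfo) error {
-//		if fi.IsDir() && strings.HasPrefix(path.Base(fi.Path()), "_") {
-//			return ErrSkipDir
-//		}
-//		return nil
-//	})
-//
-// with ctx and d as in testFS, and "path" and "strings" imported.)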