2015-02-11 01:25:40 +00:00
package handlers
2014-11-11 02:57:38 +00:00
import (
2015-07-29 19:50:43 +00:00
cryptorand "crypto/rand"
2015-04-01 23:27:24 +00:00
"expvar"
2014-12-11 06:33:36 +00:00
"fmt"
2015-04-07 22:52:48 +00:00
"math/rand"
2015-01-28 23:55:18 +00:00
"net"
2014-11-11 02:57:38 +00:00
"net/http"
2015-09-18 18:03:15 +00:00
"net/url"
2015-01-28 23:55:18 +00:00
"os"
2016-01-21 00:40:58 +00:00
"runtime"
2015-04-01 23:27:24 +00:00
"time"
2014-11-11 02:57:38 +00:00
2015-04-17 12:19:20 +00:00
log "github.com/Sirupsen/logrus"
2015-02-12 00:49:49 +00:00
"github.com/docker/distribution"
2014-12-24 00:01:38 +00:00
"github.com/docker/distribution/configuration"
2015-02-07 00:19:19 +00:00
ctxu "github.com/docker/distribution/context"
2015-08-06 22:28:11 +00:00
"github.com/docker/distribution/health"
2015-08-19 00:19:46 +00:00
"github.com/docker/distribution/health/checks"
2015-02-12 00:49:49 +00:00
"github.com/docker/distribution/notifications"
2015-12-15 22:35:23 +00:00
"github.com/docker/distribution/reference"
2015-05-15 01:21:39 +00:00
"github.com/docker/distribution/registry/api/errcode"
2015-02-11 02:14:23 +00:00
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/auth"
2015-03-06 15:45:16 +00:00
registrymiddleware "github.com/docker/distribution/registry/middleware/registry"
repositorymiddleware "github.com/docker/distribution/registry/middleware/repository"
2015-07-29 18:12:01 +00:00
"github.com/docker/distribution/registry/proxy"
2015-02-11 01:41:09 +00:00
"github.com/docker/distribution/registry/storage"
2015-05-21 00:12:40 +00:00
memorycache "github.com/docker/distribution/registry/storage/cache/memory"
rediscache "github.com/docker/distribution/registry/storage/cache/redis"
2015-02-11 02:14:23 +00:00
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/factory"
2015-03-03 16:57:52 +00:00
storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
2016-01-21 00:40:58 +00:00
"github.com/docker/distribution/version"
2015-12-15 19:37:28 +00:00
"github.com/docker/libtrust"
2015-04-01 23:27:24 +00:00
"github.com/garyburd/redigo/redis"
2014-11-11 02:57:38 +00:00
"github.com/gorilla/mux"
2015-02-04 01:59:24 +00:00
"golang.org/x/net/context"
2014-11-11 02:57:38 +00:00
)
2015-07-29 19:50:43 +00:00
// randomSecretSize is the number of random bytes to generate if no secret
// was specified.
const randomSecretSize = 32

// defaultCheckInterval is the default time in between health checks.
const defaultCheckInterval = 10 * time.Second
2014-11-11 02:57:38 +00:00
// App is a global registry application object. Shared resources can be placed
// on this object that will be accessible from all requests. Any writable
// fields should be protected.
type App struct {
2015-02-07 00:19:19 +00:00
context . Context
2014-11-11 02:57:38 +00:00
2015-08-20 20:56:36 +00:00
Config * configuration . Configuration
2014-11-21 03:57:01 +00:00
2015-01-28 23:55:18 +00:00
router * mux . Router // main application router, configured with dispatchers
driver storagedriver . StorageDriver // driver maintains the app global storage driver instance.
2015-04-10 02:21:33 +00:00
registry distribution . Namespace // registry is the primary registry backend for the app instance.
2015-01-28 23:55:18 +00:00
accessController auth . AccessController // main access controller for application
2014-11-21 03:57:01 +00:00
2015-09-18 18:03:15 +00:00
// httpHost is a parsed representation of the http.host parameter from
// the configuration. Only the Scheme and Host fields are used.
httpHost url . URL
2015-01-28 23:55:18 +00:00
// events contains notification related configuration.
events struct {
sink notifications . Sink
source notifications . SourceRecord
}
2015-04-01 23:27:24 +00:00
redis * redis . Pool
2015-07-29 18:12:01 +00:00
2015-12-15 19:37:28 +00:00
// trustKey is a deprecated key used to sign manifests converted to
// schema1 for backward compatibility. It should not be used for any
// other purposes.
trustKey libtrust . PrivateKey
// isCache is true if this registry is configured as a pull through cache
2015-07-29 18:12:01 +00:00
isCache bool
2015-08-06 17:34:35 +00:00
2015-12-15 19:37:28 +00:00
// readOnly is true if the registry is in a read-only maintenance mode
2015-08-06 17:34:35 +00:00
readOnly bool
2014-11-11 02:57:38 +00:00
}
// NewApp takes a configuration and returns a configured app, ready to serve
// requests. The app only implements ServeHTTP and can be wrapped in other
// handlers accordingly.
2016-01-21 00:40:58 +00:00
func NewApp ( ctx context . Context , config * configuration . Configuration ) * App {
2014-11-11 02:57:38 +00:00
app := & App {
2016-01-21 00:40:58 +00:00
Config : config ,
2015-04-10 01:45:39 +00:00
Context : ctx ,
2016-01-21 00:40:58 +00:00
router : v2 . RouterWithPrefix ( config . HTTP . Prefix ) ,
isCache : config . Proxy . RemoteURL != "" ,
2014-11-11 02:57:38 +00:00
}
// Register the handler dispatchers.
2014-12-12 06:24:25 +00:00
app . register ( v2 . RouteNameBase , func ( ctx * Context , r * http . Request ) http . Handler {
2014-12-11 06:33:36 +00:00
return http . HandlerFunc ( apiBase )
} )
2014-12-12 06:24:25 +00:00
app . register ( v2 . RouteNameManifest , imageManifestDispatcher )
2015-07-13 20:08:13 +00:00
app . register ( v2 . RouteNameCatalog , catalogDispatcher )
2014-12-12 06:24:25 +00:00
app . register ( v2 . RouteNameTags , tagsDispatcher )
Refactor Blob Service API
This PR refactors the blob service API to be oriented around blob descriptors.
Identified by digests, blobs become an abstract entity that can be read and
written using a descriptor as a handle. This allows blobs to take many forms,
such as a ReadSeekCloser or a simple byte buffer, allowing blob oriented
operations to better integrate with blob agnostic APIs (such as the `io`
package). The error definitions are now better organized to reflect conditions
that can only be seen when interacting with the blob API.
The main benefit of this is to separate the much smaller metadata from large
file storage. Many benefits also follow from this. Reading and writing has
been separated into discrete services. Backend implementation is also
simplified, by reducing the amount of metadata that needs to be picked up to
simply serve a read. This also improves cacheability.
"Opening" a blob simply consists of an access check (Stat) and a path
calculation. Caching is greatly simplified and we've made the mapping of
provisional to canonical hashes a first-class concept. BlobDescriptorService
and BlobProvider can be combined in different ways to achieve varying effects.
Recommend Review Approach
-------------------------
This is a very large patch. While apologies are in order, we are getting a
considerable amount of refactoring. Most changes follow from the changes to
the root package (distribution), so start there. From there, the main changes
are in storage. Looking at (*repository).Blobs will help to understand the how
the linkedBlobStore is wired. One can explore the internals within and also
branch out into understanding the changes to the caching layer. Following the
descriptions below will also help to guide you.
To reduce the chances for regressions, it was critical that major changes to
unit tests were avoided. Where possible, they are left untouched and where
not, the spirit is hopefully captured. Pay particular attention to where
behavior may have changed.
Storage
-------
The primary changes to the `storage` package, other than the interface
updates, were to merge the layerstore and blobstore. Blob access is now
layered even further. The first layer, blobStore, exposes a global
`BlobStatter` and `BlobProvider`. Operations here provide a fast path for most
read operations that don't take access control into account. The
`linkedBlobStore` layers on top of the `blobStore`, providing repository-
scoped blob link management in the backend. The `linkedBlobStore` implements
the full `BlobStore` suite, providing access-controlled, repository-local blob
writers. The abstraction between the two is slightly broken in that
`linkedBlobStore` is the only channel under which one can write into the global
blob store. The `linkedBlobStore` also provides flexibility in that it can act
over different link sets depending on configuration. This allows us to use the
same code for signature links, manifest links and blob links. Eventually, we
will fully consolidate this storage.
The improved cache flow comes from the `linkedBlobStatter` component
of `linkedBlobStore`. Using a `cachedBlobStatter`, these combine together to
provide a simple cache hierarchy that should streamline access checks on read
and write operations, or at least provide a single path to optimize. The
metrics have been changed in a slightly incompatible way since the former
operations, Fetch and Exists, are no longer relevant.
The fileWriter and fileReader have been slightly modified to support the rest
of the changes. The most interesting is the removal of the `Stat` call from
`newFileReader`. This was the source of unnecessary round trips that were only
present to look up the size of the resulting reader. Now, one must simply pass
in the size, requiring the caller to decide whether or not the `Stat` call is
appropriate. In several cases, it turned out the caller already had the size
already. The `WriterAt` implementation has been removed from `fileWriter`,
since it is no longer required for `BlobWriter`, reducing the number of paths
which writes may take.
Cache
-----
Unfortunately, the `cache` package required a near full rewrite. It was pretty
mechanical in that the cache is oriented around the `BlobDescriptorService`
slightly modified to include the ability to set the values for individual
digests. While the implementation is oriented towards caching, it can act as a
primary store. Provisions are in place to have repository local metadata, in
addition to global metadata. Fallback is implemented as a part of the storage
package to maintain this flexibility.
One unfortunate side-effect is that caching is now repository-scoped, rather
than global. This should have little effect on performance but may increase
memory usage.
Handlers
--------
The `handlers` package has been updated to leverage the new API. For the most
part, the changes are superficial or mechanical based on the API changes. This
did expose a bug in the handling of provisional vs canonical digests that was
fixed in the unit tests.
Configuration
-------------
One user-facing change has been made to the configuration and is updated in
the associated documentation. The `layerinfo` cache parameter has been
deprecated by the `blobdescriptor` cache parameter. Both are equivalent and
configuration files should be backward compatible.
Notifications
-------------
Changes the `notification` package are simply to support the interface
changes.
Context
-------
A small change has been made to the tracing log-level. Traces have been moved
from "info" to "debug" level to reduce output when not needed.
Signed-off-by: Stephen J Day <stephen.day@docker.com>
2015-05-12 07:10:29 +00:00
app . register ( v2 . RouteNameBlob , blobDispatcher )
app . register ( v2 . RouteNameBlobUpload , blobUploadDispatcher )
app . register ( v2 . RouteNameBlobUploadChunk , blobUploadDispatcher )
2014-11-11 02:57:38 +00:00
2016-01-21 00:40:58 +00:00
// override the storage driver's UA string for registry outbound HTTP requests
storageParams := config . Storage . Parameters ( )
if storageParams == nil {
storageParams = make ( configuration . Parameters )
}
storageParams [ "useragent" ] = fmt . Sprintf ( "docker-distribution/%s %s" , version . Version , runtime . Version ( ) )
2015-01-28 23:55:18 +00:00
var err error
2016-01-21 00:40:58 +00:00
app . driver , err = factory . Create ( config . Storage . Type ( ) , storageParams )
2014-11-21 03:57:01 +00:00
if err != nil {
// TODO(stevvooe): Move the creation of a service into a protected
// method, where this is created lazily. Its status can be queried via
// a health check.
panic ( err )
}
2015-04-07 22:52:48 +00:00
2015-04-17 01:34:29 +00:00
purgeConfig := uploadPurgeDefaultConfig ( )
2016-01-21 00:40:58 +00:00
if mc , ok := config . Storage [ "maintenance" ] ; ok {
2015-08-06 17:34:35 +00:00
if v , ok := mc [ "uploadpurging" ] ; ok {
purgeConfig , ok = v . ( map [ interface { } ] interface { } )
if ! ok {
panic ( "uploadpurging config key must contain additional keys" )
}
}
if v , ok := mc [ "readonly" ] ; ok {
2015-08-07 01:02:43 +00:00
readOnly , ok := v . ( map [ interface { } ] interface { } )
2015-08-06 17:34:35 +00:00
if ! ok {
2015-08-07 01:02:43 +00:00
panic ( "readonly config key must contain additional keys" )
}
if readOnlyEnabled , ok := readOnly [ "enabled" ] ; ok {
app . readOnly , ok = readOnlyEnabled . ( bool )
if ! ok {
panic ( "readonly's enabled config key must have a boolean value" )
}
2015-04-17 01:34:29 +00:00
}
}
}
2015-04-27 22:58:58 +00:00
startUploadPurger ( app , app . driver , ctxu . GetLogger ( app ) , purgeConfig )
2015-04-07 22:52:48 +00:00
2016-01-21 00:40:58 +00:00
app . driver , err = applyStorageMiddleware ( app . driver , config . Middleware [ "storage" ] )
2015-03-09 17:55:52 +00:00
if err != nil {
panic ( err )
}
2014-11-21 03:57:01 +00:00
2016-01-21 00:40:58 +00:00
app . configureSecret ( config )
app . configureEvents ( config )
app . configureRedis ( config )
app . configureLogHook ( config )
2015-03-06 15:45:16 +00:00
2016-04-07 00:01:30 +00:00
options := registrymiddleware . GetRegistryOptions ( )
2016-02-10 23:20:39 +00:00
if config . Compatibility . Schema1 . TrustKey != "" {
app . trustKey , err = libtrust . LoadKeyFile ( config . Compatibility . Schema1 . TrustKey )
if err != nil {
panic ( fmt . Sprintf ( ` could not load schema1 "signingkey" parameter: %v ` , err ) )
}
} else {
// Generate an ephemeral key to be used for signing converted manifests
// for clients that don't support schema2.
app . trustKey , err = libtrust . GenerateECP256PrivateKey ( )
if err != nil {
panic ( err )
}
2015-12-15 19:37:28 +00:00
}
2016-04-07 00:01:30 +00:00
options = append ( options , storage . Schema1SigningKey ( app . trustKey ) )
2016-01-21 00:40:58 +00:00
if config . HTTP . Host != "" {
u , err := url . Parse ( config . HTTP . Host )
2015-09-18 18:03:15 +00:00
if err != nil {
panic ( fmt . Sprintf ( ` could not parse http "host" parameter: %v ` , err ) )
}
app . httpHost = * u
}
2015-08-18 17:56:27 +00:00
if app . isCache {
options = append ( options , storage . DisableDigestResumption )
}
2015-07-24 06:16:27 +00:00
// configure deletion
2016-01-21 00:40:58 +00:00
if d , ok := config . Storage [ "delete" ] ; ok {
2015-05-27 17:52:22 +00:00
e , ok := d [ "enabled" ]
if ok {
2015-08-18 17:56:27 +00:00
if deleteEnabled , ok := e . ( bool ) ; ok && deleteEnabled {
options = append ( options , storage . EnableDelete )
2015-05-27 17:52:22 +00:00
}
}
}
2015-07-24 06:16:27 +00:00
// configure redirects
var redirectDisabled bool
2016-01-21 00:40:58 +00:00
if redirectConfig , ok := config . Storage [ "redirect" ] ; ok {
2015-07-24 06:16:27 +00:00
v := redirectConfig [ "disable" ]
switch v := v . ( type ) {
case bool :
redirectDisabled = v
default :
panic ( fmt . Sprintf ( "invalid type for redirect config: %#v" , redirectConfig ) )
}
2015-08-18 17:56:27 +00:00
}
if redirectDisabled {
ctxu . GetLogger ( app ) . Infof ( "backend redirection disabled" )
} else {
options = append ( options , storage . EnableRedirect )
2015-07-24 06:16:27 +00:00
}
2015-04-02 23:38:01 +00:00
// configure storage caches
2016-01-21 00:40:58 +00:00
if cc , ok := config . Storage [ "cache" ] ; ok {
Refactor Blob Service API
This PR refactors the blob service API to be oriented around blob descriptors.
Identified by digests, blobs become an abstract entity that can be read and
written using a descriptor as a handle. This allows blobs to take many forms,
such as a ReadSeekCloser or a simple byte buffer, allowing blob oriented
operations to better integrate with blob agnostic APIs (such as the `io`
package). The error definitions are now better organized to reflect conditions
that can only be seen when interacting with the blob API.
The main benefit of this is to separate the much smaller metadata from large
file storage. Many benefits also follow from this. Reading and writing has
been separated into discrete services. Backend implementation is also
simplified, by reducing the amount of metadata that needs to be picked up to
simply serve a read. This also improves cacheability.
"Opening" a blob simply consists of an access check (Stat) and a path
calculation. Caching is greatly simplified and we've made the mapping of
provisional to canonical hashes a first-class concept. BlobDescriptorService
and BlobProvider can be combined in different ways to achieve varying effects.
Recommend Review Approach
-------------------------
This is a very large patch. While apologies are in order, we are getting a
considerable amount of refactoring. Most changes follow from the changes to
the root package (distribution), so start there. From there, the main changes
are in storage. Looking at (*repository).Blobs will help to understand the how
the linkedBlobStore is wired. One can explore the internals within and also
branch out into understanding the changes to the caching layer. Following the
descriptions below will also help to guide you.
To reduce the chances for regressions, it was critical that major changes to
unit tests were avoided. Where possible, they are left untouched and where
not, the spirit is hopefully captured. Pay particular attention to where
behavior may have changed.
Storage
-------
The primary changes to the `storage` package, other than the interface
updates, were to merge the layerstore and blobstore. Blob access is now
layered even further. The first layer, blobStore, exposes a global
`BlobStatter` and `BlobProvider`. Operations here provide a fast path for most
read operations that don't take access control into account. The
`linkedBlobStore` layers on top of the `blobStore`, providing repository-
scoped blob link management in the backend. The `linkedBlobStore` implements
the full `BlobStore` suite, providing access-controlled, repository-local blob
writers. The abstraction between the two is slightly broken in that
`linkedBlobStore` is the only channel under which one can write into the global
blob store. The `linkedBlobStore` also provides flexibility in that it can act
over different link sets depending on configuration. This allows us to use the
same code for signature links, manifest links and blob links. Eventually, we
will fully consolidate this storage.
The improved cache flow comes from the `linkedBlobStatter` component
of `linkedBlobStore`. Using a `cachedBlobStatter`, these combine together to
provide a simple cache hierarchy that should streamline access checks on read
and write operations, or at least provide a single path to optimize. The
metrics have been changed in a slightly incompatible way since the former
operations, Fetch and Exists, are no longer relevant.
The fileWriter and fileReader have been slightly modified to support the rest
of the changes. The most interesting is the removal of the `Stat` call from
`newFileReader`. This was the source of unnecessary round trips that were only
present to look up the size of the resulting reader. Now, one must simply pass
in the size, requiring the caller to decide whether or not the `Stat` call is
appropriate. In several cases, it turned out the caller already had the size
already. The `WriterAt` implementation has been removed from `fileWriter`,
since it is no longer required for `BlobWriter`, reducing the number of paths
which writes may take.
Cache
-----
Unfortunately, the `cache` package required a near full rewrite. It was pretty
mechanical in that the cache is oriented around the `BlobDescriptorService`
slightly modified to include the ability to set the values for individual
digests. While the implementation is oriented towards caching, it can act as a
primary store. Provisions are in place to have repository local metadata, in
addition to global metadata. Fallback is implemented as a part of the storage
package to maintain this flexibility.
One unfortunate side-effect is that caching is now repository-scoped, rather
than global. This should have little effect on performance but may increase
memory usage.
Handlers
--------
The `handlers` package has been updated to leverage the new API. For the most
part, the changes are superficial or mechanical based on the API changes. This
did expose a bug in the handling of provisional vs canonical digests that was
fixed in the unit tests.
Configuration
-------------
One user-facing change has been made to the configuration and is updated in
the associated documentation. The `layerinfo` cache parameter has been
deprecated by the `blobdescriptor` cache parameter. Both are equivalent and
configuration files should be backward compatible.
Notifications
-------------
Changes the `notification` package are simply to support the interface
changes.
Context
-------
A small change has been made to the tracing log-level. Traces have been moved
from "info" to "debug" level to reduce output when not needed.
Signed-off-by: Stephen J Day <stephen.day@docker.com>
2015-05-12 07:10:29 +00:00
v , ok := cc [ "blobdescriptor" ]
if ! ok {
// Backwards compatible: "layerinfo" == "blobdescriptor"
v = cc [ "layerinfo" ]
}
switch v {
2015-04-02 23:38:01 +00:00
case "redis" :
if app . redis == nil {
panic ( "redis configuration required to use for layerinfo cache" )
}
2015-08-18 17:56:27 +00:00
cacheProvider := rediscache . NewRedisBlobDescriptorCacheProvider ( app . redis )
localOptions := append ( options , storage . BlobDescriptorCacheProvider ( cacheProvider ) )
app . registry , err = storage . NewRegistry ( app , app . driver , localOptions ... )
if err != nil {
panic ( "could not create registry: " + err . Error ( ) )
}
Refactor Blob Service API
This PR refactors the blob service API to be oriented around blob descriptors.
Identified by digests, blobs become an abstract entity that can be read and
written using a descriptor as a handle. This allows blobs to take many forms,
such as a ReadSeekCloser or a simple byte buffer, allowing blob oriented
operations to better integrate with blob agnostic APIs (such as the `io`
package). The error definitions are now better organized to reflect conditions
that can only be seen when interacting with the blob API.
The main benefit of this is to separate the much smaller metadata from large
file storage. Many benefits also follow from this. Reading and writing has
been separated into discrete services. Backend implementation is also
simplified, by reducing the amount of metadata that needs to be picked up to
simply serve a read. This also improves cacheability.
"Opening" a blob simply consists of an access check (Stat) and a path
calculation. Caching is greatly simplified and we've made the mapping of
provisional to canonical hashes a first-class concept. BlobDescriptorService
and BlobProvider can be combined in different ways to achieve varying effects.
Recommend Review Approach
-------------------------
This is a very large patch. While apologies are in order, we are getting a
considerable amount of refactoring. Most changes follow from the changes to
the root package (distribution), so start there. From there, the main changes
are in storage. Looking at (*repository).Blobs will help to understand the how
the linkedBlobStore is wired. One can explore the internals within and also
branch out into understanding the changes to the caching layer. Following the
descriptions below will also help to guide you.
To reduce the chances for regressions, it was critical that major changes to
unit tests were avoided. Where possible, they are left untouched and where
not, the spirit is hopefully captured. Pay particular attention to where
behavior may have changed.
Storage
-------
The primary changes to the `storage` package, other than the interface
updates, were to merge the layerstore and blobstore. Blob access is now
layered even further. The first layer, blobStore, exposes a global
`BlobStatter` and `BlobProvider`. Operations here provide a fast path for most
read operations that don't take access control into account. The
`linkedBlobStore` layers on top of the `blobStore`, providing repository-
scoped blob link management in the backend. The `linkedBlobStore` implements
the full `BlobStore` suite, providing access-controlled, repository-local blob
writers. The abstraction between the two is slightly broken in that
`linkedBlobStore` is the only channel under which one can write into the global
blob store. The `linkedBlobStore` also provides flexibility in that it can act
over different link sets depending on configuration. This allows us to use the
same code for signature links, manifest links and blob links. Eventually, we
will fully consolidate this storage.
The improved cache flow comes from the `linkedBlobStatter` component
of `linkedBlobStore`. Using a `cachedBlobStatter`, these combine together to
provide a simple cache hierarchy that should streamline access checks on read
and write operations, or at least provide a single path to optimize. The
metrics have been changed in a slightly incompatible way since the former
operations, Fetch and Exists, are no longer relevant.
The fileWriter and fileReader have been slightly modified to support the rest
of the changes. The most interesting is the removal of the `Stat` call from
`newFileReader`. This was the source of unnecessary round trips that were only
present to look up the size of the resulting reader. Now, one must simply pass
in the size, requiring the caller to decide whether or not the `Stat` call is
appropriate. In several cases, it turned out the caller already had the size
already. The `WriterAt` implementation has been removed from `fileWriter`,
since it is no longer required for `BlobWriter`, reducing the number of paths
which writes may take.
Cache
-----
Unfortunately, the `cache` package required a near full rewrite. It was pretty
mechanical in that the cache is oriented around the `BlobDescriptorService`
slightly modified to include the ability to set the values for individual
digests. While the implementation is oriented towards caching, it can act as a
primary store. Provisions are in place to have repository local metadata, in
addition to global metadata. Fallback is implemented as a part of the storage
package to maintain this flexibility.
One unfortunate side-effect is that caching is now repository-scoped, rather
than global. This should have little effect on performance but may increase
memory usage.
Handlers
--------
The `handlers` package has been updated to leverage the new API. For the most
part, the changes are superficial or mechanical based on the API changes. This
did expose a bug in the handling of provisional vs canonical digests that was
fixed in the unit tests.
Configuration
-------------
One user-facing change has been made to the configuration and is updated in
the associated documentation. The `layerinfo` cache parameter has been
deprecated by the `blobdescriptor` cache parameter. Both are equivalent and
configuration files should be backward compatible.
Notifications
-------------
Changes the `notification` package are simply to support the interface
changes.
Context
-------
A small change has been made to the tracing log-level. Traces have been moved
from "info" to "debug" level to reduce output when not needed.
Signed-off-by: Stephen J Day <stephen.day@docker.com>
2015-05-12 07:10:29 +00:00
ctxu . GetLogger ( app ) . Infof ( "using redis blob descriptor cache" )
2015-04-02 23:38:01 +00:00
case "inmemory" :
2015-08-18 17:56:27 +00:00
cacheProvider := memorycache . NewInMemoryBlobDescriptorCacheProvider ( )
localOptions := append ( options , storage . BlobDescriptorCacheProvider ( cacheProvider ) )
app . registry , err = storage . NewRegistry ( app , app . driver , localOptions ... )
if err != nil {
panic ( "could not create registry: " + err . Error ( ) )
}
Refactor Blob Service API
This PR refactors the blob service API to be oriented around blob descriptors.
Identified by digests, blobs become an abstract entity that can be read and
written using a descriptor as a handle. This allows blobs to take many forms,
such as a ReadSeekCloser or a simple byte buffer, allowing blob oriented
operations to better integrate with blob agnostic APIs (such as the `io`
package). The error definitions are now better organized to reflect conditions
that can only be seen when interacting with the blob API.
The main benefit of this is to separate the much smaller metadata from large
file storage. Many benefits also follow from this. Reading and writing has
been separated into discrete services. Backend implementation is also
simplified, by reducing the amount of metadata that needs to be picked up to
simply serve a read. This also improves cacheability.
"Opening" a blob simply consists of an access check (Stat) and a path
calculation. Caching is greatly simplified and we've made the mapping of
provisional to canonical hashes a first-class concept. BlobDescriptorService
and BlobProvider can be combined in different ways to achieve varying effects.
Recommend Review Approach
-------------------------
This is a very large patch. While apologies are in order, we are getting a
considerable amount of refactoring. Most changes follow from the changes to
the root package (distribution), so start there. From there, the main changes
are in storage. Looking at (*repository).Blobs will help to understand the how
the linkedBlobStore is wired. One can explore the internals within and also
branch out into understanding the changes to the caching layer. Following the
descriptions below will also help to guide you.
To reduce the chances for regressions, it was critical that major changes to
unit tests were avoided. Where possible, they are left untouched and where
not, the spirit is hopefully captured. Pay particular attention to where
behavior may have changed.
Storage
-------
The primary changes to the `storage` package, other than the interface
updates, were to merge the layerstore and blobstore. Blob access is now
layered even further. The first layer, blobStore, exposes a global
`BlobStatter` and `BlobProvider`. Operations here provide a fast path for most
read operations that don't take access control into account. The
`linkedBlobStore` layers on top of the `blobStore`, providing repository-
scoped blob link management in the backend. The `linkedBlobStore` implements
the full `BlobStore` suite, providing access-controlled, repository-local blob
writers. The abstraction between the two is slightly broken in that
`linkedBlobStore` is the only channel under which one can write into the global
blob store. The `linkedBlobStore` also provides flexibility in that it can act
over different link sets depending on configuration. This allows us to use the
same code for signature links, manifest links and blob links. Eventually, we
will fully consolidate this storage.
The improved cache flow comes from the `linkedBlobStatter` component
of `linkedBlobStore`. Using a `cachedBlobStatter`, these combine together to
provide a simple cache hierarchy that should streamline access checks on read
and write operations, or at least provide a single path to optimize. The
metrics have been changed in a slightly incompatible way since the former
operations, Fetch and Exists, are no longer relevant.
The fileWriter and fileReader have been slightly modified to support the rest
of the changes. The most interesting is the removal of the `Stat` call from
`newFileReader`. This was the source of unnecessary round trips that were only
present to look up the size of the resulting reader. Now, one must simply pass
in the size, requiring the caller to decide whether or not the `Stat` call is
appropriate. In several cases, it turned out the caller already had the size
already. The `WriterAt` implementation has been removed from `fileWriter`,
since it is no longer required for `BlobWriter`, reducing the number of paths
which writes may take.
Cache
-----
Unfortunately, the `cache` package required a near full rewrite. It was pretty
mechanical in that the cache is oriented around the `BlobDescriptorService`
slightly modified to include the ability to set the values for individual
digests. While the implementation is oriented towards caching, it can act as a
primary store. Provisions are in place to have repository local metadata, in
addition to global metadata. Fallback is implemented as a part of the storage
package to maintain this flexibility.
One unfortunate side-effect is that caching is now repository-scoped, rather
than global. This should have little effect on performance but may increase
memory usage.
Handlers
--------
The `handlers` package has been updated to leverage the new API. For the most
part, the changes are superficial or mechanical based on the API changes. This
did expose a bug in the handling of provisional vs canonical digests that was
fixed in the unit tests.
Configuration
-------------
One user-facing change has been made to the configuration and is updated in
the associated documentation. The `layerinfo` cache parameter has been
deprecated by the `blobdescriptor` cache parameter. Both are equivalent and
configuration files should be backward compatible.
Notifications
-------------
Changes to the `notification` package are simply to support the interface
changes.
Context
-------
A small change has been made to the tracing log-level. Traces have been moved
from "info" to "debug" level to reduce output when not needed.
Signed-off-by: Stephen J Day <stephen.day@docker.com>
2015-05-12 07:10:29 +00:00
ctxu . GetLogger ( app ) . Infof ( "using inmemory blob descriptor cache" )
2015-04-02 23:38:01 +00:00
default :
Refactor Blob Service API
This PR refactors the blob service API to be oriented around blob descriptors.
Identified by digests, blobs become an abstract entity that can be read and
written using a descriptor as a handle. This allows blobs to take many forms,
such as a ReadSeekCloser or a simple byte buffer, allowing blob oriented
operations to better integrate with blob agnostic APIs (such as the `io`
package). The error definitions are now better organized to reflect conditions
that can only be seen when interacting with the blob API.
The main benefit of this is to separate the much smaller metadata from large
file storage. Many benefits also follow from this. Reading and writing has
been separated into discrete services. Backend implementation is also
simplified, by reducing the amount of metadata that needs to be picked up to
simply serve a read. This also improves cacheability.
"Opening" a blob simply consists of an access check (Stat) and a path
calculation. Caching is greatly simplified and we've made the mapping of
provisional to canonical hashes a first-class concept. BlobDescriptorService
and BlobProvider can be combined in different ways to achieve varying effects.
Recommend Review Approach
-------------------------
This is a very large patch. While apologies are in order, we are getting a
considerable amount of refactoring. Most changes follow from the changes to
the root package (distribution), so start there. From there, the main changes
are in storage. Looking at (*repository).Blobs will help to understand how
the linkedBlobStore is wired. One can explore the internals within and also
branch out into understanding the changes to the caching layer. Following the
descriptions below will also help to guide you.
To reduce the chances for regressions, it was critical that major changes to
unit tests were avoided. Where possible, they are left untouched and where
not, the spirit is hopefully captured. Pay particular attention to where
behavior may have changed.
Storage
-------
The primary changes to the `storage` package, other than the interface
updates, were to merge the layerstore and blobstore. Blob access is now
layered even further. The first layer, blobStore, exposes a global
`BlobStatter` and `BlobProvider`. Operations here provide a fast path for most
read operations that don't take access control into account. The
`linkedBlobStore` layers on top of the `blobStore`, providing repository-
scoped blob link management in the backend. The `linkedBlobStore` implements
the full `BlobStore` suite, providing access-controlled, repository-local blob
writers. The abstraction between the two is slightly broken in that
`linkedBlobStore` is the only channel under which one can write into the global
blob store. The `linkedBlobStore` also provides flexibility in that it can act
over different link sets depending on configuration. This allows us to use the
same code for signature links, manifest links and blob links. Eventually, we
will fully consolidate this storage.
The improved cache flow comes from the `linkedBlobStatter` component
of `linkedBlobStore`. Using a `cachedBlobStatter`, these combine together to
provide a simple cache hierarchy that should streamline access checks on read
and write operations, or at least provide a single path to optimize. The
metrics have been changed in a slightly incompatible way since the former
operations, Fetch and Exists, are no longer relevant.
The fileWriter and fileReader have been slightly modified to support the rest
of the changes. The most interesting is the removal of the `Stat` call from
`newFileReader`. This was the source of unnecessary round trips that were only
present to look up the size of the resulting reader. Now, one must simply pass
in the size, requiring the caller to decide whether or not the `Stat` call is
appropriate. In several cases, it turned out the caller already had the size.
The `WriterAt` implementation has been removed from `fileWriter`,
since it is no longer required for `BlobWriter`, reducing the number of paths
which writes may take.
Cache
-----
Unfortunately, the `cache` package required a near full rewrite. It was pretty
mechanical in that the cache is oriented around the `BlobDescriptorService`
slightly modified to include the ability to set the values for individual
digests. While the implementation is oriented towards caching, it can act as a
primary store. Provisions are in place to have repository local metadata, in
addition to global metadata. Fallback is implemented as a part of the storage
package to maintain this flexibility.
One unfortunate side-effect is that caching is now repository-scoped, rather
than global. This should have little effect on performance but may increase
memory usage.
Handlers
--------
The `handlers` package has been updated to leverage the new API. For the most
part, the changes are superficial or mechanical based on the API changes. This
did expose a bug in the handling of provisional vs canonical digests that was
fixed in the unit tests.
Configuration
-------------
One user-facing change has been made to the configuration and is updated in
the associated documentation. The `layerinfo` cache parameter has been
deprecated by the `blobdescriptor` cache parameter. Both are equivalent and
configuration files should be backward compatible.
Notifications
-------------
Changes to the `notification` package are simply to support the interface
changes.
Context
-------
A small change has been made to the tracing log-level. Traces have been moved
from "info" to "debug" level to reduce output when not needed.
Signed-off-by: Stephen J Day <stephen.day@docker.com>
2015-05-12 07:10:29 +00:00
if v != "" {
2016-01-21 00:40:58 +00:00
ctxu . GetLogger ( app ) . Warnf ( "unknown cache type %q, caching disabled" , config . Storage [ "cache" ] )
2015-04-02 23:38:01 +00:00
}
}
}
if app . registry == nil {
// configure the registry if no cache section is available.
2015-08-18 17:56:27 +00:00
app . registry , err = storage . NewRegistry ( app . Context , app . driver , options ... )
if err != nil {
panic ( "could not create registry: " + err . Error ( ) )
}
2015-04-01 23:41:33 +00:00
}
2016-05-24 18:07:55 +00:00
app . registry , err = applyRegistryMiddleware ( app , app . registry , config . Middleware [ "registry" ] )
2015-03-09 17:55:52 +00:00
if err != nil {
panic ( err )
2015-03-06 15:45:16 +00:00
}
2016-01-21 00:40:58 +00:00
authType := config . Auth . Type ( )
2014-12-18 20:30:19 +00:00
if authType != "" {
2016-01-21 00:40:58 +00:00
accessController , err := auth . GetAccessController ( config . Auth . Type ( ) , config . Auth . Parameters ( ) )
2014-12-18 20:30:19 +00:00
if err != nil {
panic ( fmt . Sprintf ( "unable to configure authorization (%s): %v" , authType , err ) )
}
app . accessController = accessController
2015-06-11 02:29:27 +00:00
ctxu . GetLogger ( app ) . Debugf ( "configured %q access controller" , authType )
2014-12-18 20:30:19 +00:00
}
2015-07-29 18:12:01 +00:00
// configure as a pull through cache
2016-01-21 00:40:58 +00:00
if config . Proxy . RemoteURL != "" {
app . registry , err = proxy . NewRegistryPullThroughCache ( ctx , app . registry , app . driver , config . Proxy )
2015-07-29 18:12:01 +00:00
if err != nil {
panic ( err . Error ( ) )
}
app . isCache = true
2016-01-21 00:40:58 +00:00
ctxu . GetLogger ( app ) . Info ( "Registry configured as a proxy cache to " , config . Proxy . RemoteURL )
2015-07-29 18:12:01 +00:00
}
2014-11-11 02:57:38 +00:00
return app
}
2015-08-06 22:28:11 +00:00
// RegisterHealthChecks is an awful hack to defer health check registration
// control to callers. This should only ever be called once per registry
// process, typically in a main function. The correct way would be register
// health checks outside of app, since multiple apps may exist in the same
// process. Because the configuration and app are tightly coupled,
// implementing this properly will require a refactor. This method may panic
// if called twice in the same process.
2015-08-19 22:11:10 +00:00
func ( app * App ) RegisterHealthChecks ( healthRegistries ... * health . Registry ) {
if len ( healthRegistries ) > 1 {
panic ( "RegisterHealthChecks called with more than one registry" )
}
healthRegistry := health . DefaultRegistry
if len ( healthRegistries ) == 1 {
healthRegistry = healthRegistries [ 0 ]
}
2015-08-19 21:12:51 +00:00
if app . Config . Health . StorageDriver . Enabled {
interval := app . Config . Health . StorageDriver . Interval
if interval == 0 {
interval = defaultCheckInterval
}
storageDriverCheck := func ( ) error {
_ , err := app . driver . List ( app , "/" ) // "/" should always exist
return err // any error will be treated as failure
}
if app . Config . Health . StorageDriver . Threshold != 0 {
2015-08-19 22:11:10 +00:00
healthRegistry . RegisterPeriodicThresholdFunc ( "storagedriver_" + app . Config . Storage . Type ( ) , interval , app . Config . Health . StorageDriver . Threshold , storageDriverCheck )
2015-08-19 21:12:51 +00:00
} else {
2015-08-19 22:11:10 +00:00
healthRegistry . RegisterPeriodicFunc ( "storagedriver_" + app . Config . Storage . Type ( ) , interval , storageDriverCheck )
2015-08-19 21:12:51 +00:00
}
}
2015-08-19 00:19:46 +00:00
for _ , fileChecker := range app . Config . Health . FileCheckers {
interval := fileChecker . Interval
if interval == 0 {
interval = defaultCheckInterval
}
2015-08-20 00:57:18 +00:00
ctxu . GetLogger ( app ) . Infof ( "configuring file health check path=%s, interval=%d" , fileChecker . File , interval / time . Second )
healthRegistry . Register ( fileChecker . File , health . PeriodicChecker ( checks . FileChecker ( fileChecker . File ) , interval ) )
2015-08-19 00:19:46 +00:00
}
for _ , httpChecker := range app . Config . Health . HTTPCheckers {
interval := httpChecker . Interval
if interval == 0 {
interval = defaultCheckInterval
}
2015-08-20 00:57:18 +00:00
statusCode := httpChecker . StatusCode
if statusCode == 0 {
statusCode = 200
}
2015-08-20 01:23:58 +00:00
checker := checks . HTTPChecker ( httpChecker . URI , statusCode , httpChecker . Timeout , httpChecker . Headers )
2015-08-20 00:57:18 +00:00
2015-08-19 00:19:46 +00:00
if httpChecker . Threshold != 0 {
ctxu . GetLogger ( app ) . Infof ( "configuring HTTP health check uri=%s, interval=%d, threshold=%d" , httpChecker . URI , interval / time . Second , httpChecker . Threshold )
2015-08-20 00:57:18 +00:00
healthRegistry . Register ( httpChecker . URI , health . PeriodicThresholdChecker ( checker , interval , httpChecker . Threshold ) )
2015-08-19 00:19:46 +00:00
} else {
ctxu . GetLogger ( app ) . Infof ( "configuring HTTP health check uri=%s, interval=%d" , httpChecker . URI , interval / time . Second )
2015-08-20 00:57:18 +00:00
healthRegistry . Register ( httpChecker . URI , health . PeriodicChecker ( checker , interval ) )
}
}
for _ , tcpChecker := range app . Config . Health . TCPCheckers {
interval := tcpChecker . Interval
if interval == 0 {
interval = defaultCheckInterval
}
checker := checks . TCPChecker ( tcpChecker . Addr , tcpChecker . Timeout )
if tcpChecker . Threshold != 0 {
ctxu . GetLogger ( app ) . Infof ( "configuring TCP health check addr=%s, interval=%d, threshold=%d" , tcpChecker . Addr , interval / time . Second , tcpChecker . Threshold )
healthRegistry . Register ( tcpChecker . Addr , health . PeriodicThresholdChecker ( checker , interval , tcpChecker . Threshold ) )
} else {
ctxu . GetLogger ( app ) . Infof ( "configuring TCP health check addr=%s, interval=%d" , tcpChecker . Addr , interval / time . Second )
healthRegistry . Register ( tcpChecker . Addr , health . PeriodicChecker ( checker , interval ) )
2015-08-19 00:19:46 +00:00
}
}
2015-08-06 22:28:11 +00:00
}
2014-11-11 02:57:38 +00:00
// register a handler with the application, by route name. The handler will be
// passed through the application filters and context will be constructed at
// request time.
func (app *App) register(routeName string, dispatch dispatchFunc) {
	// TODO(stevvooe): This odd dispatcher/route registration is by-product of
	// some limitations in the gorilla/mux router. We are using it to keep
	// routing consistent between the client and server, but we may want to
	// replace it with manual routing and structure-based dispatch for better
	// control over the request execution.
	route := app.router.GetRoute(routeName)
	route.Handler(app.dispatcher(dispatch))
}
2015-01-28 23:55:18 +00:00
// configureEvents prepares the event sink for action.
func ( app * App ) configureEvents ( configuration * configuration . Configuration ) {
// Configure all of the endpoint sinks.
var sinks [ ] notifications . Sink
for _ , endpoint := range configuration . Notifications . Endpoints {
if endpoint . Disabled {
2015-02-07 00:19:19 +00:00
ctxu . GetLogger ( app ) . Infof ( "endpoint %s disabled, skipping" , endpoint . Name )
2015-01-28 23:55:18 +00:00
continue
}
2015-02-07 00:19:19 +00:00
ctxu . GetLogger ( app ) . Infof ( "configuring endpoint %v (%v), timeout=%s, headers=%v" , endpoint . Name , endpoint . URL , endpoint . Timeout , endpoint . Headers )
2015-01-28 23:55:18 +00:00
endpoint := notifications . NewEndpoint ( endpoint . Name , endpoint . URL , notifications . EndpointConfig {
Timeout : endpoint . Timeout ,
Threshold : endpoint . Threshold ,
Backoff : endpoint . Backoff ,
Headers : endpoint . Headers ,
} )
sinks = append ( sinks , endpoint )
}
2016-02-11 00:26:29 +00:00
// NOTE(stevvooe): Moving to a new queuing implementation is as easy as
2015-01-28 23:55:18 +00:00
// replacing broadcaster with a rabbitmq implementation. It's recommended
// that the registry instances also act as the workers to keep deployment
// simple.
app . events . sink = notifications . NewBroadcaster ( sinks ... )
// Populate registry event source
hostname , err := os . Hostname ( )
if err != nil {
hostname = configuration . HTTP . Addr
} else {
// try to pick the port off the config
_ , port , err := net . SplitHostPort ( configuration . HTTP . Addr )
if err == nil {
hostname = net . JoinHostPort ( hostname , port )
}
}
app . events . source = notifications . SourceRecord {
Addr : hostname ,
2015-04-10 01:45:39 +00:00
InstanceID : ctxu . GetStringValue ( app , "instance.id" ) ,
2015-01-28 23:55:18 +00:00
}
}
2016-10-06 00:47:12 +00:00
// redisStartAtKey is the context key under which the redis dial start time
// is stored, used to log connection duration.
type redisStartAtKey struct{}
2015-04-01 23:27:24 +00:00
func ( app * App ) configureRedis ( configuration * configuration . Configuration ) {
if configuration . Redis . Addr == "" {
ctxu . GetLogger ( app ) . Infof ( "redis not configured" )
return
}
pool := & redis . Pool {
Dial : func ( ) ( redis . Conn , error ) {
// TODO(stevvooe): Yet another use case for contextual timing.
2016-10-06 00:47:12 +00:00
ctx := context . WithValue ( app , redisStartAtKey { } , time . Now ( ) )
2015-04-01 23:27:24 +00:00
done := func ( err error ) {
logger := ctxu . GetLoggerWithField ( ctx , "redis.connect.duration" ,
2016-10-06 00:47:12 +00:00
ctxu . Since ( ctx , redisStartAtKey { } ) )
2015-04-01 23:27:24 +00:00
if err != nil {
logger . Errorf ( "redis: error connecting: %v" , err )
} else {
logger . Infof ( "redis: connect %v" , configuration . Redis . Addr )
}
}
conn , err := redis . DialTimeout ( "tcp" ,
configuration . Redis . Addr ,
configuration . Redis . DialTimeout ,
configuration . Redis . ReadTimeout ,
configuration . Redis . WriteTimeout )
if err != nil {
ctxu . GetLogger ( app ) . Errorf ( "error connecting to redis instance %s: %v" ,
configuration . Redis . Addr , err )
done ( err )
return nil , err
}
// authorize the connection
if configuration . Redis . Password != "" {
if _ , err = conn . Do ( "AUTH" , configuration . Redis . Password ) ; err != nil {
defer conn . Close ( )
done ( err )
return nil , err
}
}
// select the database to use
if configuration . Redis . DB != 0 {
if _ , err = conn . Do ( "SELECT" , configuration . Redis . DB ) ; err != nil {
defer conn . Close ( )
done ( err )
return nil , err
}
}
done ( nil )
return conn , nil
} ,
MaxIdle : configuration . Redis . Pool . MaxIdle ,
MaxActive : configuration . Redis . Pool . MaxActive ,
IdleTimeout : configuration . Redis . Pool . IdleTimeout ,
TestOnBorrow : func ( c redis . Conn , t time . Time ) error {
// TODO(stevvooe): We can probably do something more interesting
// here with the health package.
_ , err := c . Do ( "PING" )
return err
} ,
Wait : false , // if a connection is not avialable, proceed without cache.
}
app . redis = pool
2015-04-03 04:22:11 +00:00
// setup expvar
registry := expvar . Get ( "registry" )
if registry == nil {
registry = expvar . NewMap ( "registry" )
}
registry . ( * expvar . Map ) . Set ( "redis" , expvar . Func ( func ( ) interface { } {
2015-04-01 23:27:24 +00:00
return map [ string ] interface { } {
"Config" : configuration . Redis ,
"Active" : app . redis . ActiveCount ( ) ,
}
} ) )
}
2015-04-17 12:19:20 +00:00
// configureLogHook prepares logging hook parameters.
func ( app * App ) configureLogHook ( configuration * configuration . Configuration ) {
2015-07-24 03:51:11 +00:00
entry , ok := ctxu . GetLogger ( app ) . ( * log . Entry )
if ! ok {
// somehow, we are not using logrus
return
}
logger := entry . Logger
2015-04-17 12:19:20 +00:00
for _ , configHook := range configuration . Log . Hooks {
if ! configHook . Disabled {
switch configHook . Type {
case "mail" :
hook := & logHook { }
hook . LevelsParam = configHook . Levels
hook . Mail = & mailer {
Addr : configHook . MailOptions . SMTP . Addr ,
Username : configHook . MailOptions . SMTP . Username ,
Password : configHook . MailOptions . SMTP . Password ,
Insecure : configHook . MailOptions . SMTP . Insecure ,
From : configHook . MailOptions . From ,
To : configHook . MailOptions . To ,
}
logger . Hooks . Add ( hook )
default :
}
}
}
}
2015-07-29 19:50:43 +00:00
// configureSecret creates a random secret if a secret wasn't included in the
// configuration.
func (app *App) configureSecret(configuration *configuration.Configuration) {
	if configuration.HTTP.Secret != "" {
		return
	}

	// Generate a cryptographically random secret of randomSecretSize bytes.
	var secretBytes [randomSecretSize]byte
	if _, err := cryptorand.Read(secretBytes[:]); err != nil {
		panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err))
	}
	configuration.HTTP.Secret = string(secretBytes[:])
	ctxu.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.")
}
2015-01-28 23:55:18 +00:00
func ( app * App ) ServeHTTP ( w http . ResponseWriter , r * http . Request ) {
defer r . Body . Close ( ) // ensure that request body is always closed.
2015-04-16 02:20:45 +00:00
// Instantiate an http context here so we can track the error codes
// returned by the request router.
ctx := defaultContextManager . context ( app , w , r )
2015-07-29 23:52:47 +00:00
2015-04-16 02:20:45 +00:00
defer func ( ) {
2015-07-29 23:52:47 +00:00
status , ok := ctx . Value ( "http.response.status" ) . ( int )
if ok && status >= 200 && status <= 399 {
ctxu . GetResponseLogger ( ctx ) . Infof ( "response completed" )
}
2015-04-16 02:20:45 +00:00
} ( )
defer defaultContextManager . release ( ctx )
// NOTE(stevvooe): Total hack to get instrumented responsewriter from context.
var err error
w , err = ctxu . GetResponseWriter ( ctx )
if err != nil {
ctxu . GetLogger ( ctx ) . Warnf ( "response writer not found in context" )
}
2015-01-28 23:55:18 +00:00
// Set a header with the Docker Distribution API Version for all responses.
w . Header ( ) . Add ( "Docker-Distribution-API-Version" , "registry/2.0" )
app . router . ServeHTTP ( w , r )
}
2014-11-11 02:57:38 +00:00
// dispatchFunc takes a context and request and returns a constructed handler
// for the route. The dispatcher will use this to dynamically create request
// specific handlers for each endpoint without creating a new router for each
// request.
type dispatchFunc func(ctx *Context, r *http.Request) http.Handler

// TODO(stevvooe): dispatchers should probably have some validation error
// chain with proper error reporting.
// dispatcher returns a handler that constructs a request specific context and
// handler, using the dispatch factory function.
func ( app * App ) dispatcher ( dispatch dispatchFunc ) http . Handler {
return http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
2015-08-10 21:20:52 +00:00
for headerName , headerValues := range app . Config . HTTP . Headers {
for _ , value := range headerValues {
w . Header ( ) . Add ( headerName , value )
}
}
2015-02-07 00:19:19 +00:00
context := app . context ( w , r )
2014-11-11 02:57:38 +00:00
2015-02-07 00:19:19 +00:00
if err := app . authorized ( w , r , context ) ; err != nil {
2015-07-16 19:40:31 +00:00
ctxu . GetLogger ( context ) . Warnf ( "error authorizing context: %v" , err )
2014-12-18 20:30:19 +00:00
return
}
2014-11-11 02:57:38 +00:00
2015-04-14 23:07:23 +00:00
// Add username to request logging
2016-01-29 01:02:09 +00:00
context . Context = ctxu . WithLogger ( context . Context , ctxu . GetLogger ( context . Context , auth . UserNameKey ) )
2015-04-14 23:07:23 +00:00
2015-02-13 21:59:50 +00:00
if app . nameRequired ( r ) {
2015-12-15 22:35:23 +00:00
nameRef , err := reference . ParseNamed ( getName ( context ) )
if err != nil {
ctxu . GetLogger ( context ) . Errorf ( "error parsing reference from context: %v" , err )
context . Errors = append ( context . Errors , distribution . ErrRepositoryNameInvalid {
Name : getName ( context ) ,
Reason : err ,
} )
if err := errcode . ServeJSON ( w , context . Errors ) ; err != nil {
ctxu . GetLogger ( context ) . Errorf ( "error serving error json: %v (from %v)" , err , context . Errors )
}
return
}
repository , err := app . registry . Repository ( context , nameRef )
2015-02-13 21:59:50 +00:00
if err != nil {
ctxu . GetLogger ( context ) . Errorf ( "error resolving repository: %v" , err )
switch err := err . ( type ) {
case distribution . ErrRepositoryUnknown :
2015-06-03 13:52:39 +00:00
context . Errors = append ( context . Errors , v2 . ErrorCodeNameUnknown . WithDetail ( err ) )
2015-02-13 21:59:50 +00:00
case distribution . ErrRepositoryNameInvalid :
2015-06-03 13:52:39 +00:00
context . Errors = append ( context . Errors , v2 . ErrorCodeNameInvalid . WithDetail ( err ) )
2016-04-29 21:34:24 +00:00
case errcode . Error :
context . Errors = append ( context . Errors , err )
2015-02-13 21:59:50 +00:00
}
2015-07-16 18:35:02 +00:00
if err := errcode . ServeJSON ( w , context . Errors ) ; err != nil {
ctxu . GetLogger ( context ) . Errorf ( "error serving error json: %v (from %v)" , err , context . Errors )
}
2015-02-13 21:59:50 +00:00
return
}
// assign and decorate the authorized repository with an event bridge.
context . Repository = notifications . Listen (
repository ,
app . eventBridge ( context , r ) )
2015-03-06 15:45:16 +00:00
2016-05-24 18:07:55 +00:00
context . Repository , err = applyRepoMiddleware ( app , context . Repository , app . Config . Middleware [ "repository" ] )
2015-03-09 17:55:52 +00:00
if err != nil {
ctxu . GetLogger ( context ) . Errorf ( "error initializing repository middleware: %v" , err )
2015-06-03 13:52:39 +00:00
context . Errors = append ( context . Errors , errcode . ErrorCodeUnknown . WithDetail ( err ) )
2015-05-15 01:21:39 +00:00
2015-07-16 18:35:02 +00:00
if err := errcode . ServeJSON ( w , context . Errors ) ; err != nil {
ctxu . GetLogger ( context ) . Errorf ( "error serving error json: %v (from %v)" , err , context . Errors )
}
2015-03-09 17:55:52 +00:00
return
2015-03-06 15:45:16 +00:00
}
2015-02-13 21:59:50 +00:00
}
2015-04-16 02:20:45 +00:00
dispatch ( context , r ) . ServeHTTP ( w , r )
2014-11-11 02:57:38 +00:00
// Automated error response handling here. Handlers may return their
// own errors if they need different behavior (such as range errors
// for layer upload).
2014-11-21 03:57:01 +00:00
if context . Errors . Len ( ) > 0 {
2015-07-16 18:35:02 +00:00
if err := errcode . ServeJSON ( w , context . Errors ) ; err != nil {
ctxu . GetLogger ( context ) . Errorf ( "error serving error json: %v (from %v)" , err , context . Errors )
}
2015-07-29 23:52:47 +00:00
app . logError ( context , context . Errors )
2014-11-11 02:57:38 +00:00
}
} )
}
2014-12-11 06:33:36 +00:00
2016-10-06 00:47:12 +00:00
// Context keys used by logError to attach error information to the logger.
// Each implements fmt.Stringer so it can double as a structured log field
// name.
type errCodeKey struct{}

func (errCodeKey) String() string { return "err.code" }

type errMessageKey struct{}

func (errMessageKey) String() string { return "err.message" }

type errDetailKey struct{}

func (errDetailKey) String() string { return "err.detail" }
2015-05-15 01:21:39 +00:00
func ( app * App ) logError ( context context . Context , errors errcode . Errors ) {
2015-06-03 13:52:39 +00:00
for _ , e1 := range errors {
var c ctxu . Context
switch e1 . ( type ) {
case errcode . Error :
e , _ := e1 . ( errcode . Error )
2016-10-06 00:47:12 +00:00
c = ctxu . WithValue ( context , errCodeKey { } , e . Code )
c = ctxu . WithValue ( c , errMessageKey { } , e . Code . Message ( ) )
c = ctxu . WithValue ( c , errDetailKey { } , e . Detail )
2015-06-03 13:52:39 +00:00
case errcode . ErrorCode :
e , _ := e1 . ( errcode . ErrorCode )
2016-10-06 00:47:12 +00:00
c = ctxu . WithValue ( context , errCodeKey { } , e )
c = ctxu . WithValue ( c , errMessageKey { } , e . Message ( ) )
2015-06-03 13:52:39 +00:00
default :
// just normal go 'error'
2016-10-06 00:47:12 +00:00
c = ctxu . WithValue ( context , errCodeKey { } , errcode . ErrorCodeUnknown )
c = ctxu . WithValue ( c , errMessageKey { } , e1 . Error ( ) )
2015-06-03 13:52:39 +00:00
}
2015-04-20 23:35:09 +00:00
c = ctxu . WithLogger ( c , ctxu . GetLogger ( c ,
2016-10-06 00:47:12 +00:00
errCodeKey { } ,
errMessageKey { } ,
errDetailKey { } ) )
2015-07-29 23:52:47 +00:00
ctxu . GetResponseLogger ( c ) . Errorf ( "response completed with error" )
2015-04-20 23:35:09 +00:00
}
}
2014-12-18 20:30:19 +00:00
// context constructs the context object for the application. This only be
// called once per request.
2015-02-07 00:19:19 +00:00
func ( app * App ) context ( w http . ResponseWriter , r * http . Request ) * Context {
2015-04-16 02:20:45 +00:00
ctx := defaultContextManager . context ( app , w , r )
2015-02-07 00:19:19 +00:00
ctx = ctxu . WithVars ( ctx , r )
ctx = ctxu . WithLogger ( ctx , ctxu . GetLogger ( ctx ,
"vars.name" ,
2015-02-26 23:47:04 +00:00
"vars.reference" ,
2015-02-07 00:19:19 +00:00
"vars.digest" ,
"vars.uuid" ) )
2014-12-18 20:30:19 +00:00
context := & Context {
2015-09-18 18:03:15 +00:00
App : app ,
Context : ctx ,
}
if app . httpHost . Scheme != "" && app . httpHost . Host != "" {
// A "host" item in the configuration takes precedence over
// X-Forwarded-Proto and X-Forwarded-Host headers, and the
// hostname in the request.
2016-02-23 01:49:23 +00:00
context . urlBuilder = v2 . NewURLBuilder ( & app . httpHost , false )
2015-09-18 18:03:15 +00:00
} else {
2016-02-23 01:49:23 +00:00
context . urlBuilder = v2 . NewURLBuilderFromRequest ( r , app . Config . HTTP . RelativeURLs )
2014-12-18 20:30:19 +00:00
}
return context
}
2015-01-17 02:32:27 +00:00
// authorized checks if the request can proceed with access to the requested
2015-02-10 23:19:02 +00:00
// repository. If it succeeds, the context may access the requested
// repository. An error will be returned if access is not available.
2015-02-07 00:19:19 +00:00
func ( app * App ) authorized ( w http . ResponseWriter , r * http . Request , context * Context ) error {
ctxu . GetLogger ( context ) . Debug ( "authorizing request" )
repo := getName ( context )
2014-12-18 20:30:19 +00:00
if app . accessController == nil {
return nil // access controller is not enabled.
}
var accessRecords [ ] auth . Access
2015-01-17 02:32:27 +00:00
if repo != "" {
2015-03-09 23:23:27 +00:00
accessRecords = appendAccessRecords ( accessRecords , r . Method , repo )
2015-12-15 02:34:18 +00:00
if fromRepo := r . FormValue ( "from" ) ; fromRepo != "" {
// mounting a blob from one repository to another requires pull (GET)
// access to the source repository.
accessRecords = appendAccessRecords ( accessRecords , "GET" , fromRepo )
}
2014-12-19 01:20:35 +00:00
} else {
// Only allow the name not to be set on the base route.
2015-02-13 21:59:50 +00:00
if app . nameRequired ( r ) {
2015-02-10 23:19:02 +00:00
// For this to be properly secured, repo must always be set for a
// resource that may make a modification. The only condition under
// which name is not set and we still allow access is when the
// base route is accessed. This section prevents us from making
// that mistake elsewhere in the code, allowing any operation to
// proceed.
2015-08-06 23:25:08 +00:00
if err := errcode . ServeJSON ( w , errcode . ErrorCodeUnauthorized ) ; err != nil {
2015-07-16 18:35:02 +00:00
ctxu . GetLogger ( context ) . Errorf ( "error serving error json: %v (from %v)" , err , context . Errors )
}
2015-02-10 23:19:02 +00:00
return fmt . Errorf ( "forbidden: no repository name" )
2014-12-19 01:20:35 +00:00
}
2015-07-13 20:08:13 +00:00
accessRecords = appendCatalogAccessRecord ( accessRecords , r )
2014-12-18 20:30:19 +00:00
}
2015-02-07 00:19:19 +00:00
ctx , err := app . accessController . Authorized ( context . Context , accessRecords ... )
2015-02-04 01:59:24 +00:00
if err != nil {
2014-12-18 20:30:19 +00:00
switch err := err . ( type ) {
case auth . Challenge :
2015-06-17 01:57:47 +00:00
// Add the appropriate WWW-Auth header
2015-07-24 02:39:56 +00:00
err . SetHeaders ( w )
2014-12-18 20:30:19 +00:00
2015-08-06 23:25:08 +00:00
if err := errcode . ServeJSON ( w , errcode . ErrorCodeUnauthorized . WithDetail ( accessRecords ) ) ; err != nil {
2015-07-16 18:35:02 +00:00
ctxu . GetLogger ( context ) . Errorf ( "error serving error json: %v (from %v)" , err , context . Errors )
}
2014-12-18 20:30:19 +00:00
default :
// This condition is a potential security problem either in
// the configuration or whatever is backing the access
// controller. Just return a bad request with no information
// to avoid exposure. The request should not proceed.
2015-02-07 00:19:19 +00:00
ctxu . GetLogger ( context ) . Errorf ( "error checking authorization: %v" , err )
2014-12-18 20:30:19 +00:00
w . WriteHeader ( http . StatusBadRequest )
}
return err
}
2015-02-09 22:44:58 +00:00
// TODO(stevvooe): This pattern needs to be cleaned up a bit. One context
// should be replaced by another, rather than replacing the context on a
// mutable object.
2015-02-07 00:19:19 +00:00
context . Context = ctx
2014-12-18 20:30:19 +00:00
return nil
}
2015-01-28 23:55:18 +00:00
// eventBridge returns a bridge for the current request, configured with the
// correct actor and source.
func ( app * App ) eventBridge ( ctx * Context , r * http . Request ) notifications . Listener {
actor := notifications . ActorRecord {
2015-02-07 00:19:19 +00:00
Name : getUserName ( ctx , r ) ,
2015-01-28 23:55:18 +00:00
}
2015-02-07 00:19:19 +00:00
request := notifications . NewRequestRecord ( ctxu . GetRequestID ( ctx ) , r )
2015-01-28 23:55:18 +00:00
2015-02-03 21:28:10 +00:00
return notifications . NewBridge ( ctx . urlBuilder , app . events . source , actor , request , app . events . sink )
2015-01-28 23:55:18 +00:00
}
2015-02-13 21:59:50 +00:00
// nameRequired returns true if the route requires a name.
func ( app * App ) nameRequired ( r * http . Request ) bool {
route := mux . CurrentRoute ( r )
2015-07-13 20:08:13 +00:00
routeName := route . GetName ( )
return route == nil || ( routeName != v2 . RouteNameBase && routeName != v2 . RouteNameCatalog )
2015-02-13 21:59:50 +00:00
}
2014-12-11 06:33:36 +00:00
// apiBase implements a simple yes-man for doing overall checks against the
// api. This can support auth roundtrips to support docker login.
func apiBase ( w http . ResponseWriter , r * http . Request ) {
const emptyJSON = "{}"
// Provide a simple /v2/ 200 OK response with empty json response.
2014-12-31 04:09:45 +00:00
w . Header ( ) . Set ( "Content-Type" , "application/json; charset=utf-8" )
2014-12-11 06:33:36 +00:00
w . Header ( ) . Set ( "Content-Length" , fmt . Sprint ( len ( emptyJSON ) ) )
fmt . Fprint ( w , emptyJSON )
}
2015-03-09 23:23:27 +00:00
// appendAccessRecords checks the method and adds the appropriate Access records to the records list.
func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access {
	resource := auth.Resource{
		Type: "repository",
		Name: repo,
	}

	// Map the HTTP verb onto the registry action(s) it implies.
	var actions []string
	switch method {
	case "GET", "HEAD":
		actions = []string{"pull"}
	case "POST", "PUT", "PATCH":
		actions = []string{"pull", "push"}
	case "DELETE":
		// DELETE access requires full admin rights, which is represented
		// as "*". This may not be ideal.
		actions = []string{"*"}
	}

	for _, action := range actions {
		records = append(records, auth.Access{
			Resource: resource,
			Action:   action,
		})
	}
	return records
}
2015-03-09 17:55:52 +00:00
2015-07-13 20:08:13 +00:00
// appendCatalogAccessRecord adds the access record for the catalog if it's
// our current route. The request must hold "*" access on the registry
// catalog resource to list repositories.
func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access {
	route := mux.CurrentRoute(r)
	// BUG FIX: guard against a nil route before calling GetName —
	// mux.CurrentRoute returns nil for unmatched requests (mirrors the
	// nil handling in nameRequired).
	if route == nil || route.GetName() != v2.RouteNameCatalog {
		return accessRecords
	}

	resource := auth.Resource{
		Type: "registry",
		Name: "catalog",
	}
	return append(accessRecords,
		auth.Access{
			Resource: resource,
			Action:   "*",
		})
}
2015-03-09 17:55:52 +00:00
// applyRegistryMiddleware wraps a registry instance with the configured middlewares
2015-07-29 18:12:01 +00:00
func applyRegistryMiddleware ( ctx context . Context , registry distribution . Namespace , middlewares [ ] configuration . Middleware ) ( distribution . Namespace , error ) {
2015-03-09 17:55:52 +00:00
for _ , mw := range middlewares {
2015-07-29 18:12:01 +00:00
rmw , err := registrymiddleware . Get ( ctx , mw . Name , mw . Options , registry )
2015-03-09 17:55:52 +00:00
if err != nil {
return nil , fmt . Errorf ( "unable to configure registry middleware (%s): %s" , mw . Name , err )
}
registry = rmw
}
return registry , nil
}
// applyRepoMiddleware wraps a repository with the configured middlewares
2015-07-29 18:12:01 +00:00
func applyRepoMiddleware ( ctx context . Context , repository distribution . Repository , middlewares [ ] configuration . Middleware ) ( distribution . Repository , error ) {
2015-03-09 17:55:52 +00:00
for _ , mw := range middlewares {
2015-07-29 18:12:01 +00:00
rmw , err := repositorymiddleware . Get ( ctx , mw . Name , mw . Options , repository )
2015-03-09 17:55:52 +00:00
if err != nil {
return nil , err
}
repository = rmw
}
return repository , nil
}
// applyStorageMiddleware wraps a storage driver with the configured middlewares
func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) {
	// Each configured middleware layers on top of the driver built so far.
	for _, mw := range middlewares {
		wrapped, err := storagemiddleware.Get(mw.Name, mw.Options, driver)
		if err != nil {
			return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err)
		}
		driver = wrapped
	}

	return driver, nil
}
2015-04-07 22:52:48 +00:00
2015-04-17 01:34:29 +00:00
// uploadPurgeDefaultConfig provides a default configuration for upload
// purging to be used in the absence of configuration in the
// configuration file.
func uploadPurgeDefaultConfig() map[interface{}]interface{} {
	// Defaults: purging enabled, uploads older than a week (168h) removed,
	// checked once a day, and not a dry run.
	return map[interface{}]interface{}{
		"enabled":  true,
		"age":      "168h",
		"interval": "24h",
		"dryrun":   false,
	}
}
// badPurgeUploadConfig aborts startup with a descriptive panic when the
// upload purge configuration cannot be parsed.
func badPurgeUploadConfig(reason string) {
	msg := fmt.Sprintf("Unable to parse upload purge configuration: %s", reason)
	panic(msg)
}
2015-04-07 22:52:48 +00:00
// startUploadPurger schedules a goroutine which will periodically
// check upload directories for old files and delete them
2015-04-27 22:58:58 +00:00
func startUploadPurger ( ctx context . Context , storageDriver storagedriver . StorageDriver , log ctxu . Logger , config map [ interface { } ] interface { } ) {
2015-04-17 01:34:29 +00:00
if config [ "enabled" ] == false {
return
}
2015-04-07 22:52:48 +00:00
2015-04-17 01:34:29 +00:00
var purgeAgeDuration time . Duration
var err error
purgeAge , ok := config [ "age" ]
if ok {
ageStr , ok := purgeAge . ( string )
if ! ok {
badPurgeUploadConfig ( "age is not a string" )
}
purgeAgeDuration , err = time . ParseDuration ( ageStr )
if err != nil {
badPurgeUploadConfig ( fmt . Sprintf ( "Cannot parse duration: %s" , err . Error ( ) ) )
}
} else {
badPurgeUploadConfig ( "age missing" )
}
var intervalDuration time . Duration
interval , ok := config [ "interval" ]
if ok {
intervalStr , ok := interval . ( string )
if ! ok {
badPurgeUploadConfig ( "interval is not a string" )
}
intervalDuration , err = time . ParseDuration ( intervalStr )
if err != nil {
badPurgeUploadConfig ( fmt . Sprintf ( "Cannot parse interval: %s" , err . Error ( ) ) )
}
} else {
badPurgeUploadConfig ( "interval missing" )
}
var dryRunBool bool
dryRun , ok := config [ "dryrun" ]
if ok {
dryRunBool , ok = dryRun . ( bool )
if ! ok {
badPurgeUploadConfig ( "cannot parse dryrun" )
}
} else {
badPurgeUploadConfig ( "dryrun missing" )
}
2015-04-07 22:52:48 +00:00
go func ( ) {
2015-04-17 01:34:29 +00:00
rand . Seed ( time . Now ( ) . Unix ( ) )
jitter := time . Duration ( rand . Int ( ) % 60 ) * time . Minute
2015-04-07 22:52:48 +00:00
log . Infof ( "Starting upload purge in %s" , jitter )
time . Sleep ( jitter )
for {
2015-04-27 22:58:58 +00:00
storage . PurgeUploads ( ctx , storageDriver , time . Now ( ) . Add ( - purgeAgeDuration ) , ! dryRunBool )
2015-04-17 01:34:29 +00:00
log . Infof ( "Starting upload purge in %s" , intervalDuration )
time . Sleep ( intervalDuration )
2015-04-07 22:52:48 +00:00
}
} ( )
}