plugin/metrics: Switch to using promhttp instead of deprecated Handler (#1312)

prometheus.Handler is deprecated according to the package's godoc, so we're
using promhttp instead.

Additionally, we are exposing the Registry that metrics uses, so that plugins
living outside of the coredns repo can read it. If we kept using the default
registry, there would be no way to access it from outside the coredns repo,
since the package is vendored.
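As a minimal sketch of the pattern (names like Registry and the example port
are assumptions for illustration, not the plugin's actual code), a dedicated
*prometheus.Registry can be exported and served with promhttp.HandlerFor so
that collectors can be registered against it from outside the repo:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Registry is exported so plugins outside this repo can register collectors on it.
var Registry = prometheus.NewRegistry()

func main() {
	// Register a collector; an external plugin would call Registry.MustRegister too.
	Registry.MustRegister(prometheus.NewGoCollector())

	// promhttp.HandlerFor replaces the deprecated prometheus.Handler().
	http.Handle("/metrics", promhttp.HandlerFor(Registry, promhttp.HandlerOpts{}))
	http.ListenAndServe(":9153", nil) // 9153 is CoreDNS's default metrics port
}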
James Hartig 2017-12-14 13:19:03 -05:00 committed by Miek Gieben
parent 1919913c98
commit 671d170619
6728 changed files with 1994787 additions and 16 deletions


@@ -0,0 +1,458 @@
{
"ImportPath": "github.com/docker/distribution",
"GoVersion": "go1.6",
"GodepVersion": "v74",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
"Comment": "v5.0.0-beta-6-g0b5fe2a",
"Rev": "0b5fe2abe0271ba07049eacaa65922d67c319543"
},
{
"ImportPath": "github.com/Sirupsen/logrus",
"Comment": "v0.7.3",
"Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d"
},
{
"ImportPath": "github.com/Sirupsen/logrus/formatters/logstash",
"Comment": "v0.7.3",
"Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/request",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/session",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudfront/sign",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath",
"Comment": "v1.2.4",
"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
},
{
"ImportPath": "github.com/bugsnag/bugsnag-go",
"Comment": "v1.0.2-5-gb1d1530",
"Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274"
},
{
"ImportPath": "github.com/bugsnag/bugsnag-go/errors",
"Comment": "v1.0.2-5-gb1d1530",
"Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274"
},
{
"ImportPath": "github.com/bugsnag/osext",
"Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702"
},
{
"ImportPath": "github.com/bugsnag/panicwrap",
"Comment": "1.0.0-2-ge2c2850",
"Rev": "e2c28503fcd0675329da73bf48b33404db873782"
},
{
"ImportPath": "github.com/denverdino/aliyungo/common",
"Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2"
},
{
"ImportPath": "github.com/denverdino/aliyungo/oss",
"Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2"
},
{
"ImportPath": "github.com/denverdino/aliyungo/util",
"Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2"
},
{
"ImportPath": "github.com/docker/goamz/aws",
"Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85"
},
{
"ImportPath": "github.com/docker/goamz/s3",
"Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85"
},
{
"ImportPath": "github.com/docker/libtrust",
"Rev": "fa567046d9b14f6aa788882a950d69651d230b21"
},
{
"ImportPath": "github.com/garyburd/redigo/internal",
"Rev": "535138d7bcd717d6531c701ef5933d98b1866257"
},
{
"ImportPath": "github.com/garyburd/redigo/redis",
"Rev": "535138d7bcd717d6531c701ef5933d98b1866257"
},
{
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "8d92cf5fc15a4382f8964b08e1f42a75c0591aa3"
},
{
"ImportPath": "github.com/gorilla/context",
"Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a"
},
{
"ImportPath": "github.com/gorilla/handlers",
"Rev": "60c7bfde3e33c201519a200a4507a158cc03a17b"
},
{
"ImportPath": "github.com/gorilla/mux",
"Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf"
},
{
"ImportPath": "github.com/inconshreveable/mousetrap",
"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
},
{
"ImportPath": "github.com/mitchellh/mapstructure",
"Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef"
},
{
"ImportPath": "github.com/ncw/swift",
"Rev": "ce444d6d47c51d4dda9202cd38f5094dd8e27e86"
},
{
"ImportPath": "github.com/ncw/swift/swifttest",
"Rev": "ce444d6d47c51d4dda9202cd38f5094dd8e27e86"
},
{
"ImportPath": "github.com/spf13/cobra",
"Rev": "312092086bed4968099259622145a0c9ae280064"
},
{
"ImportPath": "github.com/spf13/pflag",
"Rev": "5644820622454e71517561946e3d94b9f9db6842"
},
{
"ImportPath": "github.com/stevvooe/resumable",
"Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4"
},
{
"ImportPath": "github.com/stevvooe/resumable/sha256",
"Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4"
},
{
"ImportPath": "github.com/stevvooe/resumable/sha512",
"Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4"
},
{
"ImportPath": "github.com/yvasiyarov/go-metrics",
"Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e"
},
{
"ImportPath": "github.com/yvasiyarov/gorelic",
"Comment": "v0.0.6-8-ga9bba5b",
"Rev": "a9bba5b9ab508a086f9a12b8c51fab68478e2128"
},
{
"ImportPath": "github.com/yvasiyarov/newrelic_platform_go",
"Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6"
},
{
"ImportPath": "golang.org/x/crypto/bcrypt",
"Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b"
},
{
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b"
},
{
"ImportPath": "golang.org/x/crypto/ocsp",
"Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "4876518f9e71663000c348837735820161a42df7"
},
{
"ImportPath": "golang.org/x/net/context/ctxhttp",
"Rev": "4876518f9e71663000c348837735820161a42df7"
},
{
"ImportPath": "golang.org/x/net/http2",
"Rev": "4876518f9e71663000c348837735820161a42df7"
},
{
"ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "4876518f9e71663000c348837735820161a42df7"
},
{
"ImportPath": "golang.org/x/net/internal/timeseries",
"Rev": "4876518f9e71663000c348837735820161a42df7"
},
{
"ImportPath": "golang.org/x/net/trace",
"Rev": "4876518f9e71663000c348837735820161a42df7"
},
{
"ImportPath": "golang.org/x/oauth2",
"Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf"
},
{
"ImportPath": "golang.org/x/oauth2/google",
"Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf"
},
{
"ImportPath": "golang.org/x/oauth2/internal",
"Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf"
},
{
"ImportPath": "golang.org/x/oauth2/jws",
"Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf"
},
{
"ImportPath": "golang.org/x/oauth2/jwt",
"Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf"
},
{
"ImportPath": "golang.org/x/time/rate",
"Rev": "a4bde12657593d5e90d0533a3e4fd95e635124cb"
},
{
"ImportPath": "google.golang.org/api/gensupport",
"Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54"
},
{
"ImportPath": "google.golang.org/api/googleapi",
"Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54"
},
{
"ImportPath": "google.golang.org/api/googleapi/internal/uritemplates",
"Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54"
},
{
"ImportPath": "google.golang.org/api/storage/v1",
"Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54"
},
{
"ImportPath": "google.golang.org/appengine",
"Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19"
},
{
"ImportPath": "google.golang.org/appengine/internal",
"Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19"
},
{
"ImportPath": "google.golang.org/appengine/internal/app_identity",
"Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19"
},
{
"ImportPath": "google.golang.org/appengine/internal/base",
"Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19"
},
{
"ImportPath": "google.golang.org/appengine/internal/datastore",
"Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19"
},
{
"ImportPath": "google.golang.org/appengine/internal/log",
"Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19"
},
{
"ImportPath": "google.golang.org/appengine/internal/modules",
"Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19"
},
{
"ImportPath": "google.golang.org/appengine/internal/remote_api",
"Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19"
},
{
"ImportPath": "google.golang.org/cloud",
"Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2"
},
{
"ImportPath": "google.golang.org/cloud/compute/metadata",
"Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2"
},
{
"ImportPath": "google.golang.org/cloud/internal",
"Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2"
},
{
"ImportPath": "google.golang.org/cloud/internal/opts",
"Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2"
},
{
"ImportPath": "google.golang.org/cloud/storage",
"Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2"
},
{
"ImportPath": "google.golang.org/grpc",
"Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994"
},
{
"ImportPath": "google.golang.org/grpc/codes",
"Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994"
},
{
"ImportPath": "google.golang.org/grpc/credentials",
"Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994"
},
{
"ImportPath": "google.golang.org/grpc/grpclog",
"Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994"
},
{
"ImportPath": "google.golang.org/grpc/internal",
"Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994"
},
{
"ImportPath": "google.golang.org/grpc/metadata",
"Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994"
},
{
"ImportPath": "google.golang.org/grpc/naming",
"Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994"
},
{
"ImportPath": "google.golang.org/grpc/peer",
"Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994"
},
{
"ImportPath": "google.golang.org/grpc/transport",
"Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994"
},
{
"ImportPath": "gopkg.in/check.v1",
"Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673"
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420"
},
{
"ImportPath": "rsc.io/letsencrypt",
"Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c"
},
{
"ImportPath": "rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme",
"Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c"
},
{
"ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1",
"Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c"
},
{
"ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher",
"Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c"
},
{
"ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json",
"Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c"
}
]
}

vendor/github.com/docker/distribution/Godeps/Readme (generated, vendored)

@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.


@@ -0,0 +1,97 @@
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/version"
)
var (
algorithm = digest.Canonical
showVersion bool
)
type job struct {
name string
reader io.Reader
}
func init() {
flag.Var(&algorithm, "a", "select the digest algorithm (shorthand)")
flag.Var(&algorithm, "algorithm", "select the digest algorithm")
flag.BoolVar(&showVersion, "version", false, "show the version and exit")
log.SetFlags(0)
log.SetPrefix(os.Args[0] + ": ")
}
func usage() {
fmt.Fprintf(os.Stderr, "usage: %s [files...]\n", os.Args[0])
fmt.Fprintf(os.Stderr, `
Calculate the digest of one or more input files, emitting the result
to standard out. If no files are provided, the digest of stdin will
be calculated.
`)
flag.PrintDefaults()
}
func unsupported() {
log.Fatalf("unsupported digest algorithm: %v", algorithm)
}
func main() {
var jobs []job
flag.Usage = usage
flag.Parse()
if showVersion {
version.PrintVersion()
return
}
var fail bool // if we fail on one item, foul the exit code
if flag.NArg() > 0 {
for _, path := range flag.Args() {
fp, err := os.Open(path)
if err != nil {
log.Printf("%s: %v", path, err)
fail = true
continue
}
defer fp.Close()
jobs = append(jobs, job{name: path, reader: fp})
}
} else {
// just read stdin
jobs = append(jobs, job{name: "-", reader: os.Stdin})
}
digestFn := algorithm.FromReader
if !algorithm.Available() {
unsupported()
}
for _, job := range jobs {
dgst, err := digestFn(job.reader)
if err != nil {
log.Printf("%s: %v", job.name, err)
fail = true
continue
}
fmt.Printf("%v\t%s\n", dgst, job.name)
}
if fail {
os.Exit(1)
}
}


@@ -0,0 +1,131 @@
// registry-api-descriptor-template uses the APIDescriptor defined in the
// api/v2 package to execute templates passed to the command line.
//
// For example, to generate a new API specification, one would execute the
// following command from the repo root:
//
// $ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md
//
// The templates are passed in the api/v2.APIDescriptor object. Please see the
// package documentation for fields available on that object. The template
// syntax is from Go's standard library text/template package. For information
// on Go's template syntax, please see golang.org/pkg/text/template.
package main
import (
"log"
"net/http"
"os"
"path/filepath"
"regexp"
"text/template"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
)
var spaceRegex = regexp.MustCompile(`\n\s*`)
func main() {
if len(os.Args) != 2 {
log.Fatalln("please specify a template to execute.")
}
path := os.Args[1]
filename := filepath.Base(path)
funcMap := template.FuncMap{
"removenewlines": func(s string) string {
return spaceRegex.ReplaceAllString(s, " ")
},
"statustext": http.StatusText,
"prettygorilla": prettyGorillaMuxPath,
}
tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path))
data := struct {
RouteDescriptors []v2.RouteDescriptor
ErrorDescriptors []errcode.ErrorDescriptor
}{
RouteDescriptors: v2.APIDescriptor.RouteDescriptors,
ErrorDescriptors: append(errcode.GetErrorCodeGroup("registry.api.v2"),
// The following are part of the specification but provided by errcode default.
errcode.ErrorCodeUnauthorized.Descriptor(),
errcode.ErrorCodeDenied.Descriptor(),
errcode.ErrorCodeUnsupported.Descriptor()),
}
if err := tmpl.Execute(os.Stdout, data); err != nil {
log.Fatalln(err)
}
}
// prettyGorillaMuxPath removes the regular expressions from a gorilla/mux
// route string, making it suitable for documentation.
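// For example (illustrative), a route like
//	/v2/{name:[a-z0-9]+}/manifests/{reference}
// is rendered as
//	/v2/<name>/manifests/<reference>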
func prettyGorillaMuxPath(s string) string {
// Stateful parser that removes regular expressions from gorilla
// routes. It correctly handles balanced bracket pairs.
var output string
var label string
var level int
start:
if s[0] == '{' {
s = s[1:]
level++
goto capture
}
output += string(s[0])
s = s[1:]
goto end
capture:
switch s[0] {
case '{':
level++
case '}':
level--
if level == 0 {
s = s[1:]
goto label
}
case ':':
s = s[1:]
goto skip
default:
label += string(s[0])
}
s = s[1:]
goto capture
skip:
switch s[0] {
case '{':
level++
case '}':
level--
}
s = s[1:]
if level == 0 {
goto label
}
goto skip
label:
if label != "" {
output += "<" + label + ">"
label = ""
}
end:
if s != "" {
goto start
}
return output
}


@@ -0,0 +1,55 @@
version: 0.1
log:
level: debug
fields:
service: registry
environment: development
storage:
cache:
blobdescriptor: redis
filesystem:
rootdirectory: /var/lib/registry-cache
maintenance:
uploadpurging:
enabled: false
http:
addr: :5000
secret: asecretforlocaldevelopment
debug:
addr: localhost:5001
headers:
X-Content-Type-Options: [nosniff]
redis:
addr: localhost:6379
pool:
maxidle: 16
maxactive: 64
idletimeout: 300s
dialtimeout: 10ms
readtimeout: 10ms
writetimeout: 10ms
notifications:
endpoints:
- name: local-8082
url: http://localhost:5003/callback
headers:
Authorization: [Bearer <an example token>]
timeout: 1s
threshold: 10
backoff: 1s
disabled: true
- name: local-8083
url: http://localhost:8083/callback
timeout: 1s
threshold: 10
backoff: 1s
disabled: true
proxy:
remoteurl: https://registry-1.docker.io
username: username
password: password
health:
storagedriver:
enabled: true
interval: 10s
threshold: 3


@@ -0,0 +1,66 @@
version: 0.1
log:
level: debug
fields:
service: registry
environment: development
hooks:
- type: mail
disabled: true
levels:
- panic
options:
smtp:
addr: mail.example.com:25
username: mailuser
password: password
insecure: true
from: sender@example.com
to:
- errors@example.com
storage:
delete:
enabled: true
cache:
blobdescriptor: redis
filesystem:
rootdirectory: /var/lib/registry
maintenance:
uploadpurging:
enabled: false
http:
addr: :5000
debug:
addr: localhost:5001
headers:
X-Content-Type-Options: [nosniff]
redis:
addr: localhost:6379
pool:
maxidle: 16
maxactive: 64
idletimeout: 300s
dialtimeout: 10ms
readtimeout: 10ms
writetimeout: 10ms
notifications:
endpoints:
- name: local-5003
url: http://localhost:5003/callback
headers:
Authorization: [Bearer <an example token>]
timeout: 1s
threshold: 10
backoff: 1s
disabled: true
- name: local-8083
url: http://localhost:8083/callback
timeout: 1s
threshold: 10
backoff: 1s
disabled: true
health:
storagedriver:
enabled: true
interval: 10s
threshold: 3


@@ -0,0 +1,18 @@
version: 0.1
log:
fields:
service: registry
storage:
cache:
blobdescriptor: inmemory
filesystem:
rootdirectory: /var/lib/registry
http:
addr: :5000
headers:
X-Content-Type-Options: [nosniff]
health:
storagedriver:
enabled: true
interval: 10s
threshold: 3


@@ -0,0 +1,25 @@
package main
import (
_ "net/http/pprof"
"github.com/docker/distribution/registry"
_ "github.com/docker/distribution/registry/auth/htpasswd"
_ "github.com/docker/distribution/registry/auth/silly"
_ "github.com/docker/distribution/registry/auth/token"
_ "github.com/docker/distribution/registry/proxy"
_ "github.com/docker/distribution/registry/storage/driver/azure"
_ "github.com/docker/distribution/registry/storage/driver/filesystem"
_ "github.com/docker/distribution/registry/storage/driver/gcs"
_ "github.com/docker/distribution/registry/storage/driver/inmemory"
_ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront"
_ "github.com/docker/distribution/registry/storage/driver/middleware/redirect"
_ "github.com/docker/distribution/registry/storage/driver/oss"
_ "github.com/docker/distribution/registry/storage/driver/s3-aws"
_ "github.com/docker/distribution/registry/storage/driver/s3-goamz"
_ "github.com/docker/distribution/registry/storage/driver/swift"
)
func main() {
registry.RootCmd.Execute()
}


@@ -0,0 +1,643 @@
package configuration
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"reflect"
"strings"
"time"
)
// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and
// optionally modified by environment variables.
//
// Note that yaml field names should never include _ characters, since this is the separator used
// in environment variable names.
type Configuration struct {
// Version is the version which defines the format of the rest of the configuration
Version Version `yaml:"version"`
// Log supports setting various parameters related to the logging
// subsystem.
Log struct {
// AccessLog configures access logging.
AccessLog struct {
// Disabled disables access logging.
Disabled bool `yaml:"disabled,omitempty"`
} `yaml:"accesslog,omitempty"`
// Level is the granularity at which registry operations are logged.
Level Loglevel `yaml:"level"`
// Formatter overrides the default formatter with another. Options
// include "text", "json" and "logstash".
Formatter string `yaml:"formatter,omitempty"`
// Fields allows users to specify static string fields to include in
// the logger context.
Fields map[string]interface{} `yaml:"fields,omitempty"`
// Hooks allows users to configure log hooks, enabling subsequent
// handling behavior when log messages at the defined levels are emitted.
Hooks []LogHook `yaml:"hooks,omitempty"`
}
// Loglevel is the level at which registry operations are logged. This is
// deprecated. Please use Log.Level in the future.
Loglevel Loglevel `yaml:"loglevel,omitempty"`
// Storage is the configuration for the registry's storage driver
Storage Storage `yaml:"storage"`
// Auth allows configuration of various authorization methods that may be
// used to gate requests.
Auth Auth `yaml:"auth,omitempty"`
// Middleware lists all middlewares to be used by the registry.
Middleware map[string][]Middleware `yaml:"middleware,omitempty"`
// Reporting is the configuration for error reporting
Reporting Reporting `yaml:"reporting,omitempty"`
// HTTP contains configuration parameters for the registry's http
// interface.
HTTP struct {
// Addr specifies the bind address for the registry instance.
Addr string `yaml:"addr,omitempty"`
// Net specifies the net portion of the bind address. A default empty value means tcp.
Net string `yaml:"net,omitempty"`
// Host specifies an externally-reachable address for the registry, as a fully
// qualified URL.
Host string `yaml:"host,omitempty"`
Prefix string `yaml:"prefix,omitempty"`
// Secret specifies the secret key which HMAC tokens are created with.
Secret string `yaml:"secret,omitempty"`
// RelativeURLs specifies that relative URLs should be returned in
// Location headers
RelativeURLs bool `yaml:"relativeurls,omitempty"`
// TLS instructs the http server to listen with a TLS configuration.
// This only supports a simple TLS configuration with a cert and key.
// Mostly, this is useful for testing situations or simple deployments
// that require tls. If more complex configurations are required, use
// a proxy or make a proposal to add support here.
TLS struct {
// Certificate specifies the path to an x509 certificate file to
// be used for TLS.
Certificate string `yaml:"certificate,omitempty"`
// Key specifies the path to the x509 key file, which should
// contain the private portion for the file specified in
// Certificate.
Key string `yaml:"key,omitempty"`
// Specifies the CA certs for client authentication
// A file may contain multiple CA certificates encoded as PEM
ClientCAs []string `yaml:"clientcas,omitempty"`
// LetsEncrypt is used to configure setting up TLS through
// Let's Encrypt instead of manually specifying certificate and
// key. If a TLS certificate is specified, the Let's Encrypt
// section will not be used.
LetsEncrypt struct {
// CacheFile specifies the cache file to use for Let's Encrypt
// certificates and keys.
CacheFile string `yaml:"cachefile,omitempty"`
// Email is the email to use during Let's Encrypt registration
Email string `yaml:"email,omitempty"`
} `yaml:"letsencrypt,omitempty"`
} `yaml:"tls,omitempty"`
// Headers is a set of headers to include in HTTP responses. A common
// use case for this would be security headers such as
// Strict-Transport-Security. The map keys are the header names, and
// the values are the associated header payloads.
Headers http.Header `yaml:"headers,omitempty"`
// Debug configures the http debug interface, if specified. This can
// include services such as pprof, expvar and other data that should
// not be exposed externally. Left disabled by default.
Debug struct {
// Addr specifies the bind address for the debug server.
Addr string `yaml:"addr,omitempty"`
} `yaml:"debug,omitempty"`
// HTTP2 configuration options
HTTP2 struct {
// Specifies whether the registry should disallow clients attempting
// to connect via http2. If set to true, only http/1.1 is supported.
Disabled bool `yaml:"disabled,omitempty"`
} `yaml:"http2,omitempty"`
} `yaml:"http,omitempty"`
// Notifications specifies configuration about various endpoints to which
// registry events are dispatched.
Notifications Notifications `yaml:"notifications,omitempty"`
// Redis configures the redis pool available to the registry webapp.
Redis struct {
// Addr specifies the redis instance available to the application.
Addr string `yaml:"addr,omitempty"`
// Password string to use when making a connection.
Password string `yaml:"password,omitempty"`
// DB specifies the database to connect to on the redis instance.
DB int `yaml:"db,omitempty"`
DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` // timeout for connect
ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` // timeout for reads of data
WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data
// Pool configures the behavior of the redis connection pool.
Pool struct {
// MaxIdle sets the maximum number of idle connections.
MaxIdle int `yaml:"maxidle,omitempty"`
// MaxActive sets the maximum number of connections that should be
// opened before blocking a connection request.
MaxActive int `yaml:"maxactive,omitempty"`
// IdleTimeout sets the amount of time to wait before closing
// inactive connections.
IdleTimeout time.Duration `yaml:"idletimeout,omitempty"`
} `yaml:"pool,omitempty"`
} `yaml:"redis,omitempty"`
Health Health `yaml:"health,omitempty"`
Proxy Proxy `yaml:"proxy,omitempty"`
// Compatibility is used for configurations of working with older or deprecated features.
Compatibility struct {
// Schema1 configures how schema1 manifests will be handled
Schema1 struct {
// TrustKey is the signing key to use for adding the signature to
// schema1 manifests.
TrustKey string `yaml:"signingkeyfile,omitempty"`
} `yaml:"schema1,omitempty"`
} `yaml:"compatibility,omitempty"`
// Validation configures validation options for the registry.
Validation struct {
// Enabled enables the other options in this section.
Enabled bool `yaml:"enabled,omitempty"`
// Manifests configures manifest validation.
Manifests struct {
// URLs configures validation for URLs in pushed manifests.
URLs struct {
// Allow specifies regular expressions (https://godoc.org/regexp/syntax)
// that URLs in pushed manifests must match.
Allow []string `yaml:"allow,omitempty"`
// Deny specifies regular expressions (https://godoc.org/regexp/syntax)
// that URLs in pushed manifests must not match.
Deny []string `yaml:"deny,omitempty"`
} `yaml:"urls,omitempty"`
} `yaml:"manifests,omitempty"`
} `yaml:"validation,omitempty"`
// Policy configures registry policy options.
Policy struct {
// Repository configures policies for repositories
Repository struct {
// Classes is a list of repository classes which the
// registry allows content for. This class is matched
// against the configuration media type inside uploaded
// manifests. When non-empty, the registry will enforce
// the class in authorized resources.
Classes []string `yaml:"classes"`
} `yaml:"repository,omitempty"`
} `yaml:"policy,omitempty"`
}
// LogHook is composed of a hook Level and Type.
// Once hooks are configured, they can execute the next handler automatically
// when log messages at the defined levels are emitted.
// Example: a hook can send an email notification when an error log occurs in the app.
type LogHook struct {
// Disabled lets the user choose whether to enable the hook.
Disabled bool `yaml:"disabled,omitempty"`
// Type allows the user to select which type of hook handler they want.
Type string `yaml:"type,omitempty"`
// Levels sets which log message levels will trigger the hook.
Levels []string `yaml:"levels,omitempty"`
// MailOptions allows the user to configure email parameters.
MailOptions MailOptions `yaml:"options,omitempty"`
}
// MailOptions provides the configuration sections to the user for a specific handler.
type MailOptions struct {
SMTP struct {
// Addr defines smtp host address
Addr string `yaml:"addr,omitempty"`
// Username defines user name to smtp host
Username string `yaml:"username,omitempty"`
// Password defines password of login user
Password string `yaml:"password,omitempty"`
// Insecure defines whether the smtp login skips certificate verification.
Insecure bool `yaml:"insecure,omitempty"`
} `yaml:"smtp,omitempty"`
// From defines mail sending address
From string `yaml:"from,omitempty"`
// To defines mail receiving address
To []string `yaml:"to,omitempty"`
}
// FileChecker is a type of entry in the health section for checking files.
type FileChecker struct {
// Interval is the duration in between checks
Interval time.Duration `yaml:"interval,omitempty"`
// File is the path to check
File string `yaml:"file,omitempty"`
// Threshold is the number of times a check must fail to trigger an
// unhealthy state
Threshold int `yaml:"threshold,omitempty"`
}
// HTTPChecker is a type of entry in the health section for checking HTTP URIs.
type HTTPChecker struct {
// Timeout is the duration to wait before timing out the HTTP request
Timeout time.Duration `yaml:"timeout,omitempty"`
// StatusCode is the expected status code
StatusCode int
// Interval is the duration in between checks
Interval time.Duration `yaml:"interval,omitempty"`
// URI is the HTTP URI to check
URI string `yaml:"uri,omitempty"`
// Headers lists static headers that should be added to all requests
Headers http.Header `yaml:"headers"`
// Threshold is the number of times a check must fail to trigger an
// unhealthy state
Threshold int `yaml:"threshold,omitempty"`
}
// TCPChecker is a type of entry in the health section for checking TCP servers.
type TCPChecker struct {
// Timeout is the duration to wait before timing out the TCP connection
Timeout time.Duration `yaml:"timeout,omitempty"`
// Interval is the duration in between checks
Interval time.Duration `yaml:"interval,omitempty"`
// Addr is the TCP address to check
Addr string `yaml:"addr,omitempty"`
// Threshold is the number of times a check must fail to trigger an
// unhealthy state
Threshold int `yaml:"threshold,omitempty"`
}
// Health provides the configuration section for health checks.
type Health struct {
// FileCheckers is a list of paths to check
FileCheckers []FileChecker `yaml:"file,omitempty"`
// HTTPCheckers is a list of URIs to check
HTTPCheckers []HTTPChecker `yaml:"http,omitempty"`
// TCPCheckers is a list of URIs to check
TCPCheckers []TCPChecker `yaml:"tcp,omitempty"`
// StorageDriver configures a health check on the configured storage
// driver
StorageDriver struct {
// Enabled turns on the health check for the storage driver
Enabled bool `yaml:"enabled,omitempty"`
// Interval is the duration in between checks
Interval time.Duration `yaml:"interval,omitempty"`
// Threshold is the number of times a check must fail to trigger an
// unhealthy state
Threshold int `yaml:"threshold,omitempty"`
} `yaml:"storagedriver,omitempty"`
}
// v0_1Configuration is a Version 0.1 Configuration struct
// This is currently aliased to Configuration, as it is the current version
type v0_1Configuration Configuration
// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints
func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
var versionString string
err := unmarshal(&versionString)
if err != nil {
return err
}
newVersion := Version(versionString)
if _, err := newVersion.major(); err != nil {
return err
}
if _, err := newVersion.minor(); err != nil {
return err
}
*version = newVersion
return nil
}
// CurrentVersion is the most recent Version that can be parsed
var CurrentVersion = MajorMinorVersion(0, 1)
// Loglevel is the level at which operations are logged
// This can be error, warn, info, or debug
type Loglevel string
// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a
// valid loglevel
func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
var loglevelString string
err := unmarshal(&loglevelString)
if err != nil {
return err
}
loglevelString = strings.ToLower(loglevelString)
switch loglevelString {
case "error", "warn", "info", "debug":
default:
return fmt.Errorf("Invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString)
}
*loglevel = Loglevel(loglevelString)
return nil
}
// Parameters defines a key-value parameters mapping
type Parameters map[string]interface{}
// Storage defines the configuration for registry object storage
type Storage map[string]Parameters
// Type returns the storage driver type, such as filesystem or s3
func (storage Storage) Type() string {
var storageType []string
// Return the only storage-driver key in this map
for k := range storage {
switch k {
case "maintenance":
// allow configuration of maintenance
case "cache":
// allow configuration of caching
case "delete":
// allow configuration of delete
case "redirect":
// allow configuration of redirect
default:
storageType = append(storageType, k)
}
}
if len(storageType) > 1 {
panic("multiple storage drivers specified in configuration or environment: " + strings.Join(storageType, ", "))
}
if len(storageType) == 1 {
return storageType[0]
}
return ""
}
// Parameters returns the Parameters map for a Storage configuration
func (storage Storage) Parameters() Parameters {
return storage[storage.Type()]
}
// setParameter changes the parameter at the provided key to the new value
func (storage Storage) setParameter(key string, value interface{}) {
storage[storage.Type()][key] = value
}
// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters
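// For example (illustrative), the document "storage: inmemory" unmarshals to
// Storage{"inmemory": Parameters{}}, while
//	storage:
//	  filesystem:
//	    rootdirectory: /var/lib/registry
// unmarshals to Storage{"filesystem": Parameters{"rootdirectory": "/var/lib/registry"}}.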
func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
var storageMap map[string]Parameters
err := unmarshal(&storageMap)
if err == nil {
if len(storageMap) > 1 {
types := make([]string, 0, len(storageMap))
for k := range storageMap {
switch k {
case "maintenance":
// allow for configuration of maintenance
case "cache":
// allow configuration of caching
case "delete":
// allow configuration of delete
case "redirect":
// allow configuration of redirect
default:
types = append(types, k)
}
}
if len(types) > 1 {
return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types)
}
}
*storage = storageMap
return nil
}
var storageType string
err = unmarshal(&storageType)
if err == nil {
*storage = Storage{storageType: Parameters{}}
return nil
}
return err
}
// MarshalYAML implements the yaml.Marshaler interface
func (storage Storage) MarshalYAML() (interface{}, error) {
if storage.Parameters() == nil {
return storage.Type(), nil
}
return map[string]Parameters(storage), nil
}
// Auth defines the configuration for registry authorization.
type Auth map[string]Parameters
// Type returns the auth type, such as htpasswd or token
func (auth Auth) Type() string {
// Return the only key in this map
for k := range auth {
return k
}
return ""
}
// Parameters returns the Parameters map for an Auth configuration
func (auth Auth) Parameters() Parameters {
return auth[auth.Type()]
}
// setParameter changes the parameter at the provided key to the new value
func (auth Auth) setParameter(key string, value interface{}) {
auth[auth.Type()][key] = value
}
// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a single item map into an Auth or a string into an Auth type with no parameters
func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error {
var m map[string]Parameters
err := unmarshal(&m)
if err == nil {
if len(m) > 1 {
types := make([]string, 0, len(m))
for k := range m {
types = append(types, k)
}
// TODO(stevvooe): May want to change this slightly for
// authorization to allow multiple challenges.
return fmt.Errorf("must provide exactly one type. Provided: %v", types)
}
*auth = m
return nil
}
var authType string
err = unmarshal(&authType)
if err == nil {
*auth = Auth{authType: Parameters{}}
return nil
}
return err
}
// MarshalYAML implements the yaml.Marshaler interface
func (auth Auth) MarshalYAML() (interface{}, error) {
if auth.Parameters() == nil {
return auth.Type(), nil
}
return map[string]Parameters(auth), nil
}
// Notifications configures multiple http endpoints.
type Notifications struct {
// Endpoints is a list of http configurations for endpoints that
// respond to webhook notifications. In the future, we may allow other
// kinds of endpoints, such as external queues.
Endpoints []Endpoint `yaml:"endpoints,omitempty"`
}
// Endpoint describes the configuration of an http webhook notification
// endpoint.
type Endpoint struct {
Name string `yaml:"name"` // identifies the endpoint in the registry instance.
Disabled bool `yaml:"disabled"` // disables the endpoint
URL string `yaml:"url"` // post url for the endpoint.
Headers http.Header `yaml:"headers"` // static headers that should be added to all requests
Timeout time.Duration `yaml:"timeout"` // HTTP timeout
Threshold int `yaml:"threshold"` // circuit breaker threshold before backing off on failure
Backoff time.Duration `yaml:"backoff"` // backoff duration
IgnoredMediaTypes []string `yaml:"ignoredmediatypes"` // target media types to ignore
}
// Reporting defines error reporting methods.
type Reporting struct {
// Bugsnag configures error reporting for Bugsnag (bugsnag.com).
Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"`
// NewRelic configures error reporting for NewRelic (newrelic.com)
NewRelic NewRelicReporting `yaml:"newrelic,omitempty"`
}
// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com).
type BugsnagReporting struct {
// APIKey is the Bugsnag api key.
APIKey string `yaml:"apikey,omitempty"`
// ReleaseStage tracks where the registry is deployed.
// Examples: production, staging, development
ReleaseStage string `yaml:"releasestage,omitempty"`
// Endpoint is used for specifying an enterprise Bugsnag endpoint.
Endpoint string `yaml:"endpoint,omitempty"`
}
// NewRelicReporting configures error reporting for NewRelic (newrelic.com)
type NewRelicReporting struct {
// LicenseKey is the NewRelic user license key
LicenseKey string `yaml:"licensekey,omitempty"`
// Name is the component name of the registry in NewRelic
Name string `yaml:"name,omitempty"`
// Verbose configures debug output to STDOUT
Verbose bool `yaml:"verbose,omitempty"`
}
// Middleware configures named middlewares to be applied at injection points.
type Middleware struct {
// Name the middleware registers itself as
Name string `yaml:"name"`
// Flag to disable middleware easily
Disabled bool `yaml:"disabled,omitempty"`
// Map of parameters that will be passed to the middleware's initialization function
Options Parameters `yaml:"options"`
}
// Proxy configures the registry as a pull through cache
type Proxy struct {
// RemoteURL is the URL of the remote registry
RemoteURL string `yaml:"remoteurl"`
// Username of the hub user
Username string `yaml:"username"`
// Password of the hub user
Password string `yaml:"password"`
}
// Parse parses an input configuration yaml document into a Configuration struct
// This should generally be capable of handling old configuration format versions
//
// Environment variables may be used to override configuration parameters other than version,
// following the scheme below:
// Configuration.Abc may be replaced by the value of REGISTRY_ABC,
// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth
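// For example (illustrative), with a yaml document containing "loglevel: info",
// setting REGISTRY_LOGLEVEL=debug overrides the parsed Loglevel to "debug".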
func Parse(rd io.Reader) (*Configuration, error) {
in, err := ioutil.ReadAll(rd)
if err != nil {
return nil, err
}
p := NewParser("registry", []VersionedParseInfo{
{
Version: MajorMinorVersion(0, 1),
ParseAs: reflect.TypeOf(v0_1Configuration{}),
ConversionFunc: func(c interface{}) (interface{}, error) {
if v0_1, ok := c.(*v0_1Configuration); ok {
if v0_1.Loglevel == Loglevel("") {
v0_1.Loglevel = Loglevel("info")
}
if v0_1.Storage.Type() == "" {
return nil, fmt.Errorf("No storage configuration provided")
}
return (*Configuration)(v0_1), nil
}
return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c)
},
},
})
config := new(Configuration)
err = p.Parse(in, config)
if err != nil {
return nil, err
}
return config, nil
}


@@ -0,0 +1,529 @@
package configuration
import (
"bytes"
"net/http"
"os"
"reflect"
"strings"
"testing"
. "gopkg.in/check.v1"
"gopkg.in/yaml.v2"
)
// Hook up gocheck into the "go test" runner
func Test(t *testing.T) { TestingT(t) }
// configStruct is a canonical example configuration, which should map to configYamlV0_1
var configStruct = Configuration{
Version: "0.1",
Log: struct {
AccessLog struct {
Disabled bool `yaml:"disabled,omitempty"`
} `yaml:"accesslog,omitempty"`
Level Loglevel `yaml:"level"`
Formatter string `yaml:"formatter,omitempty"`
Fields map[string]interface{} `yaml:"fields,omitempty"`
Hooks []LogHook `yaml:"hooks,omitempty"`
}{
Fields: map[string]interface{}{"environment": "test"},
},
Loglevel: "info",
Storage: Storage{
"s3": Parameters{
"region": "us-east-1",
"bucket": "my-bucket",
"rootdirectory": "/registry",
"encrypt": true,
"secure": false,
"accesskey": "SAMPLEACCESSKEY",
"secretkey": "SUPERSECRET",
"host": nil,
"port": 42,
},
},
Auth: Auth{
"silly": Parameters{
"realm": "silly",
"service": "silly",
},
},
Reporting: Reporting{
Bugsnag: BugsnagReporting{
APIKey: "BugsnagApiKey",
},
},
Notifications: Notifications{
Endpoints: []Endpoint{
{
Name: "endpoint-1",
URL: "http://example.com",
Headers: http.Header{
"Authorization": []string{"Bearer <example>"},
},
IgnoredMediaTypes: []string{"application/octet-stream"},
},
},
},
HTTP: struct {
Addr string `yaml:"addr,omitempty"`
Net string `yaml:"net,omitempty"`
Host string `yaml:"host,omitempty"`
Prefix string `yaml:"prefix,omitempty"`
Secret string `yaml:"secret,omitempty"`
RelativeURLs bool `yaml:"relativeurls,omitempty"`
TLS struct {
Certificate string `yaml:"certificate,omitempty"`
Key string `yaml:"key,omitempty"`
ClientCAs []string `yaml:"clientcas,omitempty"`
LetsEncrypt struct {
CacheFile string `yaml:"cachefile,omitempty"`
Email string `yaml:"email,omitempty"`
} `yaml:"letsencrypt,omitempty"`
} `yaml:"tls,omitempty"`
Headers http.Header `yaml:"headers,omitempty"`
Debug struct {
Addr string `yaml:"addr,omitempty"`
} `yaml:"debug,omitempty"`
HTTP2 struct {
Disabled bool `yaml:"disabled,omitempty"`
} `yaml:"http2,omitempty"`
}{
TLS: struct {
Certificate string `yaml:"certificate,omitempty"`
Key string `yaml:"key,omitempty"`
ClientCAs []string `yaml:"clientcas,omitempty"`
LetsEncrypt struct {
CacheFile string `yaml:"cachefile,omitempty"`
Email string `yaml:"email,omitempty"`
} `yaml:"letsencrypt,omitempty"`
}{
ClientCAs: []string{"/path/to/ca.pem"},
},
Headers: http.Header{
"X-Content-Type-Options": []string{"nosniff"},
},
HTTP2: struct {
Disabled bool `yaml:"disabled,omitempty"`
}{
Disabled: false,
},
},
}
// configYamlV0_1 is a Version 0.1 yaml document representing configStruct
var configYamlV0_1 = `
version: 0.1
log:
fields:
environment: test
loglevel: info
storage:
s3:
region: us-east-1
bucket: my-bucket
rootdirectory: /registry
encrypt: true
secure: false
accesskey: SAMPLEACCESSKEY
secretkey: SUPERSECRET
host: ~
port: 42
auth:
silly:
realm: silly
service: silly
notifications:
endpoints:
- name: endpoint-1
url: http://example.com
headers:
Authorization: [Bearer <example>]
ignoredmediatypes:
- application/octet-stream
reporting:
bugsnag:
apikey: BugsnagApiKey
http:
clientcas:
- /path/to/ca.pem
headers:
X-Content-Type-Options: [nosniff]
`
// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory
// storage driver with no parameters
var inmemoryConfigYamlV0_1 = `
version: 0.1
loglevel: info
storage: inmemory
auth:
silly:
realm: silly
service: silly
notifications:
endpoints:
- name: endpoint-1
url: http://example.com
headers:
Authorization: [Bearer <example>]
ignoredmediatypes:
- application/octet-stream
http:
headers:
X-Content-Type-Options: [nosniff]
`
type ConfigSuite struct {
expectedConfig *Configuration
}
var _ = Suite(new(ConfigSuite))
func (suite *ConfigSuite) SetUpTest(c *C) {
os.Clearenv()
suite.expectedConfig = copyConfig(configStruct)
}
// TestMarshalRoundtrip validates that configStruct can be marshaled and
// unmarshaled without changing any parameters
func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) {
configBytes, err := yaml.Marshal(suite.expectedConfig)
c.Assert(err, IsNil)
config, err := Parse(bytes.NewReader(configBytes))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
}
// TestParseSimple validates that configYamlV0_1 can be parsed into a struct
// matching configStruct
func (suite *ConfigSuite) TestParseSimple(c *C) {
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
}
// TestParseInmemory validates that configuration yaml with storage provided as
// a string can be parsed into a Configuration struct with no storage parameters
func (suite *ConfigSuite) TestParseInmemory(c *C) {
suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}
suite.expectedConfig.Reporting = Reporting{}
suite.expectedConfig.Log.Fields = nil
config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
}
// TestParseIncomplete validates that an incomplete yaml configuration cannot
// be parsed without providing environment variables to fill in the missing
// components.
func (suite *ConfigSuite) TestParseIncomplete(c *C) {
incompleteConfigYaml := "version: 0.1"
_, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml)))
c.Assert(err, NotNil)
suite.expectedConfig.Log.Fields = nil
suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}}
suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}}
suite.expectedConfig.Reporting = Reporting{}
suite.expectedConfig.Notifications = Notifications{}
suite.expectedConfig.HTTP.Headers = nil
// Note: this also tests that REGISTRY_STORAGE and
// REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY can be used together
os.Setenv("REGISTRY_STORAGE", "filesystem")
os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")
os.Setenv("REGISTRY_AUTH", "silly")
os.Setenv("REGISTRY_AUTH_SILLY_REALM", "silly")
config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
}
// TestParseWithSameEnvStorage validates that providing environment variables
// that match the given storage type will only include environment-defined
// parameters and remove yaml-defined parameters
func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) {
suite.expectedConfig.Storage = Storage{"s3": Parameters{"region": "us-east-1"}}
os.Setenv("REGISTRY_STORAGE", "s3")
os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
}
// TestParseWithDifferentEnvStorageParams validates that providing environment variables that change
// and add to the given storage parameters will change and add parameters to the parsed
// Configuration struct
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) {
suite.expectedConfig.Storage.setParameter("region", "us-west-1")
suite.expectedConfig.Storage.setParameter("secure", true)
suite.expectedConfig.Storage.setParameter("newparam", "some Value")
os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1")
os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true")
os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
}
// TestParseWithDifferentEnvStorageType validates that providing an environment variable that
// changes the storage type will be reflected in the parsed Configuration struct
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) {
suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}
os.Setenv("REGISTRY_STORAGE", "inmemory")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
}
// TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable
// that changes the storage type will be reflected in the parsed Configuration struct and that
// environment storage parameters will also be included
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) {
suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}}
suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot")
os.Setenv("REGISTRY_STORAGE", "filesystem")
os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
}
// TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log
// level to the same as the one provided in the yaml will not change the parsed Configuration struct
func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) {
os.Setenv("REGISTRY_LOGLEVEL", "info")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
}
// TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the
// log level will override the value provided in the yaml document
func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) {
suite.expectedConfig.Loglevel = "error"
os.Setenv("REGISTRY_LOGLEVEL", "error")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
}
// TestParseInvalidLoglevel validates that the parser will fail to parse a
// configuration if the loglevel is malformed
func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) {
invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory"
_, err := Parse(bytes.NewReader([]byte(invalidConfigYaml)))
c.Assert(err, NotNil)
os.Setenv("REGISTRY_LOGLEVEL", "derp")
_, err = Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, NotNil)
}
// TestParseWithDifferentEnvReporting validates that environment variables
// properly override reporting parameters
func (suite *ConfigSuite) TestParseWithDifferentEnvReporting(c *C) {
suite.expectedConfig.Reporting.Bugsnag.APIKey = "anotherBugsnagApiKey"
suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080"
suite.expectedConfig.Reporting.NewRelic.LicenseKey = "NewRelicLicenseKey"
suite.expectedConfig.Reporting.NewRelic.Name = "some NewRelic NAME"
os.Setenv("REGISTRY_REPORTING_BUGSNAG_APIKEY", "anotherBugsnagApiKey")
os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080")
os.Setenv("REGISTRY_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey")
os.Setenv("REGISTRY_REPORTING_NEWRELIC_NAME", "some NewRelic NAME")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
}
// TestParseInvalidVersion validates that the parser will fail to parse a newer configuration
// version than the CurrentVersion
func (suite *ConfigSuite) TestParseInvalidVersion(c *C) {
suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1)
configBytes, err := yaml.Marshal(suite.expectedConfig)
c.Assert(err, IsNil)
_, err = Parse(bytes.NewReader(configBytes))
c.Assert(err, NotNil)
}
// TestParseExtraneousVars validates that environment variables referring to
// nonexistent variables don't cause side effects.
func (suite *ConfigSuite) TestParseExtraneousVars(c *C) {
suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080"
// A valid environment variable
os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080")
// Environment variables which shouldn't set config items
os.Setenv("registry_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey")
os.Setenv("REPORTING_NEWRELIC_NAME", "some NewRelic NAME")
os.Setenv("REGISTRY_DUCKS", "quack")
os.Setenv("REGISTRY_REPORTING_ASDF", "ghjk")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
}
// TestParseEnvVarImplicitMaps validates that environment variables can set
// values in maps that don't already exist.
func (suite *ConfigSuite) TestParseEnvVarImplicitMaps(c *C) {
readonly := make(map[string]interface{})
readonly["enabled"] = true
maintenance := make(map[string]interface{})
maintenance["readonly"] = readonly
suite.expectedConfig.Storage["maintenance"] = maintenance
os.Setenv("REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED", "true")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
}
// TestParseEnvWrongTypeMap validates that incorrectly attempting to unmarshal a
// string over existing map fails.
func (suite *ConfigSuite) TestParseEnvWrongTypeMap(c *C) {
os.Setenv("REGISTRY_STORAGE_S3", "somestring")
_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, NotNil)
}
// TestParseEnvWrongTypeStruct validates that incorrectly attempting to
// unmarshal a string into a struct fails.
func (suite *ConfigSuite) TestParseEnvWrongTypeStruct(c *C) {
os.Setenv("REGISTRY_STORAGE_LOG", "somestring")
_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, NotNil)
}
// TestParseEnvWrongTypeSlice validates that incorrectly attempting to
// unmarshal a string into a slice fails.
func (suite *ConfigSuite) TestParseEnvWrongTypeSlice(c *C) {
os.Setenv("REGISTRY_LOG_HOOKS", "somestring")
_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, NotNil)
}
// TestParseEnvMany tests several environment variable overrides.
// The result is not checked - the goal of this test is to detect panics
// from misuse of reflection.
func (suite *ConfigSuite) TestParseEnvMany(c *C) {
os.Setenv("REGISTRY_VERSION", "0.1")
os.Setenv("REGISTRY_LOG_LEVEL", "debug")
os.Setenv("REGISTRY_LOG_FORMATTER", "json")
os.Setenv("REGISTRY_LOG_HOOKS", "json")
os.Setenv("REGISTRY_LOG_FIELDS", "abc: xyz")
os.Setenv("REGISTRY_LOG_HOOKS", "- type: asdf")
os.Setenv("REGISTRY_LOGLEVEL", "debug")
os.Setenv("REGISTRY_STORAGE", "s3")
os.Setenv("REGISTRY_AUTH_PARAMS", "param1: value1")
os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2")
os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2")
_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
}
func checkStructs(c *C, t reflect.Type, structsChecked map[string]struct{}) {
for t.Kind() == reflect.Ptr || t.Kind() == reflect.Map || t.Kind() == reflect.Slice {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
return
}
if _, present := structsChecked[t.String()]; present {
// Already checked this type
return
}
structsChecked[t.String()] = struct{}{}
byUpperCase := make(map[string]int)
for i := 0; i < t.NumField(); i++ {
sf := t.Field(i)
// Check that the yaml tag does not contain an _.
yamlTag := sf.Tag.Get("yaml")
if strings.Contains(yamlTag, "_") {
c.Fatalf("yaml field name includes _ character: %s", yamlTag)
}
upper := strings.ToUpper(sf.Name)
if _, present := byUpperCase[upper]; present {
c.Fatalf("field name collision in configuration object: %s", sf.Name)
}
byUpperCase[upper] = i
checkStructs(c, sf.Type, structsChecked)
}
}
// TestValidateConfigStruct makes sure that the config struct has no members
// with yaml tags that would be ambiguous to the environment variable parser.
func (suite *ConfigSuite) TestValidateConfigStruct(c *C) {
structsChecked := make(map[string]struct{})
checkStructs(c, reflect.TypeOf(Configuration{}), structsChecked)
}
func copyConfig(config Configuration) *Configuration {
configCopy := new(Configuration)
configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor())
configCopy.Loglevel = config.Loglevel
configCopy.Log = config.Log
configCopy.Log.Fields = make(map[string]interface{}, len(config.Log.Fields))
for k, v := range config.Log.Fields {
configCopy.Log.Fields[k] = v
}
configCopy.Storage = Storage{config.Storage.Type(): Parameters{}}
for k, v := range config.Storage.Parameters() {
configCopy.Storage.setParameter(k, v)
}
configCopy.Reporting = Reporting{
Bugsnag: BugsnagReporting{config.Reporting.Bugsnag.APIKey, config.Reporting.Bugsnag.ReleaseStage, config.Reporting.Bugsnag.Endpoint},
NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name, config.Reporting.NewRelic.Verbose},
}
configCopy.Auth = Auth{config.Auth.Type(): Parameters{}}
for k, v := range config.Auth.Parameters() {
configCopy.Auth.setParameter(k, v)
}
configCopy.Notifications = Notifications{Endpoints: []Endpoint{}}
for _, v := range config.Notifications.Endpoints {
configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, v)
}
configCopy.HTTP.Headers = make(http.Header)
for k, v := range config.HTTP.Headers {
configCopy.HTTP.Headers[k] = v
}
return configCopy
}


@ -0,0 +1,283 @@
package configuration
import (
"fmt"
"os"
"reflect"
"sort"
"strconv"
"strings"
"github.com/Sirupsen/logrus"
"gopkg.in/yaml.v2"
)
// Version is a major/minor version pair of the form Major.Minor
// Major version upgrades indicate structure or type changes
// Minor version upgrades should be strictly additive
type Version string
// MajorMinorVersion constructs a Version from its Major and Minor components
func MajorMinorVersion(major, minor uint) Version {
return Version(fmt.Sprintf("%d.%d", major, minor))
}
func (version Version) major() (uint, error) {
majorPart := strings.Split(string(version), ".")[0]
major, err := strconv.ParseUint(majorPart, 10, 0)
return uint(major), err
}
// Major returns the major version portion of a Version
func (version Version) Major() uint {
major, _ := version.major()
return major
}
func (version Version) minor() (uint, error) {
minorPart := strings.Split(string(version), ".")[1]
minor, err := strconv.ParseUint(minorPart, 10, 0)
return uint(minor), err
}
// Minor returns the minor version portion of a Version
func (version Version) Minor() uint {
minor, _ := version.minor()
return minor
}
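// The following sketch is added for illustration and is not part of the
// upstream file: it shows how a Version is constructed and decomposed.
func exampleVersion() {
	v := MajorMinorVersion(0, 1) // Version("0.1")
	_ = v.Major()                // 0
	_ = v.Minor()                // 1
}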
// VersionedParseInfo defines how a specific version of a configuration should
// be parsed into the current version
type VersionedParseInfo struct {
// Version is the version which this parsing information relates to
Version Version
// ParseAs defines the type which a configuration file of this version
// should be parsed into
ParseAs reflect.Type
// ConversionFunc defines a method for converting the parsed configuration
// (of type ParseAs) into the current configuration version
// Note: this method signature is very unclear with the absence of generics
ConversionFunc func(interface{}) (interface{}, error)
}
type envVar struct {
name string
value string
}
type envVars []envVar
func (a envVars) Len() int { return len(a) }
func (a envVars) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a envVars) Less(i, j int) bool { return a[i].name < a[j].name }
// Parser can be used to parse a configuration file and environment of a defined
// version into a unified output structure
type Parser struct {
prefix string
mapping map[Version]VersionedParseInfo
env envVars
}
// NewParser returns a *Parser with the given environment prefix which handles
// versioned configurations which match the given parseInfos
func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser {
p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo)}
for _, parseInfo := range parseInfos {
p.mapping[parseInfo.Version] = parseInfo
}
for _, env := range os.Environ() {
envParts := strings.SplitN(env, "=", 2)
p.env = append(p.env, envVar{envParts[0], envParts[1]})
}
// We must sort the environment variables lexically by name so that
// less specific variables are applied before more specific ones
// (i.e. REGISTRY_STORAGE before
// REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY). This sucks, but it's a
// lot simpler and easier to get right than unmarshalling map entries
// into temporaries and merging with the existing entry.
sort.Sort(p.env)
return &p
}
// Parse reads in the given []byte and environment and writes the resulting
// configuration into the input v
//
// Environment variables may be used to override configuration parameters other
// than version, following the scheme below:
// v.Abc may be replaced by the value of PREFIX_ABC,
// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth
func (p *Parser) Parse(in []byte, v interface{}) error {
var versionedStruct struct {
Version Version
}
if err := yaml.Unmarshal(in, &versionedStruct); err != nil {
return err
}
parseInfo, ok := p.mapping[versionedStruct.Version]
if !ok {
return fmt.Errorf("Unsupported version: %q", versionedStruct.Version)
}
parseAs := reflect.New(parseInfo.ParseAs)
err := yaml.Unmarshal(in, parseAs.Interface())
if err != nil {
return err
}
for _, envVar := range p.env {
pathStr := envVar.name
if strings.HasPrefix(pathStr, strings.ToUpper(p.prefix)+"_") {
path := strings.Split(pathStr, "_")
err = p.overwriteFields(parseAs, pathStr, path[1:], envVar.value)
if err != nil {
return err
}
}
}
c, err := parseInfo.ConversionFunc(parseAs.Interface())
if err != nil {
return err
}
reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c)))
return nil
}
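// The following sketch is added for illustration and is not part of the
// upstream file. It wires a hypothetical exampleConfigV01 type through the
// parser; the type, its fields, and the "registry" prefix are assumptions.
type exampleConfigV01 struct {
	Version  Version `yaml:"version"`
	Loglevel string  `yaml:"loglevel"`
}

func exampleParse(in []byte) (*exampleConfigV01, error) {
	p := NewParser("registry", []VersionedParseInfo{{
		Version: MajorMinorVersion(0, 1),
		ParseAs: reflect.TypeOf(exampleConfigV01{}),
		ConversionFunc: func(c interface{}) (interface{}, error) {
			if parsed, ok := c.(*exampleConfigV01); ok {
				return parsed, nil
			}
			return nil, fmt.Errorf("expected *exampleConfigV01, got %T", c)
		},
	}})
	// With this prefix, e.g. REGISTRY_LOGLEVEL=debug overrides the YAML value.
	var config exampleConfigV01
	if err := p.Parse(in, &config); err != nil {
		return nil, err
	}
	return &config, nil
}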
// overwriteFields replaces configuration values with alternate values specified
// through the environment. Precondition: an empty path slice must never be
// passed in.
func (p *Parser) overwriteFields(v reflect.Value, fullpath string, path []string, payload string) error {
for v.Kind() == reflect.Ptr {
if v.IsNil() {
panic("encountered nil pointer while handling environment variable " + fullpath)
}
v = reflect.Indirect(v)
}
switch v.Kind() {
case reflect.Struct:
return p.overwriteStruct(v, fullpath, path, payload)
case reflect.Map:
return p.overwriteMap(v, fullpath, path, payload)
case reflect.Interface:
if v.NumMethod() == 0 {
if !v.IsNil() {
return p.overwriteFields(v.Elem(), fullpath, path, payload)
}
// Interface was empty; create an implicit map
var template map[string]interface{}
wrappedV := reflect.MakeMap(reflect.TypeOf(template))
v.Set(wrappedV)
return p.overwriteMap(wrappedV, fullpath, path, payload)
}
}
return nil
}
func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string, payload string) error {
// Generate case-insensitive map of struct fields
byUpperCase := make(map[string]int)
for i := 0; i < v.NumField(); i++ {
sf := v.Type().Field(i)
upper := strings.ToUpper(sf.Name)
if _, present := byUpperCase[upper]; present {
panic(fmt.Sprintf("field name collision in configuration object: %s", sf.Name))
}
byUpperCase[upper] = i
}
fieldIndex, present := byUpperCase[path[0]]
if !present {
logrus.Warnf("Ignoring unrecognized environment variable %s", fullpath)
return nil
}
field := v.Field(fieldIndex)
sf := v.Type().Field(fieldIndex)
if len(path) == 1 {
// Env var specifies this field directly
fieldVal := reflect.New(sf.Type)
err := yaml.Unmarshal([]byte(payload), fieldVal.Interface())
if err != nil {
return err
}
field.Set(reflect.Indirect(fieldVal))
return nil
}
// If the field is nil, must create an object
switch sf.Type.Kind() {
case reflect.Map:
if field.IsNil() {
field.Set(reflect.MakeMap(sf.Type))
}
case reflect.Ptr:
if field.IsNil() {
field.Set(reflect.New(sf.Type))
}
}
err := p.overwriteFields(field, fullpath, path[1:], payload)
if err != nil {
return err
}
return nil
}
func (p *Parser) overwriteMap(m reflect.Value, fullpath string, path []string, payload string) error {
if m.Type().Key().Kind() != reflect.String {
// non-string keys unsupported
logrus.Warnf("Ignoring environment variable %s involving map with non-string keys", fullpath)
return nil
}
if len(path) > 1 {
// If a matching key exists, get its value and continue the
// overwriting process.
for _, k := range m.MapKeys() {
if strings.ToUpper(k.String()) == path[0] {
mapValue := m.MapIndex(k)
// If the existing value is nil, we want to
// recreate it instead of using this value.
if (mapValue.Kind() == reflect.Ptr ||
mapValue.Kind() == reflect.Interface ||
mapValue.Kind() == reflect.Map) &&
mapValue.IsNil() {
break
}
return p.overwriteFields(mapValue, fullpath, path[1:], payload)
}
}
}
// (Re)create this key
var mapValue reflect.Value
if m.Type().Elem().Kind() == reflect.Map {
mapValue = reflect.MakeMap(m.Type().Elem())
} else {
mapValue = reflect.New(m.Type().Elem())
}
if len(path) > 1 {
err := p.overwriteFields(mapValue, fullpath, path[1:], payload)
if err != nil {
return err
}
} else {
err := yaml.Unmarshal([]byte(payload), mapValue.Interface())
if err != nil {
return err
}
}
m.SetMapIndex(reflect.ValueOf(strings.ToLower(path[0])), reflect.Indirect(mapValue))
return nil
}
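// The following sketch is added for illustration and is not part of the
// upstream file: it shows a single environment variable materializing a
// nested map entry. The variable name and payload are hypothetical.
func exampleMapOverride(p *Parser) (map[string]interface{}, error) {
	storage := map[string]interface{}{}
	// Walking ["CACHE", "BLOBDESCRIPTOR"] creates the intermediate "cache"
	// map, then unmarshals the payload under the "blobdescriptor" key,
	// yielding storage["cache"].(map[string]interface{})["blobdescriptor"].
	err := p.overwriteFields(
		reflect.ValueOf(&storage),
		"REGISTRY_STORAGE_CACHE_BLOBDESCRIPTOR",
		[]string{"CACHE", "BLOBDESCRIPTOR"},
		"inmemory",
	)
	return storage, err
}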


@ -0,0 +1,85 @@
package context
import (
"sync"
"github.com/docker/distribution/uuid"
"golang.org/x/net/context"
)
// Context is a copy of Context from the golang.org/x/net/context package.
type Context interface {
context.Context
}
// instanceContext is a context that provides only an instance id. It is
// provided as the main background context.
type instanceContext struct {
Context
id string // id of context, logged as "instance.id"
once sync.Once // once protect generation of the id
}
func (ic *instanceContext) Value(key interface{}) interface{} {
if key == "instance.id" {
ic.once.Do(func() {
// We want to lazy initialize the UUID such that we don't
// call a random generator from the package initialization
// code. For various reasons random could not be available
// https://github.com/docker/distribution/issues/782
ic.id = uuid.Generate().String()
})
return ic.id
}
return ic.Context.Value(key)
}
var background = &instanceContext{
Context: context.Background(),
}
// Background returns a non-nil, empty Context. The background context
// provides a single key, "instance.id" that is globally unique to the
// process.
func Background() Context {
return background
}
// WithValue returns a copy of parent in which the value associated with key is
// val. Use context Values only for request-scoped data that transits processes
// and APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key, val interface{}) Context {
return context.WithValue(parent, key, val)
}
// stringMapContext is a simple context implementation that checks a map for a
// key, falling back to a parent if not present.
type stringMapContext struct {
context.Context
m map[string]interface{}
}
// WithValues returns a context that proxies lookups through a map. Only
// supports string keys.
func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
mo := make(map[string]interface{}, len(m)) // make our own copy.
for k, v := range m {
mo[k] = v
}
return stringMapContext{
Context: ctx,
m: mo,
}
}
func (smc stringMapContext) Value(key interface{}) interface{} {
if ks, ok := key.(string); ok {
if v, ok := smc.m[ks]; ok {
return v
}
}
return smc.Context.Value(key)
}
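// The following sketch is added for illustration and is not part of the
// upstream file: request-scoped values set via WithValues are resolved
// through the usual Value lookup chain. The key name is hypothetical.
func exampleWithValues() {
	ctx := WithValues(Background(), map[string]interface{}{
		"app.name": "registry",
	})
	name, _ := ctx.Value("app.name").(string)
	_ = name // "registry"
}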

vendor/github.com/docker/distribution/context/doc.go generated vendored Normal file

@ -0,0 +1,89 @@
// Package context provides several utilities for working with
// golang.org/x/net/context in http requests. Primarily, the focus is on
// logging relevant request information but this package is not limited to
// that purpose.
//
// The easiest way to get started is to get the background context:
//
// ctx := context.Background()
//
// The returned context should be passed around your application and be the
// root of all other context instances. If the application has a version, this
// line should be called before anything else:
//
// ctx := context.WithVersion(context.Background(), version)
//
// The above will store the version in the context and will be available to
// the logger.
//
// Logging
//
// The most useful aspect of this package is GetLogger. This function takes
// any context.Context interface and returns the current logger from the
// context. Canonical usage looks like this:
//
// GetLogger(ctx).Infof("something interesting happened")
//
// GetLogger also takes optional key arguments. The keys will be looked up in
// the context and reported with the logger. The following example would
// return a logger that prints the version with each log message:
//
// ctx := context.Context(context.Background(), "version", version)
// GetLogger(ctx, "version").Infof("this log message has a version field")
//
// The above would print out a log message like this:
//
// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
//
// When used with WithLogger, we gain the ability to decorate the context with
// loggers that have information from disparate parts of the call stack.
// Following from the version example, we can build a new context with the
// configured logger such that we always print the version field:
//
// ctx = WithLogger(ctx, GetLogger(ctx, "version"))
//
// Since the logger has been pushed to the context, we can now get the version
// field for free with our log messages. Future calls to GetLogger on the new
// context will have the version field:
//
// GetLogger(ctx).Infof("this log message has a version field")
//
// This becomes more powerful when we start stacking loggers. Let's say we
// have the version logger from above but also want a request id. Using the
// context above, in our request scoped function, we place another logger in
// the context:
//
// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
//
// When GetLogger is called on the new context, "http.request.id" will be
// included as a logger field, along with the original "version" field:
//
// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
//
// Note that this only affects the new context, the previous context, with the
// version field, can be used independently. Put another way, the new logger,
// added to the request context, is unique to that context and can have
// request scoped variables.
//
// HTTP Requests
//
// This package also contains several methods for working with http requests.
// The concepts are very similar to those described above. We simply place the
// request in the context using WithRequest. This makes the request variables
// available. GetRequestLogger can then be called to get request specific
// variables in a log line:
//
// ctx = WithRequest(ctx, req)
// GetRequestLogger(ctx).Infof("request variables")
//
// Like above, if we want to include the request data in all log messages in
// the context, we push the logger to a new context and use that one:
//
// ctx = WithLogger(ctx, GetRequestLogger(ctx))
//
// The concept is fairly powerful and ensures that calls throughout the stack
// can be traced in log messages. Using the fields like "http.request.id", one
// can analyze call flow for a particular request with a simple grep of the
// logs.
package context

vendor/github.com/docker/distribution/context/http.go generated vendored Normal file

@ -0,0 +1,366 @@
package context
import (
"errors"
"net"
"net/http"
"strings"
"sync"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/distribution/uuid"
"github.com/gorilla/mux"
)
// Common errors used with this package.
var (
ErrNoRequestContext = errors.New("no http request in context")
ErrNoResponseWriterContext = errors.New("no http response in context")
)
func parseIP(ipStr string) net.IP {
ip := net.ParseIP(ipStr)
if ip == nil {
log.Warnf("invalid remote IP address: %q", ipStr)
}
return ip
}
// RemoteAddr extracts the remote address of the request, taking into
// account proxy headers.
func RemoteAddr(r *http.Request) string {
if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
proxies := strings.Split(prior, ",")
if len(proxies) > 0 {
remoteAddr := strings.Trim(proxies[0], " ")
if parseIP(remoteAddr) != nil {
return remoteAddr
}
}
}
// X-Real-Ip is less supported, but worth checking in the
// absence of X-Forwarded-For
if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
if parseIP(realIP) != nil {
return realIP
}
}
return r.RemoteAddr
}
// RemoteIP extracts the remote IP of the request, taking into
// account proxy headers.
func RemoteIP(r *http.Request) string {
addr := RemoteAddr(r)
// Try parsing it as "IP:port"
if ip, _, err := net.SplitHostPort(addr); err == nil {
return ip
}
return addr
}
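// The following sketch is added for illustration and is not part of the
// upstream file: it shows how proxy headers influence the resolved remote
// address. The addresses are documentation values.
func exampleRemoteAddr() {
	r, _ := http.NewRequest("GET", "http://localhost/", nil)
	r.RemoteAddr = "10.0.0.1:34567"
	r.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.1")
	_ = RemoteAddr(r) // "203.0.113.7": the first X-Forwarded-For entry wins
	_ = RemoteIP(r)   // "203.0.113.7": returned as-is, since no port is present
}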
// WithRequest places the request on the context. The context of the request
// is assigned a unique id, available at "http.request.id". The request itself
// is available at "http.request". Other common attributes are available under
// the prefix "http.request.". If a request is already present on the context,
// this method will panic.
func WithRequest(ctx Context, r *http.Request) Context {
if ctx.Value("http.request") != nil {
// NOTE(stevvooe): This needs to be considered a programming error. It
// is unlikely that we'd want to have more than one request in
// context.
panic("only one request per context")
}
return &httpRequestContext{
Context: ctx,
startedAt: time.Now(),
id: uuid.Generate().String(),
r: r,
}
}
// GetRequest returns the http request in the given context. Returns
// ErrNoRequestContext if the context does not have an http request associated
// with it.
func GetRequest(ctx Context) (*http.Request, error) {
if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
return r, nil
}
return nil, ErrNoRequestContext
}
// GetRequestID attempts to resolve the current request id, if possible. An
// empty string is returned if it is not available on the context.
func GetRequestID(ctx Context) string {
return GetStringValue(ctx, "http.request.id")
}
// WithResponseWriter returns a new context and response writer that makes
// interesting response statistics available within the context.
func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
if closeNotifier, ok := w.(http.CloseNotifier); ok {
irwCN := &instrumentedResponseWriterCN{
instrumentedResponseWriter: instrumentedResponseWriter{
ResponseWriter: w,
Context: ctx,
},
CloseNotifier: closeNotifier,
}
return irwCN, irwCN
}
irw := instrumentedResponseWriter{
ResponseWriter: w,
Context: ctx,
}
return &irw, &irw
}
// GetResponseWriter returns the http.ResponseWriter from the provided
// context. If not present, ErrNoResponseWriterContext is returned. The
// returned instance provides instrumentation in the context.
func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
v := ctx.Value("http.response")
rw, ok := v.(http.ResponseWriter)
if !ok || rw == nil {
return nil, ErrNoResponseWriterContext
}
return rw, nil
}
// getVarsFromRequest lets us change request vars implementation for testing
// and maybe future changes.
var getVarsFromRequest = mux.Vars
// WithVars extracts gorilla/mux vars and makes them available on the returned
// context. Variables are available at keys with the prefix "vars.". For
// example, if looking for the variable "name", it can be accessed as
// "vars.name". Implementations that are accessing values need not know that
// the underlying context is implemented with gorilla/mux vars.
func WithVars(ctx Context, r *http.Request) Context {
return &muxVarsContext{
Context: ctx,
vars: getVarsFromRequest(r),
}
}
// GetRequestLogger returns a logger that contains fields from the request in
// the current context. If the request is not available in the context, no
// fields will display. Request loggers can safely be pushed onto the context.
func GetRequestLogger(ctx Context) Logger {
return GetLogger(ctx,
"http.request.id",
"http.request.method",
"http.request.host",
"http.request.uri",
"http.request.referer",
"http.request.useragent",
"http.request.remoteaddr",
"http.request.contenttype")
}
// GetResponseLogger reads the current response stats and builds a logger.
// Because the values are read at call time, pushing a logger returned from
// this function on the context will lead to missing or invalid data. Only
// call this at the end of a request, after the response has been written.
func GetResponseLogger(ctx Context) Logger {
l := getLogrusLogger(ctx,
"http.response.written",
"http.response.status",
"http.response.contenttype")
duration := Since(ctx, "http.request.startedat")
if duration > 0 {
l = l.WithField("http.response.duration", duration.String())
}
return l
}
// httpRequestContext makes information about a request available to context.
type httpRequestContext struct {
Context
startedAt time.Time
id string
r *http.Request
}
// Value returns a keyed element of the request for use in the context. To get
// the request itself, query "http.request". For other components, access them
// as "http.request.<component>"; for example, "http.request.uri" returns
// r.RequestURI.
func (ctx *httpRequestContext) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "http.request" {
return ctx.r
}
if !strings.HasPrefix(keyStr, "http.request.") {
goto fallback
}
parts := strings.Split(keyStr, ".")
if len(parts) != 3 {
goto fallback
}
switch parts[2] {
case "uri":
return ctx.r.RequestURI
case "remoteaddr":
return RemoteAddr(ctx.r)
case "method":
return ctx.r.Method
case "host":
return ctx.r.Host
case "referer":
referer := ctx.r.Referer()
if referer != "" {
return referer
}
case "useragent":
return ctx.r.UserAgent()
case "id":
return ctx.id
case "startedat":
return ctx.startedAt
case "contenttype":
ct := ctx.r.Header.Get("Content-Type")
if ct != "" {
return ct
}
}
}
fallback:
return ctx.Context.Value(key)
}
type muxVarsContext struct {
Context
vars map[string]string
}
func (ctx *muxVarsContext) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "vars" {
return ctx.vars
}
if strings.HasPrefix(keyStr, "vars.") {
keyStr = strings.TrimPrefix(keyStr, "vars.")
}
if v, ok := ctx.vars[keyStr]; ok {
return v
}
}
return ctx.Context.Value(key)
}
// instrumentedResponseWriterCN provides response writer information in a
// context. It implements http.CloseNotifier so that users can detect
// early disconnects.
type instrumentedResponseWriterCN struct {
instrumentedResponseWriter
http.CloseNotifier
}
// instrumentedResponseWriter provides response writer information in a
// context. This variant is only used in the case where CloseNotifier is not
// implemented by the parent ResponseWriter.
type instrumentedResponseWriter struct {
http.ResponseWriter
Context
mu sync.Mutex
status int
written int64
}
func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
n, err = irw.ResponseWriter.Write(p)
irw.mu.Lock()
irw.written += int64(n)
// Guess the likely status if not set.
if irw.status == 0 {
irw.status = http.StatusOK
}
irw.mu.Unlock()
return
}
func (irw *instrumentedResponseWriter) WriteHeader(status int) {
irw.ResponseWriter.WriteHeader(status)
irw.mu.Lock()
irw.status = status
irw.mu.Unlock()
}
func (irw *instrumentedResponseWriter) Flush() {
if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
flusher.Flush()
}
}
func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "http.response" {
return irw
}
if !strings.HasPrefix(keyStr, "http.response.") {
goto fallback
}
parts := strings.Split(keyStr, ".")
if len(parts) != 3 {
goto fallback
}
irw.mu.Lock()
defer irw.mu.Unlock()
switch parts[2] {
case "written":
return irw.written
case "status":
return irw.status
case "contenttype":
contentType := irw.Header().Get("Content-Type")
if contentType != "" {
return contentType
}
}
}
fallback:
return irw.Context.Value(key)
}
func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "http.response" {
return irw
}
}
return irw.instrumentedResponseWriter.Value(key)
}


@ -0,0 +1,285 @@
package context
import (
"net/http"
"net/http/httptest"
"net/http/httputil"
"net/url"
"reflect"
"testing"
"time"
)
func TestWithRequest(t *testing.T) {
var req http.Request
start := time.Now()
req.Method = "GET"
req.Host = "example.com"
req.RequestURI = "/test-test"
req.Header = make(http.Header)
req.Header.Set("Referer", "foo.com/referer")
req.Header.Set("User-Agent", "test/0.1")
ctx := WithRequest(Background(), &req)
for _, testcase := range []struct {
key string
expected interface{}
}{
{
key: "http.request",
expected: &req,
},
{
key: "http.request.id",
},
{
key: "http.request.method",
expected: req.Method,
},
{
key: "http.request.host",
expected: req.Host,
},
{
key: "http.request.uri",
expected: req.RequestURI,
},
{
key: "http.request.referer",
expected: req.Referer(),
},
{
key: "http.request.useragent",
expected: req.UserAgent(),
},
{
key: "http.request.remoteaddr",
expected: req.RemoteAddr,
},
{
key: "http.request.startedat",
},
} {
v := ctx.Value(testcase.key)
if v == nil {
t.Fatalf("value not found for %q", testcase.key)
}
if testcase.expected != nil && v != testcase.expected {
t.Fatalf("%s: %v != %v", testcase.key, v, testcase.expected)
}
// Key specific checks!
switch testcase.key {
case "http.request.id":
if _, ok := v.(string); !ok {
t.Fatalf("request id not a string: %v", v)
}
case "http.request.startedat":
vt, ok := v.(time.Time)
if !ok {
t.Fatalf("value not a time: %v", v)
}
now := time.Now()
if vt.After(now) {
t.Fatalf("time generated too late: %v > %v", vt, now)
}
if vt.Before(start) {
t.Fatalf("time generated too early: %v < %v", vt, start)
}
}
}
}
type testResponseWriter struct {
flushed bool
status int
written int64
header http.Header
}
func (trw *testResponseWriter) Header() http.Header {
if trw.header == nil {
trw.header = make(http.Header)
}
return trw.header
}
func (trw *testResponseWriter) Write(p []byte) (n int, err error) {
if trw.status == 0 {
trw.status = http.StatusOK
}
n = len(p)
trw.written += int64(n)
return
}
func (trw *testResponseWriter) WriteHeader(status int) {
trw.status = status
}
func (trw *testResponseWriter) Flush() {
trw.flushed = true
}
func TestWithResponseWriter(t *testing.T) {
trw := testResponseWriter{}
ctx, rw := WithResponseWriter(Background(), &trw)
if ctx.Value("http.response") != rw {
t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw)
}
grw, err := GetResponseWriter(ctx)
if err != nil {
t.Fatalf("error getting response writer: %v", err)
}
if grw != rw {
t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw)
}
if ctx.Value("http.response.status") != 0 {
t.Fatalf("response status should always be a number and should be zero here: %v != 0", ctx.Value("http.response.status"))
}
if n, err := rw.Write(make([]byte, 1024)); err != nil {
t.Fatalf("unexpected error writing: %v", err)
} else if n != 1024 {
t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024)
}
if ctx.Value("http.response.status") != http.StatusOK {
t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusOK)
}
if ctx.Value("http.response.written") != int64(1024) {
t.Fatalf("unexpected number reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024)
}
// Make sure flush propagates
rw.(http.Flusher).Flush()
if !trw.flushed {
t.Fatalf("response writer not flushed")
}
// Write another status and make sure context is correct. This normally
// wouldn't work except for in this contrived testcase.
rw.WriteHeader(http.StatusBadRequest)
if ctx.Value("http.response.status") != http.StatusBadRequest {
t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusBadRequest)
}
}
func TestWithVars(t *testing.T) {
var req http.Request
vars := map[string]string{
"foo": "asdf",
"bar": "qwer",
}
getVarsFromRequest = func(r *http.Request) map[string]string {
if r != &req {
t.Fatalf("unexpected request: %v != %v", r, req)
}
return vars
}
ctx := WithVars(Background(), &req)
for _, testcase := range []struct {
key string
expected interface{}
}{
{
key: "vars",
expected: vars,
},
{
key: "vars.foo",
expected: "asdf",
},
{
key: "vars.bar",
expected: "qwer",
},
} {
v := ctx.Value(testcase.key)
if !reflect.DeepEqual(v, testcase.expected) {
t.Fatalf("%q: %v != %v", testcase.key, v, testcase.expected)
}
}
}
// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test
// RemoteAddr(). A fake RemoteAddr cannot be set on the HTTP request - it is overwritten
// at the transport layer to 127.0.0.1:<port> . However, as the X-Forwarded-For header
// just contains the IP address, it is different enough for testing.
func TestRemoteAddr(t *testing.T) {
var expectedRemote string
backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
if r.RemoteAddr == expectedRemote {
t.Errorf("Unexpected matching remote addresses")
}
actualRemote := RemoteAddr(r)
if expectedRemote != actualRemote {
t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote)
}
w.WriteHeader(200)
}))
defer backend.Close()
backendURL, err := url.Parse(backend.URL)
if err != nil {
t.Fatal(err)
}
proxy := httputil.NewSingleHostReverseProxy(backendURL)
frontend := httptest.NewServer(proxy)
defer frontend.Close()
// X-Forwarded-For set by proxy
expectedRemote = "127.0.0.1"
proxyReq, err := http.NewRequest("GET", frontend.URL, nil)
if err != nil {
t.Fatal(err)
}
_, err = http.DefaultClient.Do(proxyReq)
if err != nil {
t.Fatal(err)
}
// RemoteAddr in X-Real-Ip
getReq, err := http.NewRequest("GET", backend.URL, nil)
if err != nil {
t.Fatal(err)
}
expectedRemote = "1.2.3.4"
getReq.Header["X-Real-ip"] = []string{expectedRemote}
_, err = http.DefaultClient.Do(getReq)
if err != nil {
t.Fatal(err)
}
// Valid X-Real-Ip and invalid X-Forwarded-For
getReq.Header["X-forwarded-for"] = []string{"1.2.3"}
_, err = http.DefaultClient.Do(getReq)
if err != nil {
t.Fatal(err)
}
}

vendor/github.com/docker/distribution/context/logger.go generated vendored Normal file

@ -0,0 +1,116 @@
package context
import (
"fmt"
"github.com/Sirupsen/logrus"
"runtime"
)
// Logger provides a leveled-logging interface.
type Logger interface {
// standard logger methods
Print(args ...interface{})
Printf(format string, args ...interface{})
Println(args ...interface{})
Fatal(args ...interface{})
Fatalf(format string, args ...interface{})
Fatalln(args ...interface{})
Panic(args ...interface{})
Panicf(format string, args ...interface{})
Panicln(args ...interface{})
// Leveled methods, from logrus
Debug(args ...interface{})
Debugf(format string, args ...interface{})
Debugln(args ...interface{})
Error(args ...interface{})
Errorf(format string, args ...interface{})
Errorln(args ...interface{})
Info(args ...interface{})
Infof(format string, args ...interface{})
Infoln(args ...interface{})
Warn(args ...interface{})
Warnf(format string, args ...interface{})
Warnln(args ...interface{})
}
// WithLogger creates a new context with provided logger.
func WithLogger(ctx Context, logger Logger) Context {
return WithValue(ctx, "logger", logger)
}
// GetLoggerWithField returns a logger instance with the specified field key
// and value without affecting the context. Extra specified keys will be
// resolved from the context.
func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
}
// GetLoggerWithFields returns a logger instance with the specified fields
// without affecting the context. Extra specified keys will be resolved from
// the context.
func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
// must convert from interface{} -> interface{} to string -> interface{} for logrus.
lfields := make(logrus.Fields, len(fields))
for key, value := range fields {
lfields[fmt.Sprint(key)] = value
}
return getLogrusLogger(ctx, keys...).WithFields(lfields)
}
// GetLogger returns the logger from the current context, if present. If one
// or more keys are provided, they will be resolved on the context and
// included in the logger. While context.Value takes an interface, any key
// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
// a logging key field. If context keys are integer constants, for example,
// it's recommended that a String method is implemented.
func GetLogger(ctx Context, keys ...interface{}) Logger {
return getLogrusLogger(ctx, keys...)
}
// getLogrusLogger returns the logrus logger for the context. If one or more
// keys are provided, they will be resolved on the context and included in the
// logger. Only use this function if specific logrus functionality is
// required.
func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
var logger *logrus.Entry
// Get a logger, if it is present.
loggerInterface := ctx.Value("logger")
if loggerInterface != nil {
if lgr, ok := loggerInterface.(*logrus.Entry); ok {
logger = lgr
}
}
if logger == nil {
fields := logrus.Fields{}
// Fill in the instance id, if we have it.
instanceID := ctx.Value("instance.id")
if instanceID != nil {
fields["instance.id"] = instanceID
}
fields["go.version"] = runtime.Version()
// If no logger is found, just return the standard logger.
logger = logrus.StandardLogger().WithFields(fields)
}
fields := logrus.Fields{}
for _, key := range keys {
v := ctx.Value(key)
if v != nil {
fields[fmt.Sprint(key)] = v
}
}
return logger.WithFields(fields)
}
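// The following sketch is added for illustration and is not part of the
// upstream file: a context key is resolved into a log field. The
// "component" key is hypothetical.
func exampleLogging(ctx Context) {
	ctx = WithValue(ctx, "component", "example")
	// "component" is looked up on the context and attached as a field.
	GetLogger(ctx, "component").Infof("component initialized")
}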

vendor/github.com/docker/distribution/context/trace.go generated vendored Normal file

@ -0,0 +1,104 @@
package context
import (
"runtime"
"time"
"github.com/docker/distribution/uuid"
)
// WithTrace allocates a traced timing span in a new context. This allows a
// caller to track the time between calling WithTrace and the returned done
// function. When the done function is called, a log message is emitted with a
// "trace.duration" field, corresponding to the elapsed time and a
// "trace.func" field, corresponding to the function that called WithTrace.
//
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
// dapper-like tracing. This function should be complemented with a WithSpan
// method that could be used for tracing distributed RPC calls.
//
// The main benefit of this function is to post-process log messages or
// intercept them in a hook to provide timing data. Trace ids and parent ids
// can also be linked to provide call tracing, if so required.
//
// Here is an example of the usage:
//
// func timedOperation(ctx Context) {
// ctx, done := WithTrace(ctx)
// defer done("this will be the log message")
// // ... function body ...
// }
//
// If the function ran for roughly 1s, such a usage would emit a log message
// as follows:
//
// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id=<id> ...
//
// Notice that the function name is automatically resolved, along with the
// package and a trace id is emitted that can be linked with parent ids.
func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
if ctx == nil {
ctx = Background()
}
pc, file, line, _ := runtime.Caller(1)
f := runtime.FuncForPC(pc)
ctx = &traced{
Context: ctx,
id: uuid.Generate().String(),
start: time.Now(),
parent: GetStringValue(ctx, "trace.id"),
fnname: f.Name(),
file: file,
line: line,
}
return ctx, func(format string, a ...interface{}) {
GetLogger(ctx,
"trace.duration",
"trace.id",
"trace.parent.id",
"trace.func",
"trace.file",
"trace.line").
Debugf(format, a...)
}
}
// traced represents a context that is traced for function call timing. It
// also provides fast lookup for the various attributes that are available on
// the trace.
type traced struct {
Context
id string
parent string
start time.Time
fnname string
file string
line int
}
func (ts *traced) Value(key interface{}) interface{} {
switch key {
case "trace.start":
return ts.start
case "trace.duration":
return time.Since(ts.start)
case "trace.id":
return ts.id
case "trace.parent.id":
if ts.parent == "" {
return nil // must return nil to signal no parent.
}
return ts.parent
case "trace.func":
return ts.fnname
case "trace.file":
return ts.file
case "trace.line":
return ts.line
}
return ts.Context.Value(key)
}


@ -0,0 +1,85 @@
package context
import (
"runtime"
"testing"
"time"
)
// TestWithTrace ensures that tracing has the expected values in the context.
func TestWithTrace(t *testing.T) {
pc, file, _, _ := runtime.Caller(0) // get current caller.
f := runtime.FuncForPC(pc)
base := []valueTestCase{
{
key: "trace.id",
notnilorempty: true,
},
{
key: "trace.file",
expected: file,
notnilorempty: true,
},
{
key: "trace.line",
notnilorempty: true,
},
{
key: "trace.start",
notnilorempty: true,
},
}
ctx, done := WithTrace(Background())
defer done("this will be emitted at end of test")
checkContextForValues(t, ctx, append(base, valueTestCase{
key: "trace.func",
expected: f.Name(),
}))
traced := func() {
parentID := ctx.Value("trace.id") // ensure the parent trace id is correct.
pc, _, _, _ := runtime.Caller(0) // get current caller.
f := runtime.FuncForPC(pc)
ctx, done := WithTrace(ctx)
defer done("this should be subordinate to the other trace")
time.Sleep(time.Second)
checkContextForValues(t, ctx, append(base, valueTestCase{
key: "trace.func",
expected: f.Name(),
}, valueTestCase{
key: "trace.parent.id",
expected: parentID,
}))
}
traced()
time.Sleep(time.Second)
}
type valueTestCase struct {
key string
expected interface{}
notnilorempty bool // just check not empty/not nil
}
func checkContextForValues(t *testing.T, ctx Context, values []valueTestCase) {
for _, testcase := range values {
v := ctx.Value(testcase.key)
if testcase.notnilorempty {
if v == nil || v == "" {
t.Fatalf("value was nil or empty for %q: %#v", testcase.key, v)
}
continue
}
if v != testcase.expected {
t.Fatalf("unexpected value for key %q: %v != %v", testcase.key, v, testcase.expected)
}
}
}

vendor/github.com/docker/distribution/context/util.go generated vendored Normal file

@ -0,0 +1,24 @@
package context
import (
"time"
)
// Since looks up key, which should be a time.Time, and returns the duration
// since that time. If the key is not found, the value returned will be zero.
// This is helpful when inferring metrics related to context execution times.
func Since(ctx Context, key interface{}) time.Duration {
if startedAt, ok := ctx.Value(key).(time.Time); ok {
return time.Since(startedAt)
}
return 0
}
// GetStringValue returns a string value from the context. The empty string
// will be returned if not found.
func GetStringValue(ctx Context, key interface{}) (value string) {
if valuev, ok := ctx.Value(key).(string); ok {
value = valuev
}
return value
}
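// The following sketch is added for illustration and is not part of the
// upstream file: elapsed time measured from a timestamp stored on the
// context. The key name is hypothetical.
func exampleSince(ctx Context) time.Duration {
	ctx = WithValue(ctx, "op.startedat", time.Now())
	// ... perform the operation ...
	return Since(ctx, "op.startedat") // zero if the key were absent
}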


@ -0,0 +1,16 @@
package context
// WithVersion stores the application version in the context. The new context
// gets a logger to ensure log messages are marked with the application
// version.
func WithVersion(ctx Context, version string) Context {
ctx = WithValue(ctx, "version", version)
// push a new logger onto the stack
return WithLogger(ctx, GetLogger(ctx, "version"))
}
// GetVersion returns the application version from the context. An empty
// string may be returned if the version was not set on the context.
func GetVersion(ctx Context) string {
return GetStringValue(ctx, "version")
}


@ -0,0 +1,19 @@
package context
import "testing"
func TestVersionContext(t *testing.T) {
ctx := Background()
if GetVersion(ctx) != "" {
t.Fatalf("context should not yet have a version")
}
expected := "2.1-whatever"
ctx = WithVersion(ctx, expected)
version := GetVersion(ctx)
if version != expected {
t.Fatalf("version was not set: %q != %q", version, expected)
}
}


@ -0,0 +1,36 @@
# Apache HTTPd sample for Registry v1, v2 and mirror
3 containers involved
* Docker Registry v1 (registry 0.9.1)
* Docker Registry v2 (registry 2.0.0)
* Docker Registry v1 in mirror mode
HTTP for mirror and HTTPS for v1 & v2
* http://registry.example.com proxies Docker Registry 1.0 in Mirror mode
* https://registry.example.com proxies Docker Registry 1.0 or 2.0 in Hosting mode
## 3 Docker containers should be started
* Docker Registry 1.0 in Mirror mode: port 5001
* Docker Registry 1.0 in Hosting mode: port 5000
* Docker Registry 2.0 in Hosting mode: port 5002
### Registry v1
docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/docker-registry/storage/hosting-v1:/tmp -p 5000:5000 registry:0.9.1
### Mirror
docker run -d -e SETTINGS_FLAVOR=dev -e STANDALONE=false -e MIRROR_SOURCE=https://registry-1.docker.io -e MIRROR_SOURCE_INDEX=https://index.docker.io \
-e MIRROR_TAGS_CACHE_TTL=172800 -v /var/lib/docker-registry/storage/mirror:/tmp -p 5001:5000 registry:0.9.1
### Registry v2
docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/axway/docker-registry/storage/hosting2-v2:/tmp -p 5002:5000 registry:2
# For Hosting mode access
* users should have an account (valid-user) to be able to fetch images
* only users using account docker-deployer will be allowed to push images


@ -0,0 +1,127 @@
#
# Sample Apache 2.x configuration where:
#
<VirtualHost *:80>
ServerName registry.example.com
ServerAlias www.registry.example.com
ProxyRequests off
ProxyPreserveHost on
# no proxy for /error/ (Apache HTTPd errors messages)
ProxyPass /error/ !
ProxyPass /_ping http://localhost:5001/_ping
ProxyPassReverse /_ping http://localhost:5001/_ping
ProxyPass /v1 http://localhost:5001/v1
ProxyPassReverse /v1 http://localhost:5001/v1
# Logs
ErrorLog ${APACHE_LOG_DIR}/mirror_error_log
CustomLog ${APACHE_LOG_DIR}/mirror_access_log combined env=!dontlog
</VirtualHost>
<VirtualHost *:443>
ServerName registry.example.com
ServerAlias www.registry.example.com
SSLEngine on
SSLCertificateFile /etc/apache2/ssl/registry.example.com.crt
SSLCertificateKeyFile /etc/apache2/ssl/registry.example.com.key
# Higher Strength SSL Ciphers
SSLProtocol all -SSLv2 -SSLv3 -TLSv1
SSLCipherSuite RC4-SHA:HIGH
SSLHonorCipherOrder on
# Logs
ErrorLog ${APACHE_LOG_DIR}/registry_error_ssl_log
CustomLog ${APACHE_LOG_DIR}/registry_access_ssl_log combined env=!dontlog
Header always set "Docker-Distribution-Api-Version" "registry/2.0"
Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0"
RequestHeader set X-Forwarded-Proto "https"
ProxyRequests off
ProxyPreserveHost on
# no proxy for /error/ (Apache HTTPd errors messages)
ProxyPass /error/ !
#
# Registry v1
#
ProxyPass /v1 http://localhost:5000/v1
ProxyPassReverse /v1 http://localhost:5000/v1
ProxyPass /_ping http://localhost:5000/_ping
ProxyPassReverse /_ping http://localhost:5000/_ping
# Authentication required for push
<Location /v1>
Order deny,allow
Allow from all
AuthName "Registry Authentication"
AuthType basic
AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd"
# Read access to authenticated users
<Limit GET HEAD>
Require valid-user
</Limit>
# Write access to docker-deployer account only
<Limit POST PUT DELETE>
Require user docker-deployer
</Limit>
</Location>
# Allow ping to run unauthenticated.
<Location /v1/_ping>
Satisfy any
Allow from all
</Location>
# Allow ping to run unauthenticated.
<Location /_ping>
Satisfy any
Allow from all
</Location>
#
# Registry v2
#
ProxyPass /v2 http://localhost:5002/v2
ProxyPassReverse /v2 http://localhost:5002/v2
<Location /v2>
Order deny,allow
Allow from all
AuthName "Registry Authentication"
AuthType basic
AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd"
# Read access to authenticated users
<Limit GET HEAD>
Require valid-user
</Limit>
# Write access to docker-deployer only
<Limit POST PUT DELETE>
Require user docker-deployer
</Limit>
</Location>
</VirtualHost>


@ -0,0 +1,147 @@
# Docker Compose V1 + V2 registry
This compose configuration configures a `v1` and `v2` registry behind an `nginx`
proxy. By default, you can access the combined registry at `localhost:5000`.
The configuration does not support pushing images to `v2` and pulling from `v1`.
If a `docker` client has a version less than 1.6, Nginx will route its requests
to the 1.0 registry. Requests from newer clients will route to the 2.0 registry.
### Install Docker Compose
1. Open a new terminal on the host with your `distribution` source.
2. Get the `docker-compose` binary.
$ sudo wget https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` -O /usr/local/bin/docker-compose
This command installs the binary in the `/usr/local/bin` directory.
3. Add executable permissions to the binary.
$ sudo chmod +x /usr/local/bin/docker-compose
## Build and run with Compose
1. In your terminal, navigate to the `distribution/contrib/compose` directory
This directory includes a single `docker-compose.yml` configuration.
nginx:
  build: "nginx"
  ports:
    - "5000:5000"
  links:
    - registryv1:registryv1
    - registryv2:registryv2
registryv1:
  image: registry
  ports:
    - "5000"
registryv2:
  build: "../../"
  ports:
    - "5000"
This configuration builds a new `nginx` image as specified by the
`nginx/Dockerfile` file. The 1.0 registry comes from Docker's official
public image. Finally, the registry 2.0 image is built from the
`distribution/Dockerfile` you've used previously.
2. Get a registry 1.0 image.
$ docker pull registry:0.9.1
The Compose configuration looks for this image locally. If you don't do this
step, later steps can fail.
3. Build the `nginx` and registry 2.0 images.
$ docker-compose build
registryv1 uses an image, skipping
Building registryv2...
Step 0 : FROM golang:1.4
...
Removing intermediate container 9f5f5068c3f3
Step 4 : COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf
---> 74acc70fa106
Removing intermediate container edb84c2b40cb
Successfully built 74acc70fa106
The command outputs its progress until it completes.
4. Start your configuration with compose.
$ docker-compose up
Recreating compose_registryv1_1...
Recreating compose_registryv2_1...
Recreating compose_nginx_1...
Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1
...
5. In another terminal, display the running configuration.
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
a81ad2557702 compose_nginx:latest "nginx -g 'daemon of 8 minutes ago Up 8 minutes 80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp compose_nginx_1
0618437450dd compose_registryv2:latest "registry cmd/regist 8 minutes ago Up 8 minutes 0.0.0.0:32777->5000/tcp compose_registryv2_1
aa82b1ed8e61 registry:latest "docker-registry" 8 minutes ago Up 8 minutes 0.0.0.0:32776->5000/tcp compose_registryv1_1
### Explore a bit
1. Check for TLS on your `nginx` server.
$ curl -v https://localhost:5000
* Rebuilt URL to: https://localhost:5000/
* Hostname was NOT found in DNS cache
* Trying 127.0.0.1...
* Connected to localhost (127.0.0.1) port 5000 (#0)
* successfully set certificate verify locations:
* CAfile: none
CApath: /etc/ssl/certs
* SSLv3, TLS handshake, Client hello (1):
* SSLv3, TLS handshake, Server hello (2):
* SSLv3, TLS handshake, CERT (11):
* SSLv3, TLS alert, Server hello (2):
* SSL certificate problem: self signed certificate
* Closing connection 0
curl: (60) SSL certificate problem: self signed certificate
More details here: http://curl.haxx.se/docs/sslcerts.html
2. Tag the `v1` registry image.
$ docker tag registry:latest localhost:5000/registry_one:latest
3. Push it to localhost.
$ docker push localhost:5000/registry_one:latest
If you are using the 1.6 Docker client, this pushes the image to the `v2` registry.
4. Use `curl` to list the image in the registry.
$ curl -v -X GET http://localhost:32777/v2/registry1/tags/list
* Hostname was NOT found in DNS cache
* Trying 127.0.0.1...
* Connected to localhost (127.0.0.1) port 32777 (#0)
> GET /v2/registry1/tags/list HTTP/1.1
> User-Agent: curl/7.36.0
> Host: localhost:32777
> Accept: */*
>
< HTTP/1.1 200 OK
< Content-Type: application/json; charset=utf-8
< Docker-Distribution-Api-Version: registry/2.0
< Date: Tue, 14 Apr 2015 22:34:13 GMT
< Content-Length: 39
<
{"name":"registry1","tags":["latest"]}
* Connection #0 to host localhost left intact
This example refers to the specific port assigned to the 2.0 registry. You saw
this port earlier, when you used `docker ps` to show your running containers.


@ -0,0 +1,15 @@
nginx:
  build: "nginx"
  ports:
    - "5000:5000"
  links:
    - registryv1:registryv1
    - registryv2:registryv2
registryv1:
  image: registry
  ports:
    - "5000"
registryv2:
  build: "../../"
  ports:
    - "5000"


@ -0,0 +1,6 @@
FROM nginx:1.7
COPY nginx.conf /etc/nginx/nginx.conf
COPY registry.conf /etc/nginx/conf.d/registry.conf
COPY docker-registry.conf /etc/nginx/docker-registry.conf
COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf


@ -0,0 +1,6 @@
proxy_pass http://docker-registry-v2;
proxy_set_header Host $http_host; # required for docker client's sake
proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 900;

View file

@ -0,0 +1,7 @@
proxy_pass http://docker-registry;
proxy_set_header Host $http_host; # required for docker client's sake
proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Authorization ""; # For basic auth through nginx in v1 to work, please comment this line
proxy_read_timeout 900;

View file

@ -0,0 +1,27 @@
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
include /etc/nginx/conf.d/*.conf;
}


@ -0,0 +1,41 @@
# Docker registry proxy for api versions 1 and 2
upstream docker-registry {
server registryv1:5000;
}
upstream docker-registry-v2 {
server registryv2:5000;
}
# No client auth or TLS
server {
listen 5000;
server_name localhost;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
chunked_transfer_encoding on;
location /v2/ {
# Do not allow connections from docker 1.5 and earlier
# docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
return 404;
}
# To add basic authentication to v2 use auth_basic setting plus add_header
# auth_basic "registry.localhost";
# auth_basic_user_file test.password;
# add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always;
include docker-registry-v2.conf;
}
location / {
include docker-registry.conf;
}
}


@ -0,0 +1,9 @@
FROM distribution/golem:0.1
MAINTAINER Docker Distribution Team <distribution@docker.com>
RUN apk add --no-cache git
ENV TMPDIR /var/lib/docker/tmp
WORKDIR /go/src/github.com/docker/distribution/contrib/docker-integration


@ -0,0 +1,63 @@
# Docker Registry Integration Testing
These integration tests cover interactions between registry clients such as
the docker daemon and the registry server. All tests can be run using the
[golem integration test runner](https://github.com/docker/golem).
The integration tests configure components using docker compose
(see docker-compose.yaml), and the runner can be configured using the golem
configuration file (see golem.conf).
## Running integration tests
### Run using multiversion script
The integration tests in the `contrib/docker-integration` directory can be run
by executing the run script `./run_multiversion.sh`. If there is no running
daemon to connect to, run as `./run_multiversion.sh -d`.
This command will build the distribution image from the locally checked out
version and run against multiple versions of docker defined in the script. To
run a specific version of the registry or docker, Golem will need to be
executed manually.
### Run manually using Golem
Using the golem tool directly allows running against multiple versions of
the registry and docker. Running against multiple versions of the registry
can be useful for testing changes in the docker daemon which are not
covered by the default run script.
#### Installing Golem
Golem is distributed as an executable binary which can be installed from
the [release page](https://github.com/docker/golem/releases/tag/v0.1).
#### Running golem with docker
Additionally, golem can be run as a docker image, requiring no additional
installation.
`docker run --privileged -v "$GOPATH/src/github.com/docker/distribution/contrib/docker-integration:/test" -w /test distribution/golem golem -rundaemon .`
#### Golem custom images
Golem tests specific versions of software by defining the docker images to test.
Run with registry 2.2.1 and docker 1.10.3
`golem -i golem-dind:latest,docker:1.10.3-dind,1.10.3 -i golem-distribution:latest,registry:2.2.1 .`
#### Use golem caching for developing tests
Golem allows caching image configuration to reduce test start up time.
Using this cache will allow tests with the same set of images to start
up quickly. This can be useful when developing tests and needing the
test to run quickly. If there are changes which affect the image (such as
building a new registry image), then startup time will be slower.
Run this command multiple times; after the first run, tests
should start much more quickly.
`golem -cache ~/.cache/docker/golem -i golem-dind:latest,docker:1.10.3-dind,1.10.3 -i golem-distribution:latest,registry:2.2.1 .`


@ -0,0 +1,91 @@
nginx:
  build: "nginx"
  ports:
    - "5000:5000"
    - "5002:5002"
    - "5440:5440"
    - "5441:5441"
    - "5442:5442"
    - "5443:5443"
    - "5444:5444"
    - "5445:5445"
    - "5446:5446"
    - "5447:5447"
    - "5448:5448"
    - "5554:5554"
    - "5555:5555"
    - "5556:5556"
    - "5557:5557"
    - "5558:5558"
    - "5559:5559"
    - "5600:5600"
    - "6666:6666"
  links:
    - registryv2:registryv2
    - malevolent:malevolent
    - registryv2token:registryv2token
    - tokenserver:tokenserver
    - registryv2tokenoauth:registryv2tokenoauth
    - registryv2tokenoauthnotls:registryv2tokenoauthnotls
    - tokenserveroauth:tokenserveroauth
registryv2:
  image: golem-distribution:latest
  ports:
    - "5000"
registryv2token:
  image: golem-distribution:latest
  ports:
    - "5000"
  volumes:
    - ./tokenserver/registry-config.yml:/etc/docker/registry/config.yml
    - ./tokenserver/certs/localregistry.cert:/etc/docker/registry/localregistry.cert
    - ./tokenserver/certs/localregistry.key:/etc/docker/registry/localregistry.key
    - ./tokenserver/certs/signing.cert:/etc/docker/registry/tokenbundle.pem
tokenserver:
  build: "tokenserver"
  command: "--debug -addr 0.0.0.0:5556 -issuer registry-test -passwd .htpasswd -tlscert tls.cert -tlskey tls.key -key sign.key -realm http://auth.localregistry:5556"
  ports:
    - "5556"
registryv2tokenoauth:
  image: golem-distribution:latest
  ports:
    - "5000"
  volumes:
    - ./tokenserver-oauth/registry-config.yml:/etc/docker/registry/config.yml
    - ./tokenserver-oauth/certs/localregistry.cert:/etc/docker/registry/localregistry.cert
    - ./tokenserver-oauth/certs/localregistry.key:/etc/docker/registry/localregistry.key
    - ./tokenserver-oauth/certs/signing.cert:/etc/docker/registry/tokenbundle.pem
registryv2tokenoauthnotls:
  image: golem-distribution:latest
  ports:
    - "5000"
  volumes:
    - ./tokenserver-oauth/registry-config-notls.yml:/etc/docker/registry/config.yml
    - ./tokenserver-oauth/certs/signing.cert:/etc/docker/registry/tokenbundle.pem
tokenserveroauth:
  build: "tokenserver-oauth"
  command: "--debug -addr 0.0.0.0:5559 -issuer registry-test -passwd .htpasswd -tlscert tls.cert -tlskey tls.key -key sign.key -realm http://auth.localregistry:5559"
  ports:
    - "5559"
malevolent:
  image: "dmcgowan/malevolent:0.1.0"
  command: "-l 0.0.0.0:6666 -r http://registryv2:5000 -c /certs/localregistry.cert -k /certs/localregistry.key"
  links:
    - registryv2:registryv2
  volumes:
    - ./malevolent-certs:/certs:ro
  ports:
    - "6666"
docker:
  image: golem-dind:latest
  container_name: dockerdaemon
  command: "docker daemon --debug -s $DOCKER_GRAPHDRIVER"
  privileged: true
  environment:
    DOCKER_GRAPHDRIVER:
  volumes:
    - /etc/generated_certs.d:/etc/docker/certs.d
    - /var/lib/docker
  links:
    - nginx:localregistry
    - nginx:auth.localregistry

View file

@ -0,0 +1,18 @@
[[suite]]
dind=true
images=[ "nginx:1.9", "dmcgowan/token-server:simple", "dmcgowan/token-server:oauth", "dmcgowan/malevolent:0.1.0" ]
[[suite.pretest]]
command="sh ./install_certs.sh /etc/generated_certs.d"
[[suite.testrunner]]
command="bats -t ."
format="tap"
env=["TEST_REPO=hello-world", "TEST_TAG=latest", "TEST_USER=testuser", "TEST_PASSWORD=passpassword", "TEST_REGISTRY=localregistry", "TEST_SKIP_PULL=true"]
[[suite.customimage]]
tag="golem-distribution:latest"
default="registry:2.2.1"
[[suite.customimage]]
tag="golem-dind:latest"
default="docker:1.10.1-dind"
version="1.10.1"

View file

@ -0,0 +1,101 @@
# has_digest enforces the last output line is "Digest: sha256:..."
# the input is the output from a docker push cli command
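# as a side effect, sets the global $digest variable used by pull-by-digest tests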
function has_digest() {
filtered=$(echo "$1" |sed -rn '/[dD]igest\: sha(256|384|512)/ p')
[ "$filtered" != "" ]
# See http://wiki.alpinelinux.org/wiki/Regex#BREs before making changes to regex
digest=$(expr "$filtered" : ".*\(sha[0-9]\{3,3\}:[a-z0-9]*\)")
}
# tempImage creates a new image using the provided name
# requires bats
function tempImage() {
dir=$(mktemp -d)
run dd if=/dev/urandom of="$dir/f" bs=1024 count=512
cat <<DockerFileContent > "$dir/Dockerfile"
FROM scratch
COPY f /f
CMD []
DockerFileContent
cp_t $dir "/tmpbuild/"
exec_t "cd /tmpbuild/; docker build --no-cache -t $1 .; rm -rf /tmpbuild/"
}
# skip basic auth tests with Docker 1.6, where they don't pass due to
# certificate issues; requires bats
function basic_auth_version_check() {
run sh -c 'docker version | fgrep -q "Client version: 1.6."'
if [ "$status" -eq 0 ]; then
skip "Basic auth tests don't support 1.6.x"
fi
}
# login issues a login to docker to the provided server
# uses user, password, and email variables set outside of the function
# requires bats
function login() {
rm -f /root/.docker/config.json
run docker_t login -u $user -p $password -e $email $1
if [ "$status" -ne 0 ]; then
echo $output
fi
[ "$status" -eq 0 ]
# First line is WARNING about credential save or email deprecation (maybe both)
[ "${lines[2]}" = "Login Succeeded" -o "${lines[1]}" = "Login Succeeded" ]
}
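# login_oauth logs in to the given server and verifies that an identity token
# was stored in /root/.docker/config.json for it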
function login_oauth() {
login $@
tmpFile=$(mktemp)
get_file_t /root/.docker/config.json $tmpFile
run awk -v RS="" "/\"$1\": \\{[[:space:]]+\"auth\": \"[[:alnum:]]+\",[[:space:]]+\"identitytoken\"/ {exit 3}" $tmpFile
[ "$status" -eq 3 ]
}
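# parse_version converts a dotted version string (e.g. 1.10.3) into a single
# comparable integer, stored in the global $version variable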
function parse_version() {
version=$(echo "$1" | cut -d '-' -f1) # Strip anything after '-'
major=$(echo "$version" | cut -d . -f1)
minor=$(echo "$version" | cut -d . -f2)
rev=$(echo "$version" | cut -d . -f3)
version=$((major * 1000 * 1000 + minor * 1000 + rev))
}
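# version_check skips the current test when the checked version is lower than
# the required minimum version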
function version_check() {
name=$1
checkv=$2
minv=$3
parse_version "$checkv"
v=$version
parse_version "$minv"
if [ "$v" -lt "$version" ]; then
skip "$name version \"$checkv\" does not meet required version \"$minv\""
fi
}
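# The *_t helpers operate on the dockerdaemon container started by docker-compose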
function get_file_t() {
docker cp dockerdaemon:$1 $2
}
function cp_t() {
docker cp $1 dockerdaemon:$2
}
function exec_t() {
docker exec dockerdaemon sh -c "$@"
}
function docker_t() {
docker exec dockerdaemon docker $@
}
# build creates a new docker image id from another image
function build() {
docker exec -i dockerdaemon docker build --no-cache -t $1 - <<DOCKERFILE
FROM $2
MAINTAINER distribution@docker.com
DOCKERFILE
}

View file

@ -0,0 +1,50 @@
#!/bin/sh
set -e
hostname="localregistry"
installdir="$1"
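# install_ca <installdir> <port> [certname] copies the CA certificate (and, when
# a cert name is given, a client certificate/key pair) into <installdir>/$hostname:<port>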
install_ca() {
mkdir -p $1/$hostname:$2
cp ./nginx/ssl/registry-ca+ca.pem $1/$hostname:$2/ca.crt
if [ "$3" != "" ]; then
cp ./nginx/ssl/registry-$3+client-cert.pem $1/$hostname:$2/client.cert
cp ./nginx/ssl/registry-$3+client-key.pem $1/$hostname:$2/client.key
fi
}
install_test_certs() {
install_ca $1 5440
install_ca $1 5441
install_ca $1 5442 ca
install_ca $1 5443 noca
install_ca $1 5444 ca
install_ca $1 5447 ca
# For the 5447 test, remove the CA so the server is untrusted while client certs remain
rm $1/${hostname}:5447/ca.crt
install_ca $1 5448
install_ca $1 5600
}
install_ca_file() {
mkdir -p $2
cp $1 $2/ca.crt
}
append_ca_file() {
mkdir -p $2
cat $1 >> $2/ca.crt
}
install_test_certs $installdir
# Malevolent server
install_ca_file ./malevolent-certs/ca.pem $installdir/$hostname:6666
# Token server
install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5554
install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5555
install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5557
install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5558
append_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5600

View file

@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDETCCAfugAwIBAgIQZRKt7OeG+TlC2riszYwQQTALBgkqhkiG9w0BAQswJjER
MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDgyMDIz
MjE0OVoXDTE4MDgwNDIzMjE0OVowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV
BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQDPdsUBStNMz4coXfQVIJIafG85VkngM4fV7hrg7AbiGLCWvq8cWOrYM50G9Wmo
twK1WeQ6bigYOjINgSfTxcy3adciVZIIJyXqboz6n2V0yRPWpakof939bvuAurAP
tSqQ2V5fGN0ZZn4J4IbXMSovKwo7sG3X6i4q/8DYHZ/mKjvCRMPC3MGWqunknpkm
dzyKbIFHaDKlAqIOwTsDhHvGzm/9n3D+h4sl5ZPBobuBEV2u5GR0H5ujak4+Kczt
thCWtRkzCfnjW0TEanheSYJGu8OgCGoFjQnHotgqvOO6iHZCsrB3gf8WQeou+y9e
+OyLZv3FmqdC9SXr3b0LGQTFAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV
HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL
A4IBAQC/PP2Y9QVhO8t4BXML1QpNRWqXG8Gg0P1XIh6M6FoxcGIodLdbzui828YB
wm9ZlyKars+nDdgLdQWawdV7hSd6s2NeQlHYQSGLsdTAVkgIxiD7D2Tw3kAZ6Zrj
dPikoVAc+rBMm/BXQLzy95IAbBVOHOpBkOOgF+TYxeLnOc3GzbUqBi1Pq97DMaxr
DaDuywH55P/6v7qt610UIsZ6+RZ78iiRx4Q+oRxEqGT0rXI76gVxOFabbJuFr1n1
kEWa3u/BssJzX3KVAm7oUtaBnj2SH5fokFmvZ5lBXA4QO/5doOa8yZiFFvvQs7EY
SWDxLrvS33UCtsCcpPggjehnxKaC
-----END CERTIFICATE-----

View file

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAz3bFAUrTTM+HKF30FSCSGnxvOVZJ4DOH1e4a4OwG4hiwlr6v
HFjq2DOdBvVpqLcCtVnkOm4oGDoyDYEn08XMt2nXIlWSCCcl6m6M+p9ldMkT1qWp
KH/d/W77gLqwD7UqkNleXxjdGWZ+CeCG1zEqLysKO7Bt1+ouKv/A2B2f5io7wkTD
wtzBlqrp5J6ZJnc8imyBR2gypQKiDsE7A4R7xs5v/Z9w/oeLJeWTwaG7gRFdruRk
dB+bo2pOPinM7bYQlrUZMwn541tExGp4XkmCRrvDoAhqBY0Jx6LYKrzjuoh2QrKw
d4H/FkHqLvsvXvjsi2b9xZqnQvUl6929CxkExQIDAQABAoIBAQCZjCUI7NFwwxQc
m1UAogeglMJZJHUu+9SoUD8Sg34grvdbyqueBm1iMOkiclaOKU1W3b4eRNNmAwRy
nEnW4km+4hX48m5PnHHijYnIIFsd0YjeT+Pf9qtdXFvGjeWq6oIjjM3dAnD50LKu
KsCB2oCHQoqjXNQfftJGvt2C1oI2/WvdOR4prnGXElVfASswX4PkP5LCfLhIx+Fr
7ErfaRIKigLSaAWLKaw3IlL12Q/KkuGcnzYIzIRwY4VJ64ENN6M3+KknfGovQItL
sCxceSe61THDP9AAI3Mequm8z3H0CImOWhJCge5l7ttLLMXZXqGxDCVx+3zvqlCa
X0cgGSVBAoGBAOvTN3oJJx1vnh1mRj8+hqzFq1bjm4T/Wp314QWLeo++43II4uMM
5hxUlO5ViY1sKxQrGwK+9c9ddxAvm5OAFFkzgW9EhDCu0tXUb2/vAJQ93SgqbcRu
coXWJpk0eNW/ouk2s1X8dzs+sCs3a4H64fEEj8yhwoyovjfucspsn7t1AoGBAOE2
ayLKx7CcWCiD/VGNvP7714MDst2isyq8reg8LEMmAaXR2IWWj5eGwKrImTQCsrjW
P37aBp1lcWuuYRKl/WEGBy6JLNdATyUoYc1Yo+8YdenekkOtOHHJerlK3OKi3ZVp
q4HJY9wzKg/wYLcbTmjjzKj+OBIZWwig73XUHwoRAoGBAJnuIrYbp1aFdvXFvnCl
xY6c8DwlEWx8qY+V4S2XX4bYmOnkdwSxdLplU1lGqCSRyIS/pj/imdyjK4Z7LNfY
sG+RORmB5a9JTgGZSqwLm5snzmXbXA7t8P7/S+6Q25baIeKMe/7SbplTT/bFk/0h
371MtvhhVfYuZwtnL7KFuLXJAoGBAMQ3UHKYsBC8tsZd8Pf8AL07mFHKiC04Etfa
Wb5rpri+RVM+mGITgnmnavehHHHHJAWMjPetZ3P8rSv/Ww4PVsoQoXM3Cr1jh1E9
dLCfWPz4l8syIscaBYKF4wnLItXGxj3mOgoy93EjlrMaYHlILjGOv4JBM4L5WmoT
JW7IaF6xAoGAZ4K8MwU/cAah8VinMmLGxvWWuBSgTTebuY5zN603MvFLKv5necuc
BZfTTxD+gOnxRT6QAh++tOsbBmsgR9HmTSlQSSgw1L7cwGyXzLCDYw+5K/03KXSU
DaFdgtfcDDJO8WtjOgjyTRzEAOsqFta1ige4pIu5fTilNVMQlhts5Iw=
-----END RSA PRIVATE KEY-----

View file

@ -0,0 +1,192 @@
#!/usr/bin/env bats
# This tests various expected error scenarios when pulling bad content
load helpers
host="localregistry:6666"
base="malevolent-test"
function setup() {
tempImage $base:latest
}
@test "Test malevolent proxy pass through" {
docker_t tag -f $base:latest $host/$base/nochange:latest
run docker_t push $host/$base/nochange:latest
echo $output
[ "$status" -eq 0 ]
has_digest "$output"
run docker_t pull $host/$base/nochange:latest
echo "$output"
[ "$status" -eq 0 ]
}
@test "Test malevolent image name change" {
imagename="$host/$base/rename"
image="$imagename:lastest"
docker_t tag -f $base:latest $image
run docker_t push $image
[ "$status" -eq 0 ]
has_digest "$output"
# Pull attempt should fail to verify manifest digest
run docker_t pull "$imagename@$digest"
echo "$output"
[ "$status" -ne 0 ]
}
@test "Test malevolent altered layer" {
image="$host/$base/addfile:latest"
tempImage $image
run docker_t push $image
echo "$output"
[ "$status" -eq 0 ]
has_digest "$output"
# Remove image to ensure layer is pulled and digest verified
docker_t rmi -f $image
run docker_t pull $image
echo "$output"
[ "$status" -ne 0 ]
}
@test "Test malevolent altered layer (by digest)" {
imagename="$host/$base/addfile"
image="$imagename:latest"
tempImage $image
run docker_t push $image
echo "$output"
[ "$status" -eq 0 ]
has_digest "$output"
# Remove image to ensure layer is pulled and digest verified
docker_t rmi -f $image
run docker_t pull "$imagename@$digest"
echo "$output"
[ "$status" -ne 0 ]
}
@test "Test malevolent poisoned images" {
truncid="777cf9284131"
poison="${truncid}d77ca0863fb7f054c0a276d7e227b5e9a5d62b497979a481fa32"
image1="$host/$base/image1/poison:$poison"
tempImage $image1
run docker_t push $image1
echo "$output"
[ "$status" -eq 0 ]
has_digest "$output"
image2="$host/$base/image2/poison:$poison"
tempImage $image2
run docker_t push $image2
echo "$output"
[ "$status" -eq 0 ]
has_digest "$output"
# Remove image to ensure layer is pulled and digest verified
docker_t rmi -f $image1
docker_t rmi -f $image2
run docker_t pull $image1
echo "$output"
[ "$status" -eq 0 ]
run docker_t pull $image2
echo "$output"
[ "$status" -eq 0 ]
# Test if there are multiple images
run docker_t images
echo "$output"
[ "$status" -eq 0 ]
# Test images have same ID and not the poison
id1=$(docker_t inspect --format="{{.Id}}" $image1)
id2=$(docker_t inspect --format="{{.Id}}" $image2)
# Remove old images
docker_t rmi -f $image1
docker_t rmi -f $image2
[ "$id1" != "$id2" ]
[ "$id1" != "$truncid" ]
[ "$id2" != "$truncid" ]
}
@test "Test malevolent altered identical images" {
truncid1="777cf9284131"
poison1="${truncid1}d77ca0863fb7f054c0a276d7e227b5e9a5d62b497979a481fa32"
truncid2="888cf9284131"
poison2="${truncid2}d77ca0863fb7f054c0a276d7e227b5e9a5d62b497979a481fa64"
image1="$host/$base/image1/alteredid:$poison1"
tempImage $image1
run docker_t push $image1
echo "$output"
[ "$status" -eq 0 ]
has_digest "$output"
image2="$host/$base/image2/alteredid:$poison2"
docker_t tag -f $image1 $image2
run docker_t push $image2
echo "$output"
[ "$status" -eq 0 ]
has_digest "$output"
# Remove image to ensure layer is pulled and digest verified
docker_t rmi -f $image1
docker_t rmi -f $image2
run docker_t pull $image1
echo "$output"
[ "$status" -eq 0 ]
run docker_t pull $image2
echo "$output"
[ "$status" -eq 0 ]
# Test if there are multiple images
run docker_t images
echo "$output"
[ "$status" -eq 0 ]
# Test images have same ID and not the poison
id1=$(docker_t inspect --format="{{.Id}}" $image1)
id2=$(docker_t inspect --format="{{.Id}}" $image2)
# Remove old images
docker_t rmi -f $image1
docker_t rmi -f $image2
[ "$id1" == "$id2" ]
[ "$id1" != "$truncid1" ]
[ "$id2" != "$truncid2" ]
}
@test "Test malevolent resumeable pull" {
version_check docker "$GOLEM_DIND_VERSION" "1.11.0"
version_check registry "$GOLEM_DISTRIBUTION_VERSION" "2.3.0"
imagename="$host/$base/resumeable"
image="$imagename:latest"
tempImage $image
run docker_t push $image
echo "$output"
[ "$status" -eq 0 ]
has_digest "$output"
# Remove image to ensure layer is pulled and digest verified
docker_t rmi -f $image
run docker_t pull "$imagename@$digest"
echo "$output"
[ "$status" -eq 0 ]
}

View file

@ -0,0 +1,10 @@
FROM nginx:1.9
COPY nginx.conf /etc/nginx/nginx.conf
COPY registry.conf /etc/nginx/conf.d/registry.conf
COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf
COPY registry-noauth.conf /etc/nginx/registry-noauth.conf
COPY registry-basic.conf /etc/nginx/registry-basic.conf
COPY test.passwd /etc/nginx/test.passwd
COPY ssl /etc/nginx/ssl
COPY v1 /var/www/html/v1

View file

@ -0,0 +1,6 @@
proxy_pass http://docker-registry-v2;
proxy_set_header Host $http_host; # required for docker client's sake
proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 900;

View file

@ -0,0 +1,61 @@
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
include /etc/nginx/conf.d/*.conf;
}
# Setup TCP proxies
stream {
# Malevolent proxy
server {
listen 6666;
proxy_pass malevolent:6666;
}
# Registry configured for token server
server {
listen 5554;
listen 5555;
proxy_pass registryv2token:5000;
}
# Token server
server {
listen 5556;
proxy_pass tokenserver:5556;
}
# Registry configured for token server with oauth
server {
listen 5557;
listen 5558;
proxy_pass registryv2tokenoauth:5000;
}
# Token server with oauth
server {
listen 5559;
proxy_pass tokenserveroauth:5559;
}
}

View file

@ -0,0 +1,8 @@
client_max_body_size 0;
chunked_transfer_encoding on;
location /v2/ {
auth_basic "registry.localhost";
auth_basic_user_file test.passwd;
add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always;
include docker-registry-v2.conf;
}

View file

@ -0,0 +1,5 @@
client_max_body_size 0;
chunked_transfer_encoding on;
location /v2/ {
include docker-registry-v2.conf;
}

View file

@ -0,0 +1,260 @@
# Docker registry proxy for api version 2
upstream docker-registry-v2 {
server registryv2:5000;
}
# No client auth or TLS
server {
listen 5000;
server_name localhost;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
chunked_transfer_encoding on;
location /v2/ {
# Do not allow connections from docker 1.5 and earlier
# docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
return 404;
}
include docker-registry-v2.conf;
}
}
# No client auth or TLS (V2 Only)
server {
listen 5002;
server_name localhost;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
chunked_transfer_encoding on;
location / {
include docker-registry-v2.conf;
}
}
# TLS Configuration chart
# Username/Password: testuser/passpassword
# | ca | client | basic | notes
# 5440 | yes | no | no | Tests CA certificate
# 5441 | yes | no | yes | Tests basic auth over TLS
# 5442 | yes | yes | no | Tests client auth with client CA
# 5443 | yes | yes | no | Tests client auth without client CA
# 5444 | yes | yes | yes | Tests using basic auth + tls auth
# 5445 | no | no | no | Tests insecure using TLS
# 5446 | no | no | yes | Tests sending credentials to server with insecure TLS
# 5447 | no | yes | no | Tests client auth to insecure
# 5448 | yes | no | no | Bad SSL version
server {
listen 5440;
server_name localhost;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem;
include registry-noauth.conf;
}
server {
listen 5441;
server_name localhost;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem;
include registry-basic.conf;
}
server {
listen 5442;
listen 5443;
server_name localhost;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem;
ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem;
ssl_verify_client on;
include registry-noauth.conf;
}
server {
listen 5444;
server_name localhost;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem;
ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem;
ssl_verify_client on;
include registry-basic.conf;
}
server {
listen 5445;
server_name localhost;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem;
include registry-noauth.conf;
}
server {
listen 5446;
server_name localhost;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem;
include registry-basic.conf;
}
server {
listen 5447;
server_name localhost;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem;
ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem;
ssl_verify_client on;
include registry-noauth.conf;
}
server {
listen 5448;
server_name localhost;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem;
ssl_protocols SSLv3;
include registry-noauth.conf;
}
# Add configuration for localregistry server_name
# Requires configuring /etc/hosts to use
# Set /etc/hosts entry to external IP, not 127.0.0.1 for testing
# Docker secure/insecure registry features
server {
listen 5440;
server_name localregistry;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem;
include registry-noauth.conf;
}
server {
listen 5441;
server_name localregistry;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem;
include registry-basic.conf;
}
server {
listen 5442;
listen 5443;
server_name localregistry;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem;
ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem;
ssl_verify_client on;
include registry-noauth.conf;
}
server {
listen 5444;
server_name localregistry;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem;
ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem;
ssl_verify_client on;
include registry-basic.conf;
}
server {
listen 5445;
server_name localregistry;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem;
include registry-noauth.conf;
}
server {
listen 5446;
server_name localregistry;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem;
include registry-basic.conf;
}
server {
listen 5447;
server_name localregistry;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem;
ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem;
ssl_verify_client on;
include registry-noauth.conf;
}
server {
listen 5448;
server_name localregistry;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem;
ssl_protocols SSLv3;
include registry-noauth.conf;
}
# V1 search test
# Registry configured with token auth and no tls
# TLS termination done by nginx, search results
# served by nginx
upstream docker-registry-v2-oauth {
server registryv2tokenoauthnotls:5000;
}
server {
listen 5600;
server_name localregistry;
ssl on;
ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem;
ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem;
root /var/www/html;
client_max_body_size 0;
chunked_transfer_encoding on;
location /v2/ {
proxy_buffering off;
proxy_pass http://docker-registry-v2-oauth;
proxy_set_header Host $http_host; # required for docker client's sake
proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 900;
}
location /v1/search {
if ($http_authorization !~ "Bearer [a-zA-Z0-9\._-]+") {
return 401;
}
try_files /v1/search.json =404;
add_header Content-Type application/json;
}
}

View file

@ -0,0 +1 @@
testuser:$apr1$YmLhHjm6$AjP4z8J1WgcUNxU8J4ue5.

View file

@ -0,0 +1 @@
{"num_pages":1,"num_results":2,"page":1,"page_size": 25,"query":"testsearch","results":[{"description":"","is_automated":false,"is_official":false,"is_trusted":false, "name":"dmcgowan/testsearch-1","star_count":1000},{"description":"Some automated build","is_automated":true,"is_official":false,"is_trusted":false,"name":"dmcgowan/testsearch-2","star_count":10}]}

View file

@ -0,0 +1,64 @@
#!/usr/bin/env bash
# Run the integration tests with multiple versions of the Docker engine
set -e
set -x
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
if [ "$TMPDIR" != "" ] && [ ! -d "$TMPDIR" ]; then
mkdir -p $TMPDIR
fi
cachedir=`mktemp -t -d golem-cache.XXXXXX`
trap "rm -rf $cachedir" EXIT
if [ "$1" == "-d" ]; then
# Drivers to use for Docker engines the tests are going to create.
STORAGE_DRIVER=${STORAGE_DRIVER:-overlay}
docker daemon --log-level=panic --storage-driver="$STORAGE_DRIVER" &
DOCKER_PID=$!
# Wait for it to become reachable.
tries=10
until docker version &> /dev/null; do
(( tries-- ))
if [ $tries -le 0 ]; then
echo >&2 "error: daemon failed to start"
exit 1
fi
sleep 1
done
trap "kill $DOCKER_PID" EXIT
fi
distimage=$(docker build -q $DIR/../..)
fullversion=$(git describe --match 'v[0-9]*' --dirty='.m' --always)
distversion=${fullversion:1}
echo "Testing image $distimage with distribution version $distversion"
# Pull needed images before invoking golem, timing each pull separately
# These images are defined in golem.conf
time docker pull nginx:1.9
time docker pull golang:1.6
time docker pull registry:0.9.1
time docker pull dmcgowan/token-server:simple
time docker pull dmcgowan/token-server:oauth
time docker pull distribution/golem-runner:0.1-bats
time docker pull docker:1.9.1-dind
time docker pull docker:1.10.3-dind
time docker pull docker:1.11.1-dind
golem -cache $cachedir \
-i "golem-distribution:latest,$distimage,$distversion" \
-i "golem-dind:latest,docker:1.9.1-dind,1.9.1" \
-i "golem-dind:latest,docker:1.10.3-dind,1.10.3" \
-i "golem-dind:latest,docker:1.11.1-dind,1.11.1" \
$DIR

View file

@ -0,0 +1,109 @@
#!/usr/bin/env bats
# Registry host name, should be set to non-localhost address and match
# DNS name in nginx/ssl certificates and what is installed in /etc/docker/cert.d
load helpers
hostname="localregistry"
base="hello-world"
image="${base}:latest"
# Login information, should match values in nginx/test.passwd
user=${TEST_USER:-"testuser"}
password=${TEST_PASSWORD:-"passpassword"}
email="distribution@docker.com"
function setup() {
tempImage $image
}
@test "Test valid certificates" {
docker_t tag -f $image $hostname:5440/$image
run docker_t push $hostname:5440/$image
[ "$status" -eq 0 ]
has_digest "$output"
}
@test "Test basic auth" {
basic_auth_version_check
login $hostname:5441
docker_t tag -f $image $hostname:5441/$image
run docker_t push $hostname:5441/$image
[ "$status" -eq 0 ]
has_digest "$output"
}
@test "Test basic auth with build" {
basic_auth_version_check
login $hostname:5441
image1=$hostname:5441/$image-build
image2=$hostname:5441/$image-build-2
tempImage $image1
run docker_t push $image1
[ "$status" -eq 0 ]
has_digest "$output"
docker_t rmi $image1
run build $image2 $image1
echo $output
[ "$status" -eq 0 ]
run docker_t push $image2
echo $output
[ "$status" -eq 0 ]
has_digest "$output"
}
@test "Test TLS client auth" {
docker_t tag -f $image $hostname:5442/$image
run docker_t push $hostname:5442/$image
[ "$status" -eq 0 ]
has_digest "$output"
}
@test "Test TLS client with invalid certificate authority fails" {
docker_t tag -f $image $hostname:5443/$image
run docker_t push $hostname:5443/$image
[ "$status" -ne 0 ]
}
@test "Test basic auth with TLS client auth" {
basic_auth_version_check
login $hostname:5444
docker_t tag -f $image $hostname:5444/$image
run docker_t push $hostname:5444/$image
[ "$status" -eq 0 ]
has_digest "$output"
}
@test "Test unknown certificate authority fails" {
docker_t tag -f $image $hostname:5445/$image
run docker_t push $hostname:5445/$image
[ "$status" -ne 0 ]
}
@test "Test basic auth with unknown certificate authority fails" {
run login $hostname:5446
[ "$status" -ne 0 ]
docker_t tag -f $image $hostname:5446/$image
run docker_t push $hostname:5446/$image
[ "$status" -ne 0 ]
}
@test "Test TLS client auth to server with unknown certificate authority fails" {
docker_t tag -f $image $hostname:5447/$image
run docker_t push $hostname:5447/$image
[ "$status" -ne 0 ]
}
@test "Test failure to connect to server fails to fallback to SSLv3" {
docker_t tag -f $image $hostname:5448/$image
run docker_t push $hostname:5448/$image
[ "$status" -ne 0 ]
}

View file

@ -0,0 +1,135 @@
#!/usr/bin/env bats
# This tests contacting a registry using a token server
load helpers
user="testuser"
password="testpassword"
email="a@nowhere.com"
base="hello-world"
@test "Test token server login" {
run docker_t login -u $user -p $password -e $email localregistry:5554
echo $output
[ "$status" -eq 0 ]
# First line is WARNING about credential save or email deprecation
[ "${lines[2]}" = "Login Succeeded" -o "${lines[1]}" = "Login Succeeded" ]
}
@test "Test token server bad login" {
run docker_t login -u "testuser" -p "badpassword" -e $email localregistry:5554
[ "$status" -ne 0 ]
run docker_t login -u "baduser" -p "testpassword" -e $email localregistry:5554
[ "$status" -ne 0 ]
}
@test "Test push and pull with token auth" {
login localregistry:5555
image="localregistry:5555/testuser/token"
build $image "$base:latest"
run docker_t push $image
echo $output
[ "$status" -eq 0 ]
docker_t rmi $image
docker_t pull $image
}
@test "Test push and pull with token auth wrong namespace" {
login localregistry:5555
image="localregistry:5555/notuser/token"
build $image "$base:latest"
run docker_t push $image
[ "$status" -ne 0 ]
}
@test "Test oauth token server login" {
version_check docker "$GOLEM_DIND_VERSION" "1.11.0"
login_oauth localregistry:5557
}
@test "Test oauth token server bad login" {
version_check docker "$GOLEM_DIND_VERSION" "1.11.0"
run docker_t login -u "testuser" -p "badpassword" -e $email localregistry:5557
[ "$status" -ne 0 ]
run docker_t login -u "baduser" -p "testpassword" -e $email localregistry:5557
[ "$status" -ne 0 ]
}
@test "Test oauth push and pull with token auth" {
version_check docker "$GOLEM_DIND_VERSION" "1.11.0"
login_oauth localregistry:5558
image="localregistry:5558/testuser/token"
build $image "$base:latest"
run docker_t push $image
echo $output
[ "$status" -eq 0 ]
docker_t rmi $image
docker_t pull $image
}
@test "Test oauth push and build with token auth" {
version_check docker "$GOLEM_DIND_VERSION" "1.11.0"
login_oauth localregistry:5558
image="localregistry:5558/testuser/token-build"
tempImage $image
run docker_t push $image
echo $output
[ "$status" -eq 0 ]
has_digest "$output"
docker_t rmi $image
image2="localregistry:5558/testuser/token-build-2"
run build $image2 $image
echo $output
[ "$status" -eq 0 ]
run docker_t push $image2
echo $output
[ "$status" -eq 0 ]
has_digest "$output"
}
@test "Test oauth push and pull with token auth wrong namespace" {
version_check docker "$GOLEM_DIND_VERSION" "1.11.0"
login_oauth localregistry:5558
image="localregistry:5558/notuser/token"
build $image "$base:latest"
run docker_t push $image
[ "$status" -ne 0 ]
}
@test "Test oauth with v1 search" {
version_check docker "$GOLEM_DIND_VERSION" "1.12.0"
run docker_t search localregistry:5600/testsearch
[ "$status" -ne 0 ]
login_oauth localregistry:5600
run docker_t search localregistry:5600/testsearch
echo $output
[ "$status" -eq 0 ]
echo $output | grep "testsearch-1"
echo $output | grep "testsearch-2"
}

View file

@ -0,0 +1 @@
testuser:$2y$05$T2MlBvkN1R/yICNnLuf1leOlOfAY0DvybctbbWUFKlojfkShVgn4m

View file

@ -0,0 +1,8 @@
FROM dmcgowan/token-server:oauth
WORKDIR /
COPY ./.htpasswd /.htpasswd
COPY ./certs/auth.localregistry.cert /tls.cert
COPY ./certs/auth.localregistry.key /tls.key
COPY ./certs/signing.key /sign.key

View file

@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDHDCCAgagAwIBAgIRAKhhQMnqZx+hkOmoUYgPb+kwCwYJKoZIhvcNAQELMCYx
ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw
MDQyMzFaFw0xOTAxMTIwMDQyMzFaMDAxETAPBgNVBAoTCFF1aWNrVExTMRswGQYD
VQQDExJhdXRoLmxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
ggEKAoIBAQD1tUf1EghBlIRrE83yF4zDgRu7vH2Jo0kygKJUWtQQe+DfXyjjE/fg
FdKnnoEjsIeF9hxNbTt0ldDz7/n97pbMhoiXULi9iq4jlgSzVL2XEAgrON0YSY/c
Lmmd1KSa/pOUZr2WMAYPZ+FdQfE1W7SMNbErPefBqYdFzpZ+esAtvbajYwIjl8Vy
9c4bidx4vgnNrR9GcFYibjC5sj8syh/OtbzzqiVGT8YcPpmMG6KNRkausa4gqpon
NKYG8C3WDaiPCLYKcvFrFfdEWF/m2oj14eXACXT9iwp8r4bsLgXrZwqcpKOWfVRu
qHC8aV476EYgxWCAOANExUdUaRt5wL/jAgMBAAGjPzA9MA4GA1UdDwEB/wQEAwIA
oDAMBgNVHRMBAf8EAjAAMB0GA1UdEQQWMBSCEmF1dGgubG9jYWxyZWdpc3RyeTAL
BgkqhkiG9w0BAQsDggEBABxPGK9FdGDxcLowNsExKnnZvmQT3H0u+Dux1gkp0AhH
KOrmx3LUENUKLSgotzx133tgOgR5lzAWVFy7bhLwlPhOslxf2oEfztsAMd/tY8rW
PrG2ZqYqlzEQQ9INbAc3woo5A3slN07uhP3F16jNqoMM4zRmw6Ba70CluGKT7x5+
xVjKoWITLjWDXT5m35PnsN8CpBaFzXYcod/5p9XwCFp0s+aNxfpZECCV/3yqIr+J
ALzroPh43FAlG96o4NyYZ2Msp63newN19R2+TgpV4nXuw2mLVDpvetP7RRqnpvj/
qwRgt5j4hFjJWb61M0ELL7A9fA71h1ImdGCvnArdBQs=
-----END CERTIFICATE-----

View file

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA9bVH9RIIQZSEaxPN8heMw4Ebu7x9iaNJMoCiVFrUEHvg318o
4xP34BXSp56BI7CHhfYcTW07dJXQ8+/5/e6WzIaIl1C4vYquI5YEs1S9lxAIKzjd
GEmP3C5pndSkmv6TlGa9ljAGD2fhXUHxNVu0jDWxKz3nwamHRc6WfnrALb22o2MC
I5fFcvXOG4nceL4Jza0fRnBWIm4wubI/LMofzrW886olRk/GHD6ZjBuijUZGrrGu
IKqaJzSmBvAt1g2ojwi2CnLxaxX3RFhf5tqI9eHlwAl0/YsKfK+G7C4F62cKnKSj
ln1UbqhwvGleO+hGIMVggDgDRMVHVGkbecC/4wIDAQABAoIBAQCrsjXKRwOF8CZo
PLqZBWPT6hBbK+f9miC4LbNBhwbRTf9hl7mWlImOCTHe95/+NIk/Ty+P21jEqzwM
ehETJPoziX9BXaL6sEHnlBlMx1aEjStoKKA3LJBeqAAdzk4IEQVHmlO4824IreqJ
pF7Njnunzo0zTlr4tWJVoXsAfv5z9tNtdkxYBbIa0fjfGtlqXU3gLq58FCON3mB/
NGc0AyA1UFGp0FzpdEcwTGD4InsXbcmsl2l/VPBJuZbryITRqWs6BbK++80DRhNt
afMhP+IzKrWSCp0rBYrqqz6AevtlKdEfQK1yXPEjN/63QLMevt8mF/1JCp//TQnf
Z6bIQbAhAoGBAP7vFA0PcvoXt9MXvvAwrKY1s6pNw4nWPG27qY1/m+DkBwP8IQms
4AWGv1wscZzXJYTvaLO5/qjmGUj50ohcVEvyZJioh1pKXA8Chxvd6rBA/O/Lj5E0
3MOSA5Q0gxJ0Mhv0zGbbyN5fY8D8zhxoqQP4LoW+UdZG2Oi6JxsQ9c9dAoGBAPa8
U3bGuM5OGA9EWP7mkB/VnjDTL1aEIN3cOHbHIKwH/loxdYcNMBE7vwxV1CzgIzXT
wsL0iE15fQdK938u0+um8aH5QtbWNI8tdk1XVjEC/i3C7N6WVUutneCKUDb4QxiB
9OvWCbNNiN+xTKBBM93YlwO3GYfrW9Pmm9q1+hg/AoGBALJlUS22gun50PxaIJZq
KVcCO2DQnCYHki/j48mN4+HjD/m85M2lePrFCYIR48syTyIQer9SR5+frVAA6k/b
9G1VCQo+3MDVSkiCp1Nb3tBKGfYgB65ARMBinDiI6rPuNeaUTrkn0g+yxtaU0hLV
Nnj9omia/x+oYj+xjI4HN0xNAoGARy92dSJIV104m88ATip/EnAzP6ruUWu1f8z1
jW9OAdQckjEK03f+kjpGmGx61qekAPejjVO3r4KJi/0ZAtyjz61OsYiUvB748wYO
x6mW+HUAmHtQk7eTzE2+6vV8xx9BXGTCIPiTu+N2xfMFRIcLS8odZ7j/6LMCv1Qd
SzCNg0kCgYBaNlEs4pK1VxZZpEWwVmFpgIxfEfxLIaGrek6wBTcCn/VA2M0oHuez
mlMio8VY0yWPBJz30JflDiTmYIvteLPMHT0N0J6isiXLhzJSFI4+cAMLE2Q5v8rz
W+W5/L8YZeierW0qJat1BrgStaf5ZLpiOc9pKBSwycydPH5BfVdK/A==
-----END RSA PRIVATE KEY-----

View file

@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDETCCAfugAwIBAgIQN7rT95eAy75c4n6/AsDJODALBgkqhkiG9w0BAQswJjER
MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw
NDIzMloXDTE5MDExMjAwNDIzMlowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV
BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQDLi75QEkl/qekcoOJlNv9y1IXvrbU2ssl4ViJiZRjWx+/CkyCCOyf9YUpAgRLr
Pskqde2mwhuNP8yBlOBb17Sapz7N3+hJi5j9vLBAFcamPeF3PqxjFv7j5TKkRmSI
dFYQclREwMUd3qEH322KkqOnsEEfdmCgFqWORe+QR5AxzxQP3Pnd4OYH1yZCh0MQ
P2pJgrxxf2I5I/m1AUgoHV1cdBbCv9LGohJPpMtwPC0dJpgMFcnf6hT37At236AY
V437HiRruY7iPWkYFrSPWpwdslJ32MZvRN5RS163jZXjiZ7qWnQOiiDJfXe4evB/
yQLN4m0qVQxsMz7rkY7OsqaXAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV
HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL
A4IBAQAyUb3EuMaOylBeV8+4KeBiE4lxykDOwLLSk3jXRsVVtfJpX3v8l5vwo/Jf
iG8tzzz+7uiskI96u3TsekUtVkUxujfKevMP+369K/59s7NRmwwlFMyB2fvL14B2
oweVjWvM/8fZl6irtFdbJFXXRm7paKso5cmfImxhojAwohgcd4XTVLE/7juYa582
AaBdRuIiyL71MU9qa1mC5+57AaSLPYaPKpahemgYYkV1Z403Kd6rXchxdQ8JIAL8
+0oYTSC+svnz1tUU/V5E5id9LQaTmDN5iIVFhNpqAaZmR45UI86woWvnkMb8Ants
4aknwTwY3300PuTqBdQufvOFDRN5
-----END CERTIFICATE-----

View file

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAy4u+UBJJf6npHKDiZTb/ctSF7621NrLJeFYiYmUY1sfvwpMg
gjsn/WFKQIES6z7JKnXtpsIbjT/MgZTgW9e0mqc+zd/oSYuY/bywQBXGpj3hdz6s
Yxb+4+UypEZkiHRWEHJURMDFHd6hB99tipKjp7BBH3ZgoBaljkXvkEeQMc8UD9z5
3eDmB9cmQodDED9qSYK8cX9iOSP5tQFIKB1dXHQWwr/SxqIST6TLcDwtHSaYDBXJ
3+oU9+wLdt+gGFeN+x4ka7mO4j1pGBa0j1qcHbJSd9jGb0TeUUtet42V44me6lp0
DoogyX13uHrwf8kCzeJtKlUMbDM+65GOzrKmlwIDAQABAoIBAF6vFMp+lz4RteSh
Wm8m1FGAVwWVUpStOlcGClynFpTi0L88XYT3K7UMStQSttBDlqRv0ysdZF+ia+lj
bbKLdvHyFp8CJzX/AB4YZgyJlKzEYFtuBhbaHZu5hIMyU5W+OELSTCznV0p7w4C8
CGLLr+FTdhfCo1QU9NJn6fa9s2/XRdSClBBalAHYs0ZS7ZckaF/sPiC/VapfBMet
qjJXNYiO6pXYriGWKF9zdAMfk2CM0BVWbnwQZkMSEQirrTcJwm3ezyloXCv2nywK
/VzbUT1HJVyzo5oAwTd0MwDc2oEMiFzlfO028zY4LDltpia+SyWvFi5NaIqzFESc
yLgJacECgYEA3jvH+ZQHQf42Md8TCciokaYvwWIKJdk4WRjbvE5cBZekyXAm7/3b
/1VFDKsy2RPlfmfHP3wy9rlnjzsRveB5qaclgS8aI67AYsWd/yRgfRatl7Ve9bHl
LY6VM5L/DZTxykcqivwjc77XoDuBfUKs6tyuSLQku+FOTbLtNYlUCHECgYEA6nkR
lkXufyLmDhNb3093RsYvPcs1kGaIIGTnz3cxWNh485DgsyLBuYQ5ugupQkzM8YSt
ohDTmVpggqjlXQxCg0Zw8gkEV0v8KsLGjn1CuTJg/mBArXlelq1FEeRAYC9/YfOz
ocXegHV7wDKKtcraNZFsEc7Z0LwbC9wtzSFG44cCgYASkMX1CLPOhJE8e1lY0OWc
PVjx++HDJbF6aAQ7aARyBygiF/d4xylw3EvHcinuTqY2eC8CE7siN3z6T0H9Ldqc
HLWaZDf30SqLVd0MKprQ+GsKKIHFXtY5hxbZ1ybtmIrWjjl0oPnJOqFC5pW7xC0z
9bmtozcKZxkmjpMYjN9zUQKBgQCqV6KLRerqunPgLfhE1/qTlE+l2QflDFhBEI3I
j5NuNHZKnSphehK7sHAv1WD2Jc2OeRGb+BWCB8Ktqf5YBxwbOwW7EQnyUeW1OyP9
SMs8uHj21P6oCNDLLr5LLUQHnPoyM1aBZLstICzziMR1JhY5bJjSpzBfEQmlKCSu
LkrN6QKBgQCRXrBJRUxeJj7wCnCSq0Clf9NhCpQnwo4bEx8sKlj8K8ku8MvwQwoM
3KfWc7bOl6A2/mM/k4yoHtBMM9X9xqYtsgeFhxuiWBcfTmTxWh73LQ48Kgbrgodt
6yTccnjr7OtBidD85c6lgjAUgcL43QY8mlw0OhzXAZ2R5HWFp4ht+w==
-----END RSA PRIVATE KEY-----

View file

@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC9TCCAd+gAwIBAgIRAJ6IIisIZxL86oe3oeoAgWUwCwYJKoZIhvcNAQELMCYx
ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw
MDQyMzNaFw0xOTAxMTIwMDQyMzNaMBMxETAPBgNVBAoTCFF1aWNrVExTMIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVnt
QCQQWjkWVpOz8L2A29BRvv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40r
Xd/MNKAn0kFsSb6BIKmUwPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH3
9mAmV+x0kTzFR/78ZDD5CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+K
IY8FQ6yN6l27MK56wlj4hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTA
NwpsIBfdoUVbI+j2ivn+ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQAB
ozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0T
AQH/BAIwADALBgkqhkiG9w0BAQsDggEBAJq3JzTLrIWCF8rHLTTm1icE9PjOO0sV
a1wrmdJ6NwRbJ66dLZ/4G/NZjVOnce9WFHYLFSEG+wx5YVUPuJXpJaSdy0h8F0Uw
hiJwgeVsGg7vcf4G6mWHrsauDOhylnD31UtYPX1Ao/jcntyyf+gCQpY1J/B8l1yU
LNOwvWLVLpZwZ4ehbKA/UnDXgA+3uHvpzl//cPe0cnt+Mhrgzk5mIMwVR6zCZw1G
oVutAHpv2PXxRwTMu51J+QtSL2b2w3mGHxDLpmz8UdXOtkxdpmDT8kIOtX0T5yGL
29F3fa81iZPs02GWjSGOfOzmCCvaA4C5KJvY/WulF7OOgwvrBpQwqTI=
-----END CERTIFICATE-----

View file

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVntQCQQWjkWVpOz8L2A29BR
vv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40rXd/MNKAn0kFsSb6BIKmU
wPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH39mAmV+x0kTzFR/78ZDD5
CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+KIY8FQ6yN6l27MK56wlj4
hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTANwpsIBfdoUVbI+j2ivn+
ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQABAoIBAD2tiNZv6DImSXo+
sq0qQomEf/OBvWPFMnWppd/NK/TXa+UPHO4I0MjoDJqIEC6zCU+fC4d2St1MmlrT
/X85vPFRw8mGwGxfHeRSLxEVj04I5GDYTWy0JQUrJUk/cTKp2/Bwm/RaylTyFAM0
caYrSpvD69vjuTDFr7PDxM6iaqM53zK/vD8kCe81z+wN0UbAKsLlUOKztjH6SzL9
uVOkekIT/j3L2xxyQhjmhfA3TuCP4uNK/+6/4ovl9Nj4pQsFomsCk4phgqy9SOm1
4yufmVd8k7J3cppMlMPNc+7tqe2Xn593Y8QT95y3yhtkFECF70yBw64HMDDpA22p
5b/JV9ECgYEA9H4RBXOwbdjcpCa9H3mFjHqUQCqNme1vOSGiflZh9KBCDKgdqugm
KHpvAECADie0p6XRHpxRvufKnGFkJwedfeiKz51T+0dqgPxWncYT1TC+cAjOSzfM
wBpUOcAyvTTviwGbg4bLanHo4remzCbcnRvHQX4YfPFCjT9GhsU+XEUCgYEA5ubz
IlSu1wwFJpoO24ZykGUyqGUQXzR0NrXiLrpF0764qjmHyF8SPJPv1XegSxP/nUTz
SjVfJ7wye/X9qlOpBY8mzy9qQMMKc1cQBV1yVW8IRZ7pMYQZO7qmrZD/DWTa5qWt
pqSbIH2FKedELsKJA/SBtczKjspOdDKyh0UelsMCgYA7DyTfc0XAEy2hPXZb3wgC
mi2rnlvcPf2rCFPvPsCkzf2GfynDehaVmpWrsuj8Al1iTezI/yvD+Mv5oJEH2JAT
tROq+S8rOOIiTFJEBHAQBJlMCOSESPNdyD5mQOZAzEO9CWNejzYd/WwrL//Luut5
zBcC3AngTIsuAYXw0j6xHQKBgQDamkAJep7k3W5q82OplgoUhpqFLtlnKSP1QBFZ
J+U/6Mqv7jONEeUUEQL42H6bVd2kqUikMw9ZcSVikquLfBUDPFoDwOIZWg4k0IJM
cgHyvGHad+5SgLva/oUawbGWnqtXvfc/U4vCINPXrimxE1/grLW4xp/mu8W24OCA
jIG/PQKBgD/Apl+sfqiB/6ONBjjIswA4yFkEXHSZNpAgcPwhA+cO5D0afEWz2HIx
VeOh5NjN1EL0hX8clFW4bfkK1Vr0kjvbMUXnBWaibUgpiVQl9O9WjaKQLZrp4sRu
x2kJ07Qn6ri7f/lsqOELZwBy95iHWRdePptaAKkRGxJstHI7dgUt
-----END RSA PRIVATE KEY-----

View file

@ -0,0 +1,15 @@
version: 0.1
loglevel: debug
storage:
cache:
blobdescriptor: inmemory
filesystem:
rootdirectory: /tmp/registry-dev
http:
addr: 0.0.0.0:5000
auth:
token:
realm: "https://auth.localregistry:5559/token/"
issuer: "registry-test"
service: "registry-test"
rootcertbundle: "/etc/docker/registry/tokenbundle.pem"

View file

@ -0,0 +1,18 @@
version: 0.1
loglevel: debug
storage:
cache:
blobdescriptor: inmemory
filesystem:
rootdirectory: /tmp/registry-dev
http:
addr: 0.0.0.0:5000
tls:
certificate: "/etc/docker/registry/localregistry.cert"
key: "/etc/docker/registry/localregistry.key"
auth:
token:
realm: "https://auth.localregistry:5559/token/"
issuer: "registry-test"
service: "registry-test"
rootcertbundle: "/etc/docker/registry/tokenbundle.pem"

View file

@ -0,0 +1 @@
testuser:$2y$05$T2MlBvkN1R/yICNnLuf1leOlOfAY0DvybctbbWUFKlojfkShVgn4m

View file

@ -0,0 +1,8 @@
FROM dmcgowan/token-server:simple
WORKDIR /
COPY ./.htpasswd /.htpasswd
COPY ./certs/auth.localregistry.cert /tls.cert
COPY ./certs/auth.localregistry.key /tls.key
COPY ./certs/signing.key /sign.key

View file

@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDHDCCAgagAwIBAgIRAKhhQMnqZx+hkOmoUYgPb+kwCwYJKoZIhvcNAQELMCYx
ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw
MDQyMzFaFw0xOTAxMTIwMDQyMzFaMDAxETAPBgNVBAoTCFF1aWNrVExTMRswGQYD
VQQDExJhdXRoLmxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
ggEKAoIBAQD1tUf1EghBlIRrE83yF4zDgRu7vH2Jo0kygKJUWtQQe+DfXyjjE/fg
FdKnnoEjsIeF9hxNbTt0ldDz7/n97pbMhoiXULi9iq4jlgSzVL2XEAgrON0YSY/c
Lmmd1KSa/pOUZr2WMAYPZ+FdQfE1W7SMNbErPefBqYdFzpZ+esAtvbajYwIjl8Vy
9c4bidx4vgnNrR9GcFYibjC5sj8syh/OtbzzqiVGT8YcPpmMG6KNRkausa4gqpon
NKYG8C3WDaiPCLYKcvFrFfdEWF/m2oj14eXACXT9iwp8r4bsLgXrZwqcpKOWfVRu
qHC8aV476EYgxWCAOANExUdUaRt5wL/jAgMBAAGjPzA9MA4GA1UdDwEB/wQEAwIA
oDAMBgNVHRMBAf8EAjAAMB0GA1UdEQQWMBSCEmF1dGgubG9jYWxyZWdpc3RyeTAL
BgkqhkiG9w0BAQsDggEBABxPGK9FdGDxcLowNsExKnnZvmQT3H0u+Dux1gkp0AhH
KOrmx3LUENUKLSgotzx133tgOgR5lzAWVFy7bhLwlPhOslxf2oEfztsAMd/tY8rW
PrG2ZqYqlzEQQ9INbAc3woo5A3slN07uhP3F16jNqoMM4zRmw6Ba70CluGKT7x5+
xVjKoWITLjWDXT5m35PnsN8CpBaFzXYcod/5p9XwCFp0s+aNxfpZECCV/3yqIr+J
ALzroPh43FAlG96o4NyYZ2Msp63newN19R2+TgpV4nXuw2mLVDpvetP7RRqnpvj/
qwRgt5j4hFjJWb61M0ELL7A9fA71h1ImdGCvnArdBQs=
-----END CERTIFICATE-----

View file

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA9bVH9RIIQZSEaxPN8heMw4Ebu7x9iaNJMoCiVFrUEHvg318o
4xP34BXSp56BI7CHhfYcTW07dJXQ8+/5/e6WzIaIl1C4vYquI5YEs1S9lxAIKzjd
GEmP3C5pndSkmv6TlGa9ljAGD2fhXUHxNVu0jDWxKz3nwamHRc6WfnrALb22o2MC
I5fFcvXOG4nceL4Jza0fRnBWIm4wubI/LMofzrW886olRk/GHD6ZjBuijUZGrrGu
IKqaJzSmBvAt1g2ojwi2CnLxaxX3RFhf5tqI9eHlwAl0/YsKfK+G7C4F62cKnKSj
ln1UbqhwvGleO+hGIMVggDgDRMVHVGkbecC/4wIDAQABAoIBAQCrsjXKRwOF8CZo
PLqZBWPT6hBbK+f9miC4LbNBhwbRTf9hl7mWlImOCTHe95/+NIk/Ty+P21jEqzwM
ehETJPoziX9BXaL6sEHnlBlMx1aEjStoKKA3LJBeqAAdzk4IEQVHmlO4824IreqJ
pF7Njnunzo0zTlr4tWJVoXsAfv5z9tNtdkxYBbIa0fjfGtlqXU3gLq58FCON3mB/
NGc0AyA1UFGp0FzpdEcwTGD4InsXbcmsl2l/VPBJuZbryITRqWs6BbK++80DRhNt
afMhP+IzKrWSCp0rBYrqqz6AevtlKdEfQK1yXPEjN/63QLMevt8mF/1JCp//TQnf
Z6bIQbAhAoGBAP7vFA0PcvoXt9MXvvAwrKY1s6pNw4nWPG27qY1/m+DkBwP8IQms
4AWGv1wscZzXJYTvaLO5/qjmGUj50ohcVEvyZJioh1pKXA8Chxvd6rBA/O/Lj5E0
3MOSA5Q0gxJ0Mhv0zGbbyN5fY8D8zhxoqQP4LoW+UdZG2Oi6JxsQ9c9dAoGBAPa8
U3bGuM5OGA9EWP7mkB/VnjDTL1aEIN3cOHbHIKwH/loxdYcNMBE7vwxV1CzgIzXT
wsL0iE15fQdK938u0+um8aH5QtbWNI8tdk1XVjEC/i3C7N6WVUutneCKUDb4QxiB
9OvWCbNNiN+xTKBBM93YlwO3GYfrW9Pmm9q1+hg/AoGBALJlUS22gun50PxaIJZq
KVcCO2DQnCYHki/j48mN4+HjD/m85M2lePrFCYIR48syTyIQer9SR5+frVAA6k/b
9G1VCQo+3MDVSkiCp1Nb3tBKGfYgB65ARMBinDiI6rPuNeaUTrkn0g+yxtaU0hLV
Nnj9omia/x+oYj+xjI4HN0xNAoGARy92dSJIV104m88ATip/EnAzP6ruUWu1f8z1
jW9OAdQckjEK03f+kjpGmGx61qekAPejjVO3r4KJi/0ZAtyjz61OsYiUvB748wYO
x6mW+HUAmHtQk7eTzE2+6vV8xx9BXGTCIPiTu+N2xfMFRIcLS8odZ7j/6LMCv1Qd
SzCNg0kCgYBaNlEs4pK1VxZZpEWwVmFpgIxfEfxLIaGrek6wBTcCn/VA2M0oHuez
mlMio8VY0yWPBJz30JflDiTmYIvteLPMHT0N0J6isiXLhzJSFI4+cAMLE2Q5v8rz
W+W5/L8YZeierW0qJat1BrgStaf5ZLpiOc9pKBSwycydPH5BfVdK/A==
-----END RSA PRIVATE KEY-----

View file

@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDETCCAfugAwIBAgIQN7rT95eAy75c4n6/AsDJODALBgkqhkiG9w0BAQswJjER
MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw
NDIzMloXDTE5MDExMjAwNDIzMlowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV
BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQDLi75QEkl/qekcoOJlNv9y1IXvrbU2ssl4ViJiZRjWx+/CkyCCOyf9YUpAgRLr
Pskqde2mwhuNP8yBlOBb17Sapz7N3+hJi5j9vLBAFcamPeF3PqxjFv7j5TKkRmSI
dFYQclREwMUd3qEH322KkqOnsEEfdmCgFqWORe+QR5AxzxQP3Pnd4OYH1yZCh0MQ
P2pJgrxxf2I5I/m1AUgoHV1cdBbCv9LGohJPpMtwPC0dJpgMFcnf6hT37At236AY
V437HiRruY7iPWkYFrSPWpwdslJ32MZvRN5RS163jZXjiZ7qWnQOiiDJfXe4evB/
yQLN4m0qVQxsMz7rkY7OsqaXAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV
HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL
A4IBAQAyUb3EuMaOylBeV8+4KeBiE4lxykDOwLLSk3jXRsVVtfJpX3v8l5vwo/Jf
iG8tzzz+7uiskI96u3TsekUtVkUxujfKevMP+369K/59s7NRmwwlFMyB2fvL14B2
oweVjWvM/8fZl6irtFdbJFXXRm7paKso5cmfImxhojAwohgcd4XTVLE/7juYa582
AaBdRuIiyL71MU9qa1mC5+57AaSLPYaPKpahemgYYkV1Z403Kd6rXchxdQ8JIAL8
+0oYTSC+svnz1tUU/V5E5id9LQaTmDN5iIVFhNpqAaZmR45UI86woWvnkMb8Ants
4aknwTwY3300PuTqBdQufvOFDRN5
-----END CERTIFICATE-----

View file

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAy4u+UBJJf6npHKDiZTb/ctSF7621NrLJeFYiYmUY1sfvwpMg
gjsn/WFKQIES6z7JKnXtpsIbjT/MgZTgW9e0mqc+zd/oSYuY/bywQBXGpj3hdz6s
Yxb+4+UypEZkiHRWEHJURMDFHd6hB99tipKjp7BBH3ZgoBaljkXvkEeQMc8UD9z5
3eDmB9cmQodDED9qSYK8cX9iOSP5tQFIKB1dXHQWwr/SxqIST6TLcDwtHSaYDBXJ
3+oU9+wLdt+gGFeN+x4ka7mO4j1pGBa0j1qcHbJSd9jGb0TeUUtet42V44me6lp0
DoogyX13uHrwf8kCzeJtKlUMbDM+65GOzrKmlwIDAQABAoIBAF6vFMp+lz4RteSh
Wm8m1FGAVwWVUpStOlcGClynFpTi0L88XYT3K7UMStQSttBDlqRv0ysdZF+ia+lj
bbKLdvHyFp8CJzX/AB4YZgyJlKzEYFtuBhbaHZu5hIMyU5W+OELSTCznV0p7w4C8
CGLLr+FTdhfCo1QU9NJn6fa9s2/XRdSClBBalAHYs0ZS7ZckaF/sPiC/VapfBMet
qjJXNYiO6pXYriGWKF9zdAMfk2CM0BVWbnwQZkMSEQirrTcJwm3ezyloXCv2nywK
/VzbUT1HJVyzo5oAwTd0MwDc2oEMiFzlfO028zY4LDltpia+SyWvFi5NaIqzFESc
yLgJacECgYEA3jvH+ZQHQf42Md8TCciokaYvwWIKJdk4WRjbvE5cBZekyXAm7/3b
/1VFDKsy2RPlfmfHP3wy9rlnjzsRveB5qaclgS8aI67AYsWd/yRgfRatl7Ve9bHl
LY6VM5L/DZTxykcqivwjc77XoDuBfUKs6tyuSLQku+FOTbLtNYlUCHECgYEA6nkR
lkXufyLmDhNb3093RsYvPcs1kGaIIGTnz3cxWNh485DgsyLBuYQ5ugupQkzM8YSt
ohDTmVpggqjlXQxCg0Zw8gkEV0v8KsLGjn1CuTJg/mBArXlelq1FEeRAYC9/YfOz
ocXegHV7wDKKtcraNZFsEc7Z0LwbC9wtzSFG44cCgYASkMX1CLPOhJE8e1lY0OWc
PVjx++HDJbF6aAQ7aARyBygiF/d4xylw3EvHcinuTqY2eC8CE7siN3z6T0H9Ldqc
HLWaZDf30SqLVd0MKprQ+GsKKIHFXtY5hxbZ1ybtmIrWjjl0oPnJOqFC5pW7xC0z
9bmtozcKZxkmjpMYjN9zUQKBgQCqV6KLRerqunPgLfhE1/qTlE+l2QflDFhBEI3I
j5NuNHZKnSphehK7sHAv1WD2Jc2OeRGb+BWCB8Ktqf5YBxwbOwW7EQnyUeW1OyP9
SMs8uHj21P6oCNDLLr5LLUQHnPoyM1aBZLstICzziMR1JhY5bJjSpzBfEQmlKCSu
LkrN6QKBgQCRXrBJRUxeJj7wCnCSq0Clf9NhCpQnwo4bEx8sKlj8K8ku8MvwQwoM
3KfWc7bOl6A2/mM/k4yoHtBMM9X9xqYtsgeFhxuiWBcfTmTxWh73LQ48Kgbrgodt
6yTccnjr7OtBidD85c6lgjAUgcL43QY8mlw0OhzXAZ2R5HWFp4ht+w==
-----END RSA PRIVATE KEY-----

View file

@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC9TCCAd+gAwIBAgIRAJ6IIisIZxL86oe3oeoAgWUwCwYJKoZIhvcNAQELMCYx
ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw
MDQyMzNaFw0xOTAxMTIwMDQyMzNaMBMxETAPBgNVBAoTCFF1aWNrVExTMIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVnt
QCQQWjkWVpOz8L2A29BRvv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40r
Xd/MNKAn0kFsSb6BIKmUwPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH3
9mAmV+x0kTzFR/78ZDD5CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+K
IY8FQ6yN6l27MK56wlj4hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTA
NwpsIBfdoUVbI+j2ivn+ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQAB
ozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0T
AQH/BAIwADALBgkqhkiG9w0BAQsDggEBAJq3JzTLrIWCF8rHLTTm1icE9PjOO0sV
a1wrmdJ6NwRbJ66dLZ/4G/NZjVOnce9WFHYLFSEG+wx5YVUPuJXpJaSdy0h8F0Uw
hiJwgeVsGg7vcf4G6mWHrsauDOhylnD31UtYPX1Ao/jcntyyf+gCQpY1J/B8l1yU
LNOwvWLVLpZwZ4ehbKA/UnDXgA+3uHvpzl//cPe0cnt+Mhrgzk5mIMwVR6zCZw1G
oVutAHpv2PXxRwTMu51J+QtSL2b2w3mGHxDLpmz8UdXOtkxdpmDT8kIOtX0T5yGL
29F3fa81iZPs02GWjSGOfOzmCCvaA4C5KJvY/WulF7OOgwvrBpQwqTI=
-----END CERTIFICATE-----

View file

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVntQCQQWjkWVpOz8L2A29BR
vv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40rXd/MNKAn0kFsSb6BIKmU
wPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH39mAmV+x0kTzFR/78ZDD5
CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+KIY8FQ6yN6l27MK56wlj4
hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTANwpsIBfdoUVbI+j2ivn+
ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQABAoIBAD2tiNZv6DImSXo+
sq0qQomEf/OBvWPFMnWppd/NK/TXa+UPHO4I0MjoDJqIEC6zCU+fC4d2St1MmlrT
/X85vPFRw8mGwGxfHeRSLxEVj04I5GDYTWy0JQUrJUk/cTKp2/Bwm/RaylTyFAM0
caYrSpvD69vjuTDFr7PDxM6iaqM53zK/vD8kCe81z+wN0UbAKsLlUOKztjH6SzL9
uVOkekIT/j3L2xxyQhjmhfA3TuCP4uNK/+6/4ovl9Nj4pQsFomsCk4phgqy9SOm1
4yufmVd8k7J3cppMlMPNc+7tqe2Xn593Y8QT95y3yhtkFECF70yBw64HMDDpA22p
5b/JV9ECgYEA9H4RBXOwbdjcpCa9H3mFjHqUQCqNme1vOSGiflZh9KBCDKgdqugm
KHpvAECADie0p6XRHpxRvufKnGFkJwedfeiKz51T+0dqgPxWncYT1TC+cAjOSzfM
wBpUOcAyvTTviwGbg4bLanHo4remzCbcnRvHQX4YfPFCjT9GhsU+XEUCgYEA5ubz
IlSu1wwFJpoO24ZykGUyqGUQXzR0NrXiLrpF0764qjmHyF8SPJPv1XegSxP/nUTz
SjVfJ7wye/X9qlOpBY8mzy9qQMMKc1cQBV1yVW8IRZ7pMYQZO7qmrZD/DWTa5qWt
pqSbIH2FKedELsKJA/SBtczKjspOdDKyh0UelsMCgYA7DyTfc0XAEy2hPXZb3wgC
mi2rnlvcPf2rCFPvPsCkzf2GfynDehaVmpWrsuj8Al1iTezI/yvD+Mv5oJEH2JAT
tROq+S8rOOIiTFJEBHAQBJlMCOSESPNdyD5mQOZAzEO9CWNejzYd/WwrL//Luut5
zBcC3AngTIsuAYXw0j6xHQKBgQDamkAJep7k3W5q82OplgoUhpqFLtlnKSP1QBFZ
J+U/6Mqv7jONEeUUEQL42H6bVd2kqUikMw9ZcSVikquLfBUDPFoDwOIZWg4k0IJM
cgHyvGHad+5SgLva/oUawbGWnqtXvfc/U4vCINPXrimxE1/grLW4xp/mu8W24OCA
jIG/PQKBgD/Apl+sfqiB/6ONBjjIswA4yFkEXHSZNpAgcPwhA+cO5D0afEWz2HIx
VeOh5NjN1EL0hX8clFW4bfkK1Vr0kjvbMUXnBWaibUgpiVQl9O9WjaKQLZrp4sRu
x2kJ07Qn6ri7f/lsqOELZwBy95iHWRdePptaAKkRGxJstHI7dgUt
-----END RSA PRIVATE KEY-----

View file

@ -0,0 +1,18 @@
version: 0.1
loglevel: debug
storage:
cache:
blobdescriptor: inmemory
filesystem:
rootdirectory: /tmp/registry-dev
http:
addr: 0.0.0.0:5000
tls:
certificate: "/etc/docker/registry/localregistry.cert"
key: "/etc/docker/registry/localregistry.key"
auth:
token:
realm: "https://auth.localregistry:5556/token/"
issuer: "registry-test"
service: "registry-test"
rootcertbundle: "/etc/docker/registry/tokenbundle.pem"

View file

@ -0,0 +1,38 @@
package main
import (
"net/http"
"github.com/docker/distribution/registry/api/errcode"
)
var (
errGroup = "tokenserver"
// ErrorBadTokenOption is returned when a token parameter is invalid
ErrorBadTokenOption = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BAD_TOKEN_OPTION",
Message: "bad token option",
Description: `This error may be returned when a request for a
token contains an option which is not valid`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorMissingRequiredField is returned when a required form field is missing
ErrorMissingRequiredField = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MISSING_REQUIRED_FIELD",
Message: "missing required field",
Description: `This error may be returned when a request for a
token does not contain a required form field`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorUnsupportedValue is returned when a form field has an unsupported value
ErrorUnsupportedValue = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "UNSUPPORTED_VALUE",
Message: "unsupported value",
Description: `This error may be returned when a request for a
token contains a form field with an unsupported value`,
HTTPStatusCode: http.StatusBadRequest,
})
)

View file

@ -0,0 +1,425 @@
package main
import (
"encoding/json"
"flag"
"math/rand"
"net/http"
"strconv"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/auth"
_ "github.com/docker/distribution/registry/auth/htpasswd"
"github.com/docker/libtrust"
"github.com/gorilla/mux"
)
var (
enforceRepoClass bool
)
func main() {
var (
issuer = &TokenIssuer{}
pkFile string
addr string
debug bool
err error
passwdFile string
realm string
cert string
certKey string
)
flag.StringVar(&issuer.Issuer, "issuer", "distribution-token-server", "Issuer string for token")
flag.StringVar(&pkFile, "key", "", "Private key file")
flag.StringVar(&addr, "addr", "localhost:8080", "Address to listen on")
flag.BoolVar(&debug, "debug", false, "Debug mode")
flag.StringVar(&passwdFile, "passwd", ".htpasswd", "Passwd file")
flag.StringVar(&realm, "realm", "", "Authentication realm")
flag.StringVar(&cert, "tlscert", "", "Certificate file for TLS")
flag.StringVar(&certKey, "tlskey", "", "Certificate key for TLS")
flag.BoolVar(&enforceRepoClass, "enforce-class", false, "Enforce policy for single repository class")
flag.Parse()
if debug {
logrus.SetLevel(logrus.DebugLevel)
}
if pkFile == "" {
issuer.SigningKey, err = libtrust.GenerateECP256PrivateKey()
if err != nil {
logrus.Fatalf("Error generating private key: %v", err)
}
logrus.Debugf("Using newly generated key with id %s", issuer.SigningKey.KeyID())
} else {
issuer.SigningKey, err = libtrust.LoadKeyFile(pkFile)
if err != nil {
logrus.Fatalf("Error loading key file %s: %v", pkFile, err)
}
logrus.Debugf("Loaded private key with id %s", issuer.SigningKey.KeyID())
}
if realm == "" {
logrus.Fatalf("Must provide realm")
}
ac, err := auth.GetAccessController("htpasswd", map[string]interface{}{
"realm": realm,
"path": passwdFile,
})
if err != nil {
logrus.Fatalf("Error initializing access controller: %v", err)
}
// TODO: Make configurable
issuer.Expiration = 15 * time.Minute
ctx := context.Background()
ts := &tokenServer{
issuer: issuer,
accessController: ac,
refreshCache: map[string]refreshToken{},
}
router := mux.NewRouter()
router.Path("/token/").Methods("GET").Handler(handlerWithContext(ctx, ts.getToken))
router.Path("/token/").Methods("POST").Handler(handlerWithContext(ctx, ts.postToken))
if cert == "" {
err = http.ListenAndServe(addr, router)
} else if certKey == "" {
logrus.Fatalf("Must provide certficate (-tlscert) and key (-tlskey)")
} else {
err = http.ListenAndServeTLS(addr, cert, certKey, router)
}
if err != nil {
logrus.Infof("Error serving: %v", err)
}
}
// handlerWithContext wraps the given context-aware handler by setting up the
// request context from a base context.
func handlerWithContext(ctx context.Context, handler func(context.Context, http.ResponseWriter, *http.Request)) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := context.WithRequest(ctx, r)
logger := context.GetRequestLogger(ctx)
ctx = context.WithLogger(ctx, logger)
handler(ctx, w, r)
})
}
func handleError(ctx context.Context, err error, w http.ResponseWriter) {
ctx, w = context.WithResponseWriter(ctx, w)
if serveErr := errcode.ServeJSON(w, err); serveErr != nil {
context.GetResponseLogger(ctx).Errorf("error sending error response: %v", serveErr)
return
}
context.GetResponseLogger(ctx).Info("application error")
}
var refreshCharacters = []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
const refreshTokenLength = 15
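// newRefreshToken generates a random refresh token. Note that math/rand is
// not cryptographically secure; that is acceptable for this test-only server.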
func newRefreshToken() string {
s := make([]rune, refreshTokenLength)
for i := range s {
s[i] = refreshCharacters[rand.Intn(len(refreshCharacters))]
}
return string(s)
}
type refreshToken struct {
subject string
service string
}
type tokenServer struct {
issuer *TokenIssuer
accessController auth.AccessController
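// refreshCache is read and written by concurrent request handlers without
// locking; a production server would need to synchronize access.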
refreshCache map[string]refreshToken
}
type tokenResponse struct {
Token string `json:"access_token"`
RefreshToken string `json:"refresh_token,omitempty"`
ExpiresIn int `json:"expires_in,omitempty"`
}
var repositoryClassCache = map[string]string{}
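// filterAccessList keeps only the requested access entries the authenticated
// subject is entitled to: repositories under the subject's namespace (subject
// to the optional repository-class policy) and the registry catalog resource.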
func filterAccessList(ctx context.Context, scope string, requestedAccessList []auth.Access) []auth.Access {
if !strings.HasSuffix(scope, "/") {
scope = scope + "/"
}
grantedAccessList := make([]auth.Access, 0, len(requestedAccessList))
for _, access := range requestedAccessList {
if access.Type == "repository" {
if !strings.HasPrefix(access.Name, scope) {
context.GetLogger(ctx).Debugf("Resource scope not allowed: %s", access.Name)
continue
}
if enforceRepoClass {
if class, ok := repositoryClassCache[access.Name]; ok {
if class != access.Class {
context.GetLogger(ctx).Debugf("Different repository class: %q, previously %q", access.Class, class)
continue
}
} else if strings.EqualFold(access.Action, "push") {
repositoryClassCache[access.Name] = access.Class
}
}
} else if access.Type == "registry" {
if access.Name != "catalog" {
context.GetLogger(ctx).Debugf("Unknown registry resource: %s", access.Name)
continue
}
// TODO: Limit some actions to "admin" users
} else {
context.GetLogger(ctx).Debugf("Skipping unsupported resource type: %s", access.Type)
continue
}
grantedAccessList = append(grantedAccessList, access)
}
return grantedAccessList
}
type acctSubject struct{}
func (acctSubject) String() string { return "acctSubject" }
type requestedAccess struct{}
func (requestedAccess) String() string { return "requestedAccess" }
type grantedAccess struct{}
func (grantedAccess) String() string { return "grantedAccess" }
// getToken handles authenticating the request and authorizing access to the
// requested scopes.
func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *http.Request) {
context.GetLogger(ctx).Info("getToken")
params := r.URL.Query()
service := params.Get("service")
scopeSpecifiers := params["scope"]
var offline bool
if offlineStr := params.Get("offline_token"); offlineStr != "" {
var err error
offline, err = strconv.ParseBool(offlineStr)
if err != nil {
handleError(ctx, ErrorBadTokenOption.WithDetail(err), w)
return
}
}
requestedAccessList := ResolveScopeSpecifiers(ctx, scopeSpecifiers)
authorizedCtx, err := ts.accessController.Authorized(ctx, requestedAccessList...)
if err != nil {
challenge, ok := err.(auth.Challenge)
if !ok {
handleError(ctx, err, w)
return
}
// Get response context.
ctx, w = context.WithResponseWriter(ctx, w)
challenge.SetHeaders(w)
handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail(challenge.Error()), w)
context.GetResponseLogger(ctx).Info("get token authentication challenge")
return
}
ctx = authorizedCtx
username := context.GetStringValue(ctx, "auth.user.name")
ctx = context.WithValue(ctx, acctSubject{}, username)
ctx = context.WithLogger(ctx, context.GetLogger(ctx, acctSubject{}))
context.GetLogger(ctx).Info("authenticated client")
ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList)
ctx = context.WithLogger(ctx, context.GetLogger(ctx, requestedAccess{}))
grantedAccessList := filterAccessList(ctx, username, requestedAccessList)
ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList)
ctx = context.WithLogger(ctx, context.GetLogger(ctx, grantedAccess{}))
token, err := ts.issuer.CreateJWT(username, service, grantedAccessList)
if err != nil {
handleError(ctx, err, w)
return
}
context.GetLogger(ctx).Info("authorized client")
response := tokenResponse{
Token: token,
ExpiresIn: int(ts.issuer.Expiration.Seconds()),
}
if offline {
response.RefreshToken = newRefreshToken()
ts.refreshCache[response.RefreshToken] = refreshToken{
subject: username,
service: service,
}
}
ctx, w = context.WithResponseWriter(ctx, w)
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(response)
context.GetResponseLogger(ctx).Info("get token complete")
}
type postTokenResponse struct {
Token string `json:"access_token"`
Scope string `json:"scope,omitempty"`
ExpiresIn int `json:"expires_in,omitempty"`
IssuedAt string `json:"issued_at,omitempty"`
RefreshToken string `json:"refresh_token,omitempty"`
}
// postToken handles authenticating the request and authorizing access to the
// requested scopes.
func (ts *tokenServer) postToken(ctx context.Context, w http.ResponseWriter, r *http.Request) {
grantType := r.PostFormValue("grant_type")
if grantType == "" {
handleError(ctx, ErrorMissingRequiredField.WithDetail("missing grant_type value"), w)
return
}
service := r.PostFormValue("service")
if service == "" {
handleError(ctx, ErrorMissingRequiredField.WithDetail("missing service value"), w)
return
}
clientID := r.PostFormValue("client_id")
if clientID == "" {
handleError(ctx, ErrorMissingRequiredField.WithDetail("missing client_id value"), w)
return
}
var offline bool
switch r.PostFormValue("access_type") {
case "", "online":
case "offline":
offline = true
default:
handleError(ctx, ErrorUnsupportedValue.WithDetail("unknown access_type value"), w)
return
}
requestedAccessList := ResolveScopeList(ctx, r.PostFormValue("scope"))
var subject string
var rToken string
switch grantType {
case "refresh_token":
rToken = r.PostFormValue("refresh_token")
if rToken == "" {
handleError(ctx, ErrorUnsupportedValue.WithDetail("missing refresh_token value"), w)
return
}
rt, ok := ts.refreshCache[rToken]
if !ok || rt.service != service {
handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail("invalid refresh token"), w)
return
}
subject = rt.subject
case "password":
ca, ok := ts.accessController.(auth.CredentialAuthenticator)
if !ok {
handleError(ctx, ErrorUnsupportedValue.WithDetail("password grant type not supported"), w)
return
}
subject = r.PostFormValue("username")
if subject == "" {
handleError(ctx, ErrorUnsupportedValue.WithDetail("missing username value"), w)
return
}
password := r.PostFormValue("password")
if password == "" {
handleError(ctx, ErrorUnsupportedValue.WithDetail("missing password value"), w)
return
}
if err := ca.AuthenticateUser(subject, password); err != nil {
handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail("invalid credentials"), w)
return
}
default:
handleError(ctx, ErrorUnsupportedValue.WithDetail("unknown grant_type value"), w)
return
}
ctx = context.WithValue(ctx, acctSubject{}, subject)
ctx = context.WithLogger(ctx, context.GetLogger(ctx, acctSubject{}))
context.GetLogger(ctx).Info("authenticated client")
ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList)
ctx = context.WithLogger(ctx, context.GetLogger(ctx, requestedAccess{}))
grantedAccessList := filterAccessList(ctx, subject, requestedAccessList)
ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList)
ctx = context.WithLogger(ctx, context.GetLogger(ctx, grantedAccess{}))
token, err := ts.issuer.CreateJWT(subject, service, grantedAccessList)
if err != nil {
handleError(ctx, err, w)
return
}
context.GetLogger(ctx).Info("authorized client")
response := postTokenResponse{
Token: token,
ExpiresIn: int(ts.issuer.Expiration.Seconds()),
IssuedAt: time.Now().UTC().Format(time.RFC3339),
Scope: ToScopeList(grantedAccessList),
}
if offline {
rToken = newRefreshToken()
ts.refreshCache[rToken] = refreshToken{
subject: subject,
service: service,
}
}
if rToken != "" {
response.RefreshToken = rToken
}
ctx, w = context.WithResponseWriter(ctx, w)
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(response)
context.GetResponseLogger(ctx).Info("post token complete")
}

View file

@@ -0,0 +1,219 @@
package main
import (
"crypto"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"regexp"
"strings"
"time"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/auth"
"github.com/docker/distribution/registry/auth/token"
"github.com/docker/libtrust"
)
// ResolveScopeSpecifiers converts a list of scope specifiers from a token
// request's `scope` query parameters into a list of standard access objects.
func ResolveScopeSpecifiers(ctx context.Context, scopeSpecs []string) []auth.Access {
requestedAccessSet := make(map[auth.Access]struct{}, 2*len(scopeSpecs))
for _, scopeSpecifier := range scopeSpecs {
// There should be 3 parts, separated by a `:` character.
parts := strings.SplitN(scopeSpecifier, ":", 3)
if len(parts) != 3 {
context.GetLogger(ctx).Infof("ignoring unsupported scope format %s", scopeSpecifier)
continue
}
resourceType, resourceName, actions := parts[0], parts[1], parts[2]
resourceType, resourceClass := splitResourceClass(resourceType)
if resourceType == "" {
continue
}
// Actions should be a comma-separated list of actions.
for _, action := range strings.Split(actions, ",") {
requestedAccess := auth.Access{
Resource: auth.Resource{
Type: resourceType,
Class: resourceClass,
Name: resourceName,
},
Action: action,
}
// Add this access to the requested access set.
requestedAccessSet[requestedAccess] = struct{}{}
}
}
requestedAccessList := make([]auth.Access, 0, len(requestedAccessSet))
for requestedAccess := range requestedAccessSet {
requestedAccessList = append(requestedAccessList, requestedAccess)
}
return requestedAccessList
}
var typeRegexp = regexp.MustCompile(`^([a-z0-9]+)(\([a-z0-9]+\))?$`)
func splitResourceClass(t string) (string, string) {
matches := typeRegexp.FindStringSubmatch(t)
if len(matches) < 2 {
return "", ""
}
if len(matches) == 2 || len(matches[2]) < 2 {
return matches[1], ""
}
return matches[1], matches[2][1 : len(matches[2])-1]
}
// ResolveScopeList converts a scope list from a token request's
// `scope` parameter into a list of standard access objects.
func ResolveScopeList(ctx context.Context, scopeList string) []auth.Access {
scopes := strings.Split(scopeList, " ")
return ResolveScopeSpecifiers(ctx, scopes)
}
func scopeString(a auth.Access) string {
if a.Class != "" {
return fmt.Sprintf("%s(%s):%s:%s", a.Type, a.Class, a.Name, a.Action)
}
return fmt.Sprintf("%s:%s:%s", a.Type, a.Name, a.Action)
}
// ToScopeList converts a list of access to a
// scope list string
func ToScopeList(access []auth.Access) string {
var s []string
for _, a := range access {
s = append(s, scopeString(a))
}
return strings.Join(s, ",")
}
// TokenIssuer represents an issuer capable of generating JWT tokens
type TokenIssuer struct {
Issuer string
SigningKey libtrust.PrivateKey
Expiration time.Duration
}
// CreateJWT creates and signs a JSON Web Token for the given subject and
// audience with the granted access.
func (issuer *TokenIssuer) CreateJWT(subject string, audience string, grantedAccessList []auth.Access) (string, error) {
// Make a set of access entries to put in the token's claimset.
resourceActionSets := make(map[auth.Resource]map[string]struct{}, len(grantedAccessList))
for _, access := range grantedAccessList {
actionSet, exists := resourceActionSets[access.Resource]
if !exists {
actionSet = map[string]struct{}{}
resourceActionSets[access.Resource] = actionSet
}
actionSet[access.Action] = struct{}{}
}
accessEntries := make([]*token.ResourceActions, 0, len(resourceActionSets))
for resource, actionSet := range resourceActionSets {
actions := make([]string, 0, len(actionSet))
for action := range actionSet {
actions = append(actions, action)
}
accessEntries = append(accessEntries, &token.ResourceActions{
Type: resource.Type,
Class: resource.Class,
Name: resource.Name,
Actions: actions,
})
}
randomBytes := make([]byte, 15)
_, err := io.ReadFull(rand.Reader, randomBytes)
if err != nil {
return "", err
}
randomID := base64.URLEncoding.EncodeToString(randomBytes)
now := time.Now()
signingHash := crypto.SHA256
var alg string
switch issuer.SigningKey.KeyType() {
case "RSA":
alg = "RS256"
case "EC":
alg = "ES256"
default:
panic(fmt.Errorf("unsupported signing key type %q", issuer.SigningKey.KeyType()))
}
joseHeader := token.Header{
Type: "JWT",
SigningAlg: alg,
}
if x5c := issuer.SigningKey.GetExtendedField("x5c"); x5c != nil {
joseHeader.X5c = x5c.([]string)
} else {
var jwkMessage json.RawMessage
jwkMessage, err = issuer.SigningKey.PublicKey().MarshalJSON()
if err != nil {
return "", err
}
joseHeader.RawJWK = &jwkMessage
}
exp := issuer.Expiration
if exp == 0 {
exp = 5 * time.Minute
}
claimSet := token.ClaimSet{
Issuer: issuer.Issuer,
Subject: subject,
Audience: audience,
Expiration: now.Add(exp).Unix(),
NotBefore: now.Unix(),
IssuedAt: now.Unix(),
JWTID: randomID,
Access: accessEntries,
}
var (
joseHeaderBytes []byte
claimSetBytes []byte
)
if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil {
return "", fmt.Errorf("unable to encode jose header: %s", err)
}
if claimSetBytes, err = json.Marshal(claimSet); err != nil {
return "", fmt.Errorf("unable to encode claim set: %s", err)
}
encodedJoseHeader := joseBase64Encode(joseHeaderBytes)
encodedClaimSet := joseBase64Encode(claimSetBytes)
encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet)
var signatureBytes []byte
if signatureBytes, _, err = issuer.SigningKey.Sign(strings.NewReader(encodingToSign), signingHash); err != nil {
return "", fmt.Errorf("unable to sign jwt payload: %s", err)
}
signature := joseBase64Encode(signatureBytes)
return fmt.Sprintf("%s.%s", encodingToSign, signature), nil
}
func joseBase64Encode(data []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(data), "=")
}

16 vendor/github.com/docker/distribution/docs/README.md generated vendored Normal file
View file

@@ -0,0 +1,16 @@
# The docs have been moved!
The documentation for Registry has been merged into
[the general documentation repo](https://github.com/docker/docker.github.io).
Commit history has been preserved.
The docs for Registry are now here:
https://github.com/docker/docker.github.io/tree/master/registry
> Note: The definitive [./spec](spec/) directory and
[configuration.md](configuration.md) file will be maintained in this repository
and be refreshed periodically in
[the general documentation repo](https://github.com/docker/docker.github.io).
As always, the docs in the general repo remain open-source and we appreciate
your feedback and pull requests!

View file

@@ -0,0 +1,52 @@
---
published: false
---
# Architecture
## Design
**TODO(stevvooe):** Discuss the architecture of the registry, internally and externally, in a few different deployment scenarios.
### Eventual Consistency
> **NOTE:** This section belongs somewhere, perhaps in a design document. We
> are leaving this here so the information is not lost.
Running the registry on eventually consistent backends has been part of the
design from the beginning. This section covers some of the approaches to
dealing with this reality.
There are a few classes of issues that we need to worry about when
implementing something on top of the storage drivers:
1. Read-After-Write consistency (see this [article on
s3](http://shlomoswidler.com/2009/12/read-after-write-consistency-in-amazon.html)).
2. [Write-Write Conflicts](http://en.wikipedia.org/wiki/Write%E2%80%93write_conflict).
In reality, the registry must worry about these kinds of errors when doing the
following:
1. Accepting data into a temporary upload file that may not yet have the
latest data block (read-after-write).
2. Moving uploaded data into its blob location (write-write race).
3. Modifying the "current" manifest for given tag (write-write race).
4. A whole slew of operations around deletes (read-after-write, delete-write
races, garbage collection, etc.).
The backend path layout employs a few techniques to avoid these problems:
1. Large writes are done to private upload directories. This alleviates most
of the corruption potential by ensuring each upload has only a single
writer.
2. Constraints in storage driver implementations, such as support for writing
after the end of a file to extend it.
3. Digest verification to avoid data corruption.
4. Manifest files are stored by digest and cannot change.
5. All other non-content files (links, hashes, etc.) are written as an atomic
unit. Anything that requires additions and deletions is broken out into
separate "files". Last writer still wins.
Unfortunately, one must play this game when trying to build something like
this on top of eventually consistent storage systems. If we run into serious
problems, we can wrap the storage drivers in a shared consistency layer, but
that would increase complexity and hinder registry cluster performance.

File diff suppressed because it is too large

5485 vendor/github.com/docker/distribution/docs/spec/api.md generated vendored Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,12 @@
---
title: "Docker Registry Token Authentication"
description: "Docker Registry v2 authentication schema"
keywords: ["registry, on-prem, images, tags, repository, distribution, authentication, advanced"]
---
# Docker Registry v2 authentication
See the [Token Authentication Specification](token.md),
[Token Authentication Implementation](jwt.md),
[Token Scope Documentation](scope.md),
[OAuth2 Token Authentication](oauth.md) for more information.

View file

@@ -0,0 +1,329 @@
---
title: "Token Authentication Implementation"
description: "Describe the reference implementation of the Docker Registry v2 authentication schema"
keywords: ["registry, on-prem, images, tags, repository, distribution, JWT authentication, advanced"]
---
# Docker Registry v2 Bearer token specification
This specification covers the `docker/distribution` implementation of the
v2 Registry's authentication schema. Specifically, it describes the JSON
Web Token schema that `docker/distribution` has adopted to implement the
client-opaque Bearer token issued by an authentication service and
understood by the registry.
This document borrows heavily from the [JSON Web Token Draft Spec](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32)
## Getting a Bearer Token
For this example, the client makes an HTTP GET request to the following URL:
```
https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push
```
The token server should first attempt to authenticate the client using any
authentication credentials provided with the request. As of Docker 1.8, the
registry client in the Docker Engine only supports Basic Authentication to
these token servers. If an attempt to authenticate to the token server fails,
the token server should return a `401 Unauthorized` response indicating that
the provided credentials are invalid.
Whether the token server requires authentication is up to the policy of that
access control provider. Some requests may require authentication to determine
access (such as pushing or pulling a private repository) while others may not
(such as pulling from a public repository).
After authenticating the client (which may simply be an anonymous client if
no attempt was made to authenticate), the token server must next query its
access control list to determine whether the client has the requested scope. In
this example request, if I have authenticated as user `jlhawn`, the token
server will determine what access I have to the repository `samalba/my-app`
hosted by the entity `registry.docker.io`.
Once the token server has determined what access the client has to the
resources requested in the `scope` parameter, it will take the intersection of
the set of requested actions on each resource and the set of actions that the
client has in fact been granted. If the client only has a subset of the
requested access **it must not be considered an error** as it is not the
responsibility of the token server to indicate authorization errors as part of
this workflow.
Continuing with the example request, the token server will find that the
client's set of granted access to the repository is `[pull, push]` which when
intersected with the requested access `[pull, push]` yields an equal set. If
the granted access set was found only to be `[pull]` then the intersected set
would only be `[pull]`. If the client has no access to the repository then the
intersected set would be empty, `[]`.
It is this intersected set of access which is placed in the returned token.
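Expressed in Go, the intersection is simple set arithmetic. A minimal sketch, assuming the requested and granted actions for a single resource arrive as plain string slices (the function name is ours, not the implementation's):
```go
package main

import "fmt"

// intersect returns the actions present in both the requested and granted
// lists; this intersected set is what ends up in the issued token.
func intersect(requested, granted []string) []string {
	grantedSet := make(map[string]struct{}, len(granted))
	for _, action := range granted {
		grantedSet[action] = struct{}{}
	}
	var out []string
	for _, action := range requested {
		if _, ok := grantedSet[action]; ok {
			out = append(out, action)
		}
	}
	return out
}

func main() {
	// Requested [pull, push] against granted [pull] yields [pull],
	// matching the example above.
	fmt.Println(intersect([]string{"pull", "push"}, []string{"pull"}))
}
```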
The server will now construct a JSON Web Token to sign and return. A JSON Web
Token has 3 main parts:
1. Headers
The header of a JSON Web Token is a standard JOSE header. The "typ" field
will be "JWT" and it will also contain the "alg" which identifies the
signing algorithm used to produce the signature. It also must have a "kid"
field, representing the ID of the key which was used to sign the token.
The "kid" field has to be in a libtrust fingerprint compatible format.
Such a format can be generated by the following steps:
1. Take the DER encoded public key which the JWT token was signed against.
2. Create a SHA256 hash out of it and truncate it to 240 bits.
3. Split the result into 12 base32 encoded groups with `:` as delimiter.
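A sketch of that fingerprint derivation, assuming the DER bytes of the public key are already available (the function name is ours):
```go
package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
	"strings"
)

// keyIDFromDER derives a libtrust-style "kid": SHA256 of the DER-encoded
// public key, truncated to 240 bits (30 bytes), base32 encoded (48
// characters, no padding), and split into 12 groups of 4 joined by ':'.
func keyIDFromDER(der []byte) string {
	sum := sha256.Sum256(der)
	encoded := base32.StdEncoding.EncodeToString(sum[:30])
	groups := make([]string, 0, 12)
	for i := 0; i < len(encoded); i += 4 {
		groups = append(groups, encoded[i:i+4])
	}
	return strings.Join(groups, ":")
}

func main() {
	fmt.Println(keyIDFromDER([]byte("placeholder DER bytes")))
}
```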
Here is an example JOSE Header for a JSON Web Token (formatted with
whitespace for readability):
```
{
"typ": "JWT",
"alg": "ES256",
"kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"
}
```
It specifies that this object is a JSON Web Token signed with the key
with the given ID, using the Elliptic Curve signature algorithm with a
SHA256 hash.
2. Claim Set
The Claim Set is a JSON struct containing these standard registered claim
name fields:
<dl>
<dt>
<code>iss</code> (Issuer)
</dt>
<dd>
The issuer of the token, typically the fqdn of the authorization
server.
</dd>
<dt>
<code>sub</code> (Subject)
</dt>
<dd>
The subject of the token; the name or id of the client which
requested it. This should be empty (`""`) if the client did not
authenticate.
</dd>
<dt>
<code>aud</code> (Audience)
</dt>
<dd>
The intended audience of the token; the name or id of the service
which will verify the token to authorize the client/subject.
</dd>
<dt>
<code>exp</code> (Expiration)
</dt>
<dd>
The token should only be considered valid up to this specified date
and time.
</dd>
<dt>
<code>nbf</code> (Not Before)
</dt>
<dd>
The token should not be considered valid before this specified date
and time.
</dd>
<dt>
<code>iat</code> (Issued At)
</dt>
<dd>
Specifies the date and time which the Authorization server
generated this token.
</dd>
<dt>
<code>jti</code> (JWT ID)
</dt>
<dd>
A unique identifier for this token. Can be used by the intended
audience to prevent replays of the token.
</dd>
</dl>
The Claim Set will also contain a private claim name unique to this
authorization server specification:
<dl>
<dt>
<code>access</code>
</dt>
<dd>
An array of access entry objects with the following fields:
<dl>
<dt>
<code>type</code>
</dt>
<dd>
The type of resource hosted by the service.
</dd>
<dt>
<code>name</code>
</dt>
<dd>
The name of the resource of the given type hosted by the
service.
</dd>
<dt>
<code>actions</code>
</dt>
<dd>
An array of strings which give the actions authorized on
this resource.
</dd>
</dl>
</dd>
</dl>
Here is an example of such a JWT Claim Set (formatted with whitespace for
readability):
```
{
"iss": "auth.docker.com",
"sub": "jlhawn",
"aud": "registry.docker.com",
"exp": 1415387315,
"nbf": 1415387015,
"iat": 1415387015,
"jti": "tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws",
"access": [
{
"type": "repository",
"name": "samalba/my-app",
"actions": [
"pull",
"push"
]
}
]
}
```
3. Signature
The authorization server will produce a JOSE header and Claim Set with no
extraneous whitespace, i.e., the JOSE Header from above would be
```
{"typ":"JWT","alg":"ES256","kid":"PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"}
```
and the Claim Set from above would be
```
{"iss":"auth.docker.com","sub":"jlhawn","aud":"registry.docker.com","exp":1415387315,"nbf":1415387015,"iat":1415387015,"jti":"tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws","access":[{"type":"repository","name":"samalba/my-app","actions":["push","pull"]}]}
```
The UTF-8 representations of this JOSE header and Claim Set are then
URL-safe base64 encoded (without trailing '=' padding), producing:
```
eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0
```
for the JOSE Header and
```
eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
```
for the Claim Set. These two are concatenated using a '.' character,
yielding the string:
```
eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
```
This is then used as the payload to the `ES256` signature algorithm
specified in the JOSE header and specified fully in [Section 3.4 of the JSON Web Algorithms (JWA)
draft specification](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-38#section-3.4)
This example signature will use the following ECDSA key for the server:
```
{
"kty": "EC",
"crv": "P-256",
"kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6",
"d": "R7OnbfMaD5J2jl7GeE8ESo7CnHSBm_1N2k9IXYFrKJA",
"x": "m7zUpx3b-zmVE5cymSs64POG9QcyEpJaYCD82-549_Q",
"y": "dU3biz8sZ_8GPB-odm8Wxz3lNDr1xcAQQPQaOcr1fmc"
}
```
A resulting signature of the above payload using this key is:
```
QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w
```
Concatenating all of these together with a `.` character gives the
resulting JWT:
```
eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w
```
This can now be placed in an HTTP response and returned to the client to use to
authenticate to the audience service:
```
HTTP/1.1 200 OK
Content-Type: application/json
{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w"}
```
## Using the signed token
Once the client has a token, it will try the registry request again with the
token placed in the HTTP `Authorization` header like so:
```
Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw
```
This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1)
## Verifying the token
The registry must now verify the token presented by the user by inspecting the
claim set within. The registry will:
- Ensure that the issuer (`iss` claim) is an authority it trusts.
- Ensure that the registry identifies as the audience (`aud` claim).
- Check that the current time is between the `nbf` and `exp` claim times.
- If enforcing single-use tokens, check that the JWT ID (`jti` claim) value has
not been seen before.
- To enforce this, the registry may keep a record of `jti`s it has seen for
up to the `exp` time of the token to prevent token replays.
- Check the `access` claim value and use the identified resources and the list
of actions authorized to determine whether the token grants the required
level of access for the operation the client is attempting to perform.
- Verify that the signature of the token is valid.
If any of these requirements are not met, the registry will return a
`403 Forbidden` response to indicate that the token is invalid.
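A minimal sketch of the identity and time-window checks above, assuming the claim set has already been decoded; signature verification and `jti` replay tracking are omitted:
```go
package main

import (
	"fmt"
	"time"
)

// ClaimSet holds the registered claims checked below.
type ClaimSet struct {
	Issuer     string `json:"iss"`
	Audience   string `json:"aud"`
	Expiration int64  `json:"exp"`
	NotBefore  int64  `json:"nbf"`
}

func checkClaims(c ClaimSet, trustedIssuer, service string, now time.Time) error {
	if c.Issuer != trustedIssuer {
		return fmt.Errorf("untrusted issuer %q", c.Issuer)
	}
	if c.Audience != service {
		return fmt.Errorf("token audience %q does not match this registry", c.Audience)
	}
	if t := now.Unix(); t < c.NotBefore || t >= c.Expiration {
		return fmt.Errorf("token not valid at %v", now)
	}
	return nil
}

func main() {
	// Values taken from the example claim set earlier in this document.
	c := ClaimSet{Issuer: "auth.docker.com", Audience: "registry.docker.com",
		NotBefore: 1415387015, Expiration: 1415387315}
	fmt.Println(checkClaims(c, "auth.docker.com", "registry.docker.com", time.Unix(1415387100, 0)))
}
```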
**Note**: it is only at this point in the workflow that an authorization error
may occur. The token server should *not* return errors when the user does not
have the requested authorization. Instead, the returned token should indicate
whatever of the requested scope the client does have (the intersection of
requested and granted access). If the token does not supply proper
authorization then the registry will return the appropriate error.
At no point in this process should the registry need to call back to the
authorization server. The registry only needs to be supplied with the trusted
public keys to verify the token signatures.

View file

@@ -0,0 +1,190 @@
---
title: "Oauth2 Token Authentication"
description: "Specifies the Docker Registry v2 authentication"
keywords: ["registry, on-prem, images, tags, repository, distribution, oauth2, advanced"]
---
# Docker Registry v2 authentication using OAuth2
This document describes support for the OAuth2 protocol within the authorization
server. [RFC6749](https://tools.ietf.org/html/rfc6749) should be used as a
reference for the protocol and HTTP endpoints described here.
**Note**: Not all token servers implement OAuth2. If the request to the endpoint
returns `404` using the HTTP `POST` method, refer to
[Token Documentation](token.md) for using the HTTP `GET` method supported by all
token servers.
## Refresh token format
The format of the refresh token is completely opaque to the client and should be
determined by the authorization server. The authorization server should ensure
the token is sufficiently long and is responsible for storing any information about
long-lived tokens which may be needed for revoking. Any information stored
inside the token will not be extracted and presented by clients.
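One possible way to mint such an opaque token, offered as an assumption rather than part of the specification:
```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// newOpaqueRefreshToken returns a random, URL-safe token. The client can
// extract nothing from it; any state lives server-side, keyed by the token.
func newOpaqueRefreshToken() (string, error) {
	b := make([]byte, 24) // 192 bits of entropy
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(b), nil
}

func main() {
	t, err := newOpaqueRefreshToken()
	if err != nil {
		panic(err)
	}
	fmt.Println(t)
}
```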
## Getting a token
POST /token
#### Headers
Content-Type: application/x-www-form-urlencoded
#### Post parameters
<dl>
<dt>
<code>grant_type</code>
</dt>
<dd>
(REQUIRED) Type of grant used to get token. When getting a refresh token
using credentials this type should be set to "password" and have the
accompanying username and password parameters. Type "authorization_code"
is reserved for future use for authenticating to an authorization server
without having to send credentials directly from the client. When
requesting an access token with a refresh token this should be set to
"refresh_token".
</dd>
<dt>
<code>service</code>
</dt>
<dd>
(REQUIRED) The name of the service which hosts the resource to get
access for. Refresh tokens will only be good for getting tokens for
this service.
</dd>
<dt>
<code>client_id</code>
</dt>
<dd>
(REQUIRED) String identifying the client. This client_id does not need
to be registered with the authorization server but should be set to a
meaningful value in order to allow auditing keys created by unregistered
clients. Accepted syntax is defined in
[RFC6749 Appendix A.1](https://tools.ietf.org/html/rfc6749#appendix-A.1)
</dd>
<dt>
<code>access_type</code>
</dt>
<dd>
(OPTIONAL) Access which is being requested. If "offline" is provided
then a refresh token will be returned. The default is "online" only
returning short lived access token. If the grant type is "refresh_token"
this will only return the same refresh token and not a new one.
</dd>
<dt>
<code>scope</code>
</dt>
<dd>
(OPTIONAL) The resource in question, formatted as one of the space-delimited
entries from the <code>scope</code> parameters from the <code>WWW-Authenticate</code> header
shown above. This query parameter should only be specified once but may
contain multiple scopes using the scope list format defined in the scope
grammar. If multiple <code>scope</code> values are provided in the
<code>WWW-Authenticate</code> header, the scopes should first be
converted to a scope list before requesting the token. The above example
would be specified as: <code>scope=repository:samalba/my-app:push</code>.
When requesting a refresh token the scopes may be empty since the
refresh token will not be limited by this scope, only the provided short
lived access token will have the scope limitation.
</dd>
<dt>
<code>refresh_token</code>
</dt>
<dd>
(OPTIONAL) The refresh token to use for authentication when grant type "refresh_token" is used.
</dd>
<dt>
<code>username</code>
</dt>
<dd>
(OPTIONAL) The username to use for authentication when grant type "password" is used.
</dd>
<dt>
<code>password</code>
</dt>
<dd>
(OPTIONAL) The password to use for authentication when grant type "password" is used.
</dd>
</dl>
#### Response fields
<dl>
<dt>
<code>access_token</code>
</dt>
<dd>
(REQUIRED) An opaque <code>Bearer</code> token that clients should
supply to subsequent requests in the <code>Authorization</code> header.
The client should not attempt to parse or understand this token, but
should treat it as an opaque string.
</dd>
<dt>
<code>scope</code>
</dt>
<dd>
(REQUIRED) The scope granted inside the access token. This may be the
same scope as requested or a subset. This requirement is stronger than
specified in [RFC6749 Section 4.2.2](https://tools.ietf.org/html/rfc6749#section-4.2.2)
by strictly requiring the scope in the return value.
</dd>
<dt>
<code>expires_in</code>
</dt>
<dd>
(REQUIRED) The duration in seconds since the token was issued that it
will remain valid. When omitted, this defaults to 60 seconds. For
compatibility with older clients, a token should never be returned with
less than 60 seconds to live.
</dd>
<dt>
<code>issued_at</code>
</dt>
<dd>
(Optional) The <a href="https://www.ietf.org/rfc/rfc3339.txt">RFC3339</a>-serialized UTC
standard time at which a given token was issued. If <code>issued_at</code> is omitted, the
expiration is from when the token exchange completed.
</dd>
<dt>
<code>refresh_token</code>
</dt>
<dd>
(Optional) Token which can be used to get additional access tokens for
the same subject with different scopes. This token should be kept secure
by the client and only sent to the authorization server which issues
bearer tokens. This field will only be set when `access_type=offline` is
provided in the request.
</dd>
</dl>
#### Example getting refresh token
```
POST /token HTTP/1.1
Host: auth.docker.io
Content-Type: application/x-www-form-urlencoded
grant_type=password&username=johndoe&password=A3ddj3w&service=hub.docker.io&client_id=dockerengine&access_type=offline
HTTP/1.1 200 OK
Content-Type: application/json
{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5","expires_in":900,"scope":""}
```
#### Example refreshing an Access Token
```
POST /token HTTP/1.1
Host: auth.docker.io
Content-Type: application/x-www-form-urlencoded
grant_type=refresh_token&refresh_token=kas9Da81Dfa8&service=registry-1.docker.io&client_id=dockerengine&scope=repository:samalba/my-app:pull,push
HTTP/1.1 200 OK
Content-Type: application/json
{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5":"expires_in":900,"scope":"repository:samalba/my-app:pull,repository:samalba/my-app:push"}
```
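A client-side sketch of the password-grant exchange above, using only the Go standard library; the endpoint and credentials are the example values, not real ones:
```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

type tokenResponse struct {
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`
	ExpiresIn    int    `json:"expires_in"`
	Scope        string `json:"scope"`
}

func main() {
	// Post the form fields shown in the example request above.
	resp, err := http.PostForm("https://auth.docker.io/token", url.Values{
		"grant_type":  {"password"},
		"username":    {"johndoe"},
		"password":    {"A3ddj3w"},
		"service":     {"hub.docker.io"},
		"client_id":   {"dockerengine"},
		"access_type": {"offline"},
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var tr tokenResponse
	if err := json.NewDecoder(resp.Body).Decode(&tr); err != nil {
		panic(err)
	}
	fmt.Printf("access token expires in %ds; refresh token %q\n", tr.ExpiresIn, tr.RefreshToken)
}
```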

View file

@@ -0,0 +1,148 @@
---
title: "Token Scope Documentation"
description: "Describes the scope and access fields used for registry authorization tokens"
keywords: ["registry, on-prem, images, tags, repository, distribution, advanced, access, scope"]
---
# Docker Registry Token Scope and Access
Tokens used by the registry are always restricted in what resources they may
be used to access, where those resources may be accessed, and what actions
may be done on those resources. Tokens always have the context of a user which
the token was originally created for. This document describes how these
restrictions are represented and enforced by the authorization server and
resource providers.
## Scope Components
### Subject (Authenticated User)
The subject represents the user for which a token is valid. Any actions
performed using an access token should be considered on behalf of the subject.
This is included in the `sub` field of the access token JWT. A refresh token should
be limited to a single subject and only be able to give out access tokens for
that subject.
### Audience (Resource Provider)
The audience represents a resource provider which is intended to be able to
perform the actions specified in the access token. Any resource provider which
does not match the audience should not use that access token. The audience is
included in the `aud` field of the access token JWT. A refresh token should be
limited to a single audience and only be able to give out access tokens for that
audience.
### Resource Type
The resource type represents the type of resource which the resource name is
intended to represent. This type may be specific to a resource provider but must
be understood by the authorization server in order to validate the subject
is authorized for a specific resource.
#### Resource Class
The resource type might have a resource class which further classifies the
resource name within the resource type. A class is not required and
is specific to the resource type.
#### Example Resource Types
- `repository` - represents a single repository within a registry. A
repository may represent many manifest or content blobs, but the resource type
is considered the collections of those items. Actions which may be performed on
a `repository` are `pull` for accessing the collection and `push` for adding to
it. By default the `repository` type has the class of `image`.
- `repository(plugin)` - represents a single repository of plugins within a
registry. A plugin repository has the same content and actions as a repository.
- `registry` - represents the entire registry. Used for administrative actions
or lookup operations that span an entire registry.
### Resource Name
The resource name represents the name which identifies a resource for a resource
provider. A resource is identified by this name and the provided resource type.
An example of a resource name would be the name component of an image tag, such
as "samalba/myapp" or "hostname/samalba/myapp".
### Resource Actions
The resource actions define the actions which the access token allows to be
performed on the identified resource. These actions are type specific but will
normally have actions identifying read and write access on the resource. Examples
for the `repository` type are `pull` for read access and `push` for write
access.
## Authorization Server Use
Each access token request may include a scope and an audience. The subject is
always derived from the passed in credentials or refresh token. When using
a refresh token the passed in audience must match the audience defined for
the refresh token. The audience (resource provider) is provided using the
`service` field. Multiple resource scopes may be provided using multiple `scope`
fields on the `GET` request. The `POST` request only takes in a single
`scope` field but may use a space to separate a list of multiple resource
scopes.
### Resource Scope Grammar
```
scope := resourcescope [ ' ' resourcescope ]*
resourcescope := resourcetype ":" resourcename ":" action [ ',' action ]*
resourcetype := resourcetypevalue [ '(' resourcetypevalue ')' ]
resourcetypevalue := /[a-z0-9]+/
resourcename := [ hostname '/' ] component [ '/' component ]*
hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
port-number := /[0-9]+/
action := /[a-z]*/
component := alpha-numeric [ separator alpha-numeric ]*
alpha-numeric := /[a-z0-9]+/
separator := /[_.]|__|[-]*/
```
Full reference grammar is defined
[here](https://godoc.org/github.com/docker/distribution/reference). Currently
the scope name grammar is a subset of the reference grammar.
> **NOTE:** The `resourcename` may contain one `:` due to a possible port
> number in the hostname component of the `resourcename`, so a naive
> implementation that interprets the first three `:`-delimited tokens of a
> `scope` to be the `resourcetype`, `resourcename`, and a list of `action`
> would be insufficient.
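A sketch of a parser that honors that caveat by splitting the type at the first `:` and the action list at the last `:`, so a port in the hostname survives (the function name is ours):
```go
package main

import (
	"fmt"
	"strings"
)

// parseResourceScope splits "type:name:action[,action...]" where name may
// itself contain one ':' (a hostname port number), per the note above.
func parseResourceScope(s string) (resType, resName string, actions []string, err error) {
	first := strings.Index(s, ":")
	last := strings.LastIndex(s, ":")
	if first < 0 || first == last {
		return "", "", nil, fmt.Errorf("invalid resource scope %q", s)
	}
	return s[:first], s[first+1 : last], strings.Split(s[last+1:], ","), nil
}

func main() {
	t, n, a, err := parseResourceScope("repository:registry.example.com:5000/samalba/my-app:pull,push")
	if err != nil {
		panic(err)
	}
	// Prints: repository registry.example.com:5000/samalba/my-app [pull push]
	fmt.Println(t, n, a)
}
```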
## Resource Provider Use
Once a resource provider has verified the authenticity of the scope through
JWT access token verification, the resource provider must ensure that scope
satisfies the request. The resource provider should match the given audience
according to the name or URI the resource provider uses to identify itself. Any
denial based on subject is not defined here and is up to the resource provider;
the subject is mainly provided for audit logs and any other user-specific rules
which may need to be applied but are not defined by the authorization server.
The resource provider must ensure that ANY resource being accessed as the
result of a request has the appropriate access scope. Both the resource type
and resource name must match the accessed resource and an appropriate action
scope must be included.
When appropriate authorization is not provided, either due to lack of scope
or a missing token, the resource provider should return a `WWW-Authenticate` HTTP
header with the `realm` as the authorization server, the `service` as the
expected audience identifying string, and a `scope` field for each required
resource scope to complete the request.
## JWT Access Tokens
Each JWT access token may only have a single subject and audience but multiple
resource scopes. The subject and audience are put into standard JWT fields
`sub` and `aud`. The resource scope is put into the `access` field. The
structure of the access field can be seen in the
[jwt documentation](jwt.md).
## Refresh Tokens
A refresh token must be defined for a single subject and audience. Further
restricting scope to specific type, name, and actions combinations should be
done by fetching an access token using the refresh token. Since the refresh
token is not scoped to specific resources for an audience, extra care should
be taken to only use the refresh token to negotiate new access tokens directly
with the authorization server, and never with a resource provider.

View file

@@ -0,0 +1,250 @@
---
title: "Token Authentication Specification"
description: "Specifies the Docker Registry v2 authentication"
keywords: ["registry, on-prem, images, tags, repository, distribution, Bearer authentication, advanced"]
---
# Docker Registry v2 authentication via central service
This document outlines the v2 Docker registry authentication scheme:
![v2 registry auth](../../images/v2-registry-auth.png)
1. Attempt to begin a push/pull operation with the registry.
2. If the registry requires authorization it will return a `401 Unauthorized`
HTTP response with information on how to authenticate.
3. The registry client makes a request to the authorization service for a
Bearer token.
4. The authorization service returns an opaque Bearer token representing the
client's authorized access.
5. The client retries the original request with the Bearer token embedded in
the request's Authorization header.
6. The Registry authorizes the client by validating the Bearer token and the
claim set embedded within it and begins the push/pull session as usual.
## Requirements
- Registry clients which can understand and respond to token auth challenges
returned by the resource server.
- An authorization server capable of managing access controls to their
resources hosted by any given service (such as repositories in a Docker
Registry).
- A Docker Registry capable of trusting the authorization server to sign tokens
which clients can use for authorization and the ability to verify these
tokens for single use or for use during a sufficiently short period of time.
## Authorization Server Endpoint Descriptions
The described server is meant to serve as a standalone access control manager
for resources hosted by other services which wish to authenticate and manage
authorizations using a separate access control manager.
A service like this is used by the official Docker Registry to authenticate
clients and verify their authorization to Docker image repositories.
As of Docker 1.6, the registry client within the Docker Engine has been updated
to handle such an authorization workflow.
## How to authenticate
Registry V1 clients first contact the index to initiate a push or pull. Under
the Registry V2 workflow, clients should contact the registry first. If the
registry server requires authentication it will return a `401 Unauthorized`
response with a `WWW-Authenticate` header detailing how to authenticate to this
registry.
For example, say I (username `jlhawn`) am attempting to push an image to the
repository `samalba/my-app`. For the registry to authorize this, I will need
`push` access to the `samalba/my-app` repository. The registry will first
return this response:
```
HTTP/1.1 401 Unauthorized
Content-Type: application/json; charset=utf-8
Docker-Distribution-Api-Version: registry/2.0
Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push"
Date: Thu, 10 Sep 2015 19:32:31 GMT
Content-Length: 235
Strict-Transport-Security: max-age=31536000
{"errors":[{"code":"UNAUTHORIZED","message":"access to the requested resource is not authorized","detail":[{"Type":"repository","Name":"samalba/my-app","Action":"pull"},{"Type":"repository","Name":"samalba/my-app","Action":"push"}]}]}
```
Note the HTTP Response Header indicating the auth challenge:
```
Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push"
```
This format is documented in [Section 3 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-3)
This challenge indicates that the registry requires a token issued by the
specified token server and that the request the client is attempting will
need to include sufficient access entries in its claim set. To respond to this
challenge, the client will need to make a `GET` request to the URL
`https://auth.docker.io/token` using the `service` and `scope` values from the
`WWW-Authenticate` header.
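As a sketch, such a request might look like the following in Go, with the `service` and `scope` values taken from the example challenge and error handling reduced to panics:
```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Build the token URL from the challenge's realm, service, and scope.
	v := url.Values{}
	v.Set("service", "registry.docker.io")
	v.Add("scope", "repository:samalba/my-app:pull,push")

	resp, err := http.Get("https://auth.docker.io/token?" + v.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var body struct {
		Token string `json:"token"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}
	fmt.Println("Authorization: Bearer " + body.Token)
}
```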
## Requesting a Token
Defines getting a bearer and refresh token using the token endpoint.
#### Query Parameters
<dl>
<dt>
<code>service</code>
</dt>
<dd>
The name of the service which hosts the resource.
</dd>
<dt>
<code>offline_token</code>
</dt>
<dd>
Whether to return a refresh token along with the bearer token. A refresh
token is capable of getting additional bearer tokens for the same
subject with different scopes. The refresh token does not have an
expiration and should be considered completely opaque to the client.
</dd>
<dt>
<code>client_id</code>
</dt>
<dd>
String identifying the client. This client_id does not need
to be registered with the authorization server but should be set to a
meaningful value in order to allow auditing keys created by unregistered
clients. Accepted syntax is defined in
[RFC6749 Appendix A.1](https://tools.ietf.org/html/rfc6749#appendix-A.1).
</dd>
<dt>
<code>scope</code>
</dt>
<dd>
The resource in question, formatted as one of the space-delimited
entries from the <code>scope</code> parameters from the <code>WWW-Authenticate</code> header
shown above. This query parameter should be specified multiple times if
there is more than one <code>scope</code> entry from the <code>WWW-Authenticate</code>
header. The above example would be specified as:
<code>scope=repository:samalba/my-app:push</code>. The scope field may
be empty to request a refresh token without providing any resource
permissions to the returned bearer token.
</dd>
</dl>
#### Token Response Fields
<dl>
<dt>
<code>token</code>
</dt>
<dd>
An opaque <code>Bearer</code> token that clients should supply to subsequent
requests in the <code>Authorization</code> header.
</dd>
<dt>
<code>access_token</code>
</dt>
<dd>
For compatibility with OAuth 2.0, we will also accept <code>token</code> under the name
<code>access_token</code>. At least one of these fields <b>must</b> be specified, but
both may also appear (for compatibility with older clients). When both are specified,
they should be equivalent; if they differ the client's choice is undefined.
</dd>
<dt>
<code>expires_in</code>
</dt>
<dd>
(Optional) The duration in seconds since the token was issued that it
will remain valid. When omitted, this defaults to 60 seconds. For
compatibility with older clients, a token should never be returned with
less than 60 seconds to live.
</dd>
<dt>
<code>issued_at</code>
</dt>
<dd>
(Optional) The <a href="https://www.ietf.org/rfc/rfc3339.txt">RFC3339</a>-serialized UTC
standard time at which a given token was issued. If <code>issued_at</code> is omitted, the
expiration is from when the token exchange completed.
</dd>
<dt>
<code>refresh_token</code>
</dt>
<dd>
(Optional) Token which can be used to get additional access tokens for
the same subject with different scopes. This token should be kept secure
by the client and only sent to the authorization server which issues
bearer tokens. This field will only be set when `offline_token=true` is
provided in the request.
</dd>
</dl>
#### Example
For this example, the client makes an HTTP GET request to the following URL:
```
https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push
```
The token server should first attempt to authenticate the client using any
authentication credentials provided with the request. From Docker 1.11, the
Docker Engine supports both Basic Authentication and [OAuth2](oauth.md) for
getting tokens. In Docker 1.10 and before, the registry client in the Docker
Engine only supports Basic Authentication. If an attempt to authenticate to the token
server fails, the token server should return a `401 Unauthorized` response
indicating that the provided credentials are invalid.
Whether the token server requires authentication is up to the policy of that
access control provider. Some requests may require authentication to determine
access (such as pushing or pulling a private repository) while others may not
(such as pulling from a public repository).
After authenticating the client (which may simply be an anonymous client if
no attempt was made to authenticate), the token server must next query its
access control list to determine whether the client has the requested scope. In
this example request, if I have authenticated as user `jlhawn`, the token
server will determine what access I have to the repository `samalba/my-app`
hosted by the entity `registry.docker.io`.
Once the token server has determined what access the client has to the
resources requested in the `scope` parameter, it will take the intersection of
the set of requested actions on each resource and the set of actions that the
client has in fact been granted. If the client only has a subset of the
requested access **it must not be considered an error** as it is not the
responsibility of the token server to indicate authorization errors as part of
this workflow.
Continuing with the example request, the token server will find that the
client's set of granted access to the repository is `[pull, push]` which when
intersected with the requested access `[pull, push]` yields an equal set. If
the granted access set was found only to be `[pull]` then the intersected set
would only be `[pull]`. If the client has no access to the repository then the
intersected set would be empty, `[]`.
It is this intersected set of access which is placed in the returned token.
The server then constructs an implementation-specific token with this
intersected set of access, and returns it to the Docker client to use to
authenticate to the audience service (within the indicated window of time):
```
HTTP/1.1 200 OK
Content-Type: application/json
{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w", "expires_in": 3600,"issued_at": "2009-11-10T23:00:00Z"}
```
## Using the Bearer token
Once the client has a token, it will try the registry request again with the
token placed in the HTTP `Authorization` header like so:
```
Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw
```
This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1)

View file

@@ -0,0 +1,30 @@
---
published: false
---
# Distribution API Implementations
This is a list of known implementations of the Distribution API spec.
## [Docker Distribution Registry](https://github.com/docker/distribution)
Docker distribution is the reference implementation of the distribution API
specification. It aims to fully implement the entire specification.
### Releases
#### 2.0.1 (_in development_)
Implements API 2.0.1
_Known Issues_
- No resumable push support
- Content ranges ignored
- Blob upload status will always return a starting range of 0
#### 2.0.0
Implements API 2.0.0
_Known Issues_
- No resumable push support
- No PATCH implementation for blob upload
- Content ranges ignored

View file

@@ -0,0 +1,12 @@
---
title: "Reference Overview"
description: "Explains registry JSON objects"
keywords: ["registry, service, images, repository, json"]
---
# Docker Registry Reference
* [HTTP API V2](api.md)
* [Storage Driver](../storage-drivers/index.md)
* [Token Authentication Specification](auth/token.md)
* [Token Authentication Implementation](auth/jwt.md)

View file

@@ -0,0 +1,90 @@
---
published: false
title: "Docker Distribution JSON Canonicalization"
description: "Explains registry JSON objects"
keywords: ["registry, service, images, repository, json"]
---
# Docker Distribution JSON Canonicalization
To provide consistent content hashing of JSON objects throughout Docker
Distribution APIs, the specification defines a canonical JSON format. Adopting
such a canonicalization also aids in caching JSON responses.
Note that protocols should not be designed to depend on identical JSON being
generated across different versions or clients. The canonicalization rules are
merely useful for caching and consistency.
## Rules
Compliant JSON should conform to the following rules:
1. All generated JSON should comply with [RFC
7159](http://www.ietf.org/rfc/rfc7159.txt).
2. Resulting "JSON text" shall always be encoded in UTF-8.
3. Unless a canonical key order is defined for a particular schema, object
keys shall always appear in lexically sorted order.
4. All whitespace between tokens should be removed.
5. No "trailing commas" are allowed in object or array definitions.
6. The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e".
Ampersand "&" is escaped to "\u0026".
## Examples
The following is a simple example of a canonicalized JSON string:
```json
{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]}
```
## Reference
### Other Canonicalizations
The OLPC project specifies [Canonical
JSON](http://wiki.laptop.org/go/Canonical_JSON). While this is used in
[TUF](http://theupdateframework.com/), which may be used with other
distribution-related protocols, this alternative format has been proposed in
case the original source changes. Specifications complying with either this
specification or an alternative should explicitly call out the
canonicalization format. Except for key ordering, this specification is mostly
compatible.
### Go
In Go, the [`encoding/json`](http://golang.org/pkg/encoding/json/) library
will emit canonical JSON by default. Simply using `json.Marshal` will suffice
in most cases:
```go
incoming := map[string]interface{}{
"asdf": 1,
"qwer": []interface{}{},
"zxcv": []interface{}{
map[string]interface{}{},
true,
int(1e9),
"tyui",
},
}
canonical, err := json.Marshal(incoming)
if err != nil {
// ... handle error
}
```
To apply canonical JSON format spacing to an existing serialized JSON buffer, one
can use
[`json.Indent`](http://golang.org/src/encoding/json/indent.go?s=1918:1989#L65)
with the following arguments:
```go
incoming := getBytes()
var canonical bytes.Buffer
if err := json.Indent(&canonical, incoming, "", ""); err != nil {
// ... handle error
}
```

View file

@@ -0,0 +1,163 @@
---
title: "Image Manifest V 2, Schema 1 "
description: "image manifest for the Registry."
keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"]
---
# Image Manifest Version 2, Schema 1
This document outlines the format of the V2 image manifest. The image
manifest described herein was introduced in the Docker daemon in the [v1.3.0
release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453).
It is a provisional manifest to provide compatibility with the [V1 Image
format](https://github.com/docker/docker/blob/master/image/spec/v1.md), as the
requirements are defined for the [V2 Schema 2
image](https://github.com/docker/distribution/pull/62).
Image manifests describe the various constituents of a docker image. Image
manifests can be serialized to JSON format with the following media types:
Manifest Type | Media Type
------------- | -------------
manifest | "application/vnd.docker.distribution.manifest.v1+json"
signed manifest | "application/vnd.docker.distribution.manifest.v1+prettyjws"
*Note that "application/json" will also be accepted for schema 1.*
References:
- [Proposal: JSON Registry API V2.1](https://github.com/docker/docker/issues/9015)
- [Proposal: Provenance step 1 - Transform images for validation and verification](https://github.com/docker/docker/issues/8093)
## *Manifest* Field Descriptions
Manifest provides the base accessible fields for working with V2 image format
in the registry.
- **`name`** *string*
name is the name of the image's repository
- **`tag`** *string*
tag is the tag of the image
- **`architecture`** *string*
architecture is the host architecture on which this image is intended to
run. This is for informational purposes and is not currently used by the engine.
- **`fsLayers`** *array*
fsLayers is a list of filesystem layer blob sums contained in this image.
An fsLayer is a struct consisting of the following fields
- **`blobSum`** *digest.Digest*
blobSum is the digest of the referenced filesystem image layer. A
digest must be a sha256 hash.
- **`history`** *array*
history is a list of unstructured historical data for v1 compatibility. It
contains the ID of the image layer and the IDs of the layer's parent layers.
history is a struct consisting of the following fields
- **`v1Compatibility`** string
V1Compatibility is the raw V1 compatibility information. This will
contain the JSON object describing the V1 of this image.
- **`schemaVersion`** *int*
SchemaVersion is the image manifest schema that this image follows.
>**Note**: the length of `history` must be equal to the length of `fsLayers` and
>entries in each are correlated by index.
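As a rough sketch, the fields above map onto Go structs like the following; this is illustrative only, and the canonical definitions live in the `docker/distribution` manifest packages:
```go
package manifest

// FSLayer references a filesystem layer blob by digest.
type FSLayer struct {
	BlobSum string `json:"blobSum"` // sha256 digest of the layer
}

// History carries raw v1 compatibility JSON for one layer.
type History struct {
	V1Compatibility string `json:"v1Compatibility"`
}

// Manifest is the schema 1 image manifest; len(History) must equal
// len(FSLayers), with entries correlated by index.
type Manifest struct {
	Name          string    `json:"name"`
	Tag           string    `json:"tag"`
	Architecture  string    `json:"architecture"`
	FSLayers      []FSLayer `json:"fsLayers"`
	History       []History `json:"history"`
	SchemaVersion int       `json:"schemaVersion"`
}
```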
## Signed Manifests
Signed manifests provide an envelope for a signed image manifest. A signed
manifest consists of an image manifest along with an additional field
containing the signature of the manifest.
The docker client can verify signed manifests and display a message to the user.
### Signing Manifests
Image manifests can be signed in two different ways: with a *libtrust* private
key or an x509 certificate chain. When signing with an x509 certificate chain,
the public key of the first element in the chain must be the public key
corresponding to the signing key.
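As a sketch, signing with a *libtrust* private key could look like the
following (this assumes the `schema1` package's `Sign` helper and generates a
throwaway EC key; a real deployment would load an existing key or certificate
chain instead):
```go
import (
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/libtrust"
)

// signManifest wraps the manifest in a JWS envelope, yielding a signed manifest.
func signManifest(m *schema1.Manifest) (*schema1.SignedManifest, error) {
	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		return nil, err
	}
	return schema1.Sign(m, pk)
}
```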
### Signed Manifest Field Description
Signed manifests include an image manifest and a list of signatures generated
by *libtrust*. A signature consists of the following fields:
- **`header`** *[JOSE](http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2)*
A [JSON Web Signature](http://self-issued.info/docs/draft-ietf-jose-json-web-signature.html)
- **`signature`** *string*
A signature for the image manifest, signed by a *libtrust* private key
- **`protected`** *string*
The signed protected header
## Example Manifest
*Example showing the official 'hello-world' image manifest.*
```json
{
"name": "hello-world",
"tag": "latest",
"architecture": "amd64",
"fsLayers": [
{
"blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
},
{
"blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
},
{
"blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11"
},
{
"blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
}
],
"history": [
{
"v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
},
{
"v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
}
],
"schemaVersion": 1,
"signatures": [
{
"header": {
"jwk": {
"crv": "P-256",
"kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4",
"kty": "EC",
"x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A",
"y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010"
},
"alg": "ES256"
},
"signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg",
"protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ"
}
]
}
```


@ -0,0 +1,295 @@
---
title: "Image Manifest V 2, Schema 2 "
description: "image manifest for the Registry."
keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"]
---
# Image Manifest Version 2, Schema 2
This document outlines the format of the V2 image manifest, schema version 2.
The original (and provisional) image manifest for V2 (schema 1), was introduced
in the Docker daemon in the [v1.3.0
release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453)
and is specified in the [schema 1 manifest definition](manifest-v2-1.md).
This second schema version has two primary goals. The first is to allow
multi-architecture images, through a "fat manifest" which references image
manifests for platform-specific versions of an image. The second is to
move the Docker engine towards content-addressable images, by supporting
an image model where the image's configuration can be hashed to generate
an ID for the image.
# Media Types
The following media types are used by the manifest formats described here, and
the resources they reference:
- `application/vnd.docker.distribution.manifest.v1+json`: schema1 (existing manifest format)
- `application/vnd.docker.distribution.manifest.v2+json`: New image manifest format (schemaVersion = 2)
- `application/vnd.docker.distribution.manifest.list.v2+json`: Manifest list, aka "fat manifest"
- `application/vnd.docker.container.image.v1+json`: Container config JSON
- `application/vnd.docker.image.rootfs.diff.tar.gzip`: "Layer", as a gzipped tar
- `application/vnd.docker.image.rootfs.foreign.diff.tar.gzip`: "Layer", as a gzipped tar that should never be pushed
- `application/vnd.docker.plugin.v1+json`: Plugin config JSON
## Manifest List
The manifest list is the "fat manifest" which points to specific image manifests
for one or more platforms. Its use is optional, and relatively few images will
use one of these manifests. A client will distinguish a manifest list from an
image manifest based on the Content-Type returned in the HTTP response.
## *Manifest List* Field Descriptions
- **`schemaVersion`** *int*
This field specifies the image manifest schema version as an integer. This
schema uses version `2`.
- **`mediaType`** *string*
The MIME type of the manifest list. This should be set to
`application/vnd.docker.distribution.manifest.list.v2+json`.
- **`manifests`** *array*
The manifests field contains a list of manifests for specific platforms.
Fields of an object in the manifests list are:
- **`mediaType`** *string*
The MIME type of the referenced object. This will generally be
`application/vnd.docker.distribution.manifest.v2+json`, but it could also
be `application/vnd.docker.distribution.manifest.v1+json` if the manifest
list references a legacy schema-1 manifest.
- **`size`** *int*
The size in bytes of the object. This field exists so that a client
will have an expected size for the content before validating. If the
length of the retrieved content does not match the specified length,
the content should not be trusted.
- **`digest`** *string*
The digest of the content, as defined by the
[Registry V2 HTTP API Specification](api.md#digest-parameter).
- **`platform`** *object*
The platform object describes the platform on which the image in the
manifest runs. The full list of valid operating system and architecture
values is listed in the [Go language documentation for `$GOOS` and
`$GOARCH`](https://golang.org/doc/install/source#environment).
- **`architecture`** *string*
The architecture field specifies the CPU architecture, for example
`amd64` or `ppc64le`.
- **`os`** *string*
The os field specifies the operating system, for example
`linux` or `windows`.
- **`os.version`** *string*
The optional os.version field specifies the operating system version,
for example `10.0.10586`.
- **`os.features`** *array*
The optional os.features field specifies an array of strings,
each listing a required OS feature (for example on Windows
`win32k`).
- **`variant`** *string*
The optional variant field specifies a variant of the CPU, for
example `armv6l` to specify a particular CPU variant of the ARM CPU.
- **`features`** *array*
The optional features field specifies an array of strings, each
listing a required CPU feature (for example `sse4` or `aes`).
## Example Manifest List
*Example showing a simple manifest list pointing to image manifests for two platforms:*
```json
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
"manifests": [
{
"mediaType": "application/vnd.docker.image.manifest.v2+json",
"size": 7143,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
"platform": {
"architecture": "ppc64le",
"os": "linux",
}
},
{
"mediaType": "application/vnd.docker.image.manifest.v2+json",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
"platform": {
"architecture": "amd64",
"os": "linux",
"features": [
"sse4"
]
}
}
]
}
```
# Image Manifest
The image manifest provides a configuration and a set of layers for a container
image. It's the direct replacement for the schema-1 manifest.
## *Image Manifest* Field Descriptions
- **`schemaVersion`** *int*
This field specifies the image manifest schema version as an integer. This
schema uses version `2`.
- **`mediaType`** *string*
The MIME type of the manifest. This should be set to
`application/vnd.docker.distribution.manifest.v2+json`.
- **`config`** *object*
The config field references a configuration object for a container, by
digest. This configuration item is a JSON blob that the runtime uses
to set up the container. This new schema uses a tweaked version
of this configuration to allow image content-addressability on the
daemon side.
Fields of a config object are:
- **`mediaType`** *string*
The MIME type of the referenced object. This should generally be
`application/vnd.docker.container.image.v1+json`.
- **`size`** *int*
The size in bytes of the object. This field exists so that a client
will have an expected size for the content before validating. If the
length of the retrieved content does not match the specified length,
the content should not be trusted.
- **`digest`** *string*
The digest of the content, as defined by the
[Registry V2 HTTP API Specification](api.md#digest-parameter).
- **`layers`** *array*
The layer list is ordered starting from the base image (opposite order of schema1).
Fields of an item in the layers list are:
- **`mediaType`** *string*
The MIME type of the referenced object. This should
generally be `application/vnd.docker.image.rootfs.diff.tar.gzip`.
Layers of type
`application/vnd.docker.image.rootfs.foreign.diff.tar.gzip` may be
pulled from a remote location but they should never be pushed.
- **`size`** *int*
The size in bytes of the object. This field exists so that a client
will have an expected size for the content before validating. If the
length of the retrieved content does not match the specified length,
the content should not be trusted.
- **`digest`** *string*
The digest of the content, as defined by the
[Registry V2 HTTP API Specification](api.md#digest-parameter).
- **`urls`** *array*
Provides a list of URLs from which the content may be fetched. Content
should be verified against the `digest` and `size`. This field is
optional and uncommon.
## Example Image Manifest
*Example showing an image manifest:*
```json
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 7023,
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 32654,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 16724,
"digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 73109,
"digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
}
]
}
```
# Backward compatibility
The registry will continue to accept uploads of manifests in both the old and
new formats.
When pushing images, clients which support the new manifest format should first
construct a manifest in the new format. If uploading this manifest fails,
presumably because the registry only supports the old format, the client may
fall back to uploading a manifest in the old format.
When pulling images, clients indicate support for this new version of the
manifest format by sending the
`application/vnd.docker.distribution.manifest.v2+json` and
`application/vnd.docker.distribution.manifest.list.v2+json` media types in an
`Accept` header when making a request to the `manifests` endpoint. Updated
clients should check the `Content-Type` header to see whether the manifest
returned from the endpoint is in the old format, or is an image manifest or
manifest list in the new format.
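A minimal sketch of the client side of this negotiation (`registryURL`,
`name`, and `ref` are hypothetical placeholders):
```go
import "net/http"

func fetchManifest(registryURL, name, ref string) error {
	req, err := http.NewRequest("GET", registryURL+"/v2/"+name+"/manifests/"+ref, nil)
	if err != nil {
		return err
	}
	// Advertise support for both new-format media types.
	req.Header.Add("Accept", "application/vnd.docker.distribution.manifest.v2+json")
	req.Header.Add("Accept", "application/vnd.docker.distribution.manifest.list.v2+json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// The Content-Type header indicates which format the registry returned.
	switch resp.Header.Get("Content-Type") {
	case "application/vnd.docker.distribution.manifest.list.v2+json":
		// manifest list in the new format
	case "application/vnd.docker.distribution.manifest.v2+json":
		// image manifest in the new format
	default:
		// old (schema 1) format
	}
	return nil
}
```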
If the manifest being requested uses the new format, and the appropriate media
type is not present in an `Accept` header, the registry will assume that the
client cannot handle the manifest as-is, and rewrite it on the fly into the old
format. If the object that would otherwise be returned is a manifest list, the
registry will look up the appropriate manifest for the amd64 platform and
linux OS, rewrite that manifest into the old format if necessary, and return
the result to the client. If no suitable manifest is found in the manifest
list, the registry will return a 404 error.
One of the challenges in rewriting manifests to the old format is that the old
format involves an image configuration for each layer in the manifest, but the
new format only provides one image configuration. To work around this, the
registry will create synthetic image configurations for all layers except the
top layer. These image configurations will not result in runnable images on
their own, but only serve to fill in the parent chain in a compatible way.
The IDs in these synthetic configurations will be derived from hashes of their
respective blobs. The registry will create these configurations and their IDs
using the same scheme as Docker 1.10 when it creates a legacy manifest to push
to a registry which doesn't support the new format.
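Schematically, the builder code vendored later in this commit derives each
synthetic ID from the layer's blob digest and the parent ID:
```go
// v1 ID for a synthetic layer: hex of the digest over "<blobsum-hex> <parent-id>".
v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex()
```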


@ -0,0 +1,7 @@
---
title: "Reference"
description: "Explains registry JSON objects"
keywords: ["registry, service, images, repository, json"]
type: "menu"
identifier: "smn_registry_ref"
---


@ -0,0 +1,37 @@
package api
import (
"errors"
"net/http"
"github.com/docker/distribution/health"
)
var (
updater = health.NewStatusUpdater()
)
// DownHandler updates the manual_http_status check to always return an error
func DownHandler(w http.ResponseWriter, r *http.Request) {
if r.Method == "POST" {
updater.Update(errors.New("Manual Check"))
} else {
w.WriteHeader(http.StatusNotFound)
}
}
// UpHandler updates the manual_http_status check to always return nil
func UpHandler(w http.ResponseWriter, r *http.Request) {
if r.Method == "POST" {
updater.Update(nil)
} else {
w.WriteHeader(http.StatusNotFound)
}
}
// init sets up the two endpoints to bring the service up and down
func init() {
health.Register("manual_http_status", updater)
http.HandleFunc("/debug/health/down", DownHandler)
http.HandleFunc("/debug/health/up", UpHandler)
}


@ -0,0 +1,86 @@
package api
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/docker/distribution/health"
)
// TestGETDownHandlerDoesNotChangeStatus ensures that calling the endpoint
// /debug/health/down with METHOD GET returns a 404
func TestGETDownHandlerDoesNotChangeStatus(t *testing.T) {
recorder := httptest.NewRecorder()
req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/down", nil)
if err != nil {
t.Errorf("Failed to create request.")
}
DownHandler(recorder, req)
if recorder.Code != 404 {
t.Errorf("Did not get a 404.")
}
}
// TestGETUpHandlerDoesNotChangeStatus ensures that calling the endpoint
// /debug/health/up with METHOD GET returns a 404
func TestGETUpHandlerDoesNotChangeStatus(t *testing.T) {
recorder := httptest.NewRecorder()
req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/up", nil)
if err != nil {
t.Errorf("Failed to create request.")
}
UpHandler(recorder, req)
if recorder.Code != 404 {
t.Errorf("Did not get a 404.")
}
}
// TestPOSTDownHandlerChangeStatus ensures the endpoint /debug/health/down changes
// the status code of the response to 503
// This test is order dependent, and should come before TestPOSTUpHandlerChangeStatus
func TestPOSTDownHandlerChangeStatus(t *testing.T) {
recorder := httptest.NewRecorder()
req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/down", nil)
if err != nil {
t.Errorf("Failed to create request.")
}
DownHandler(recorder, req)
if recorder.Code != 200 {
t.Errorf("Did not get a 200.")
}
if len(health.CheckStatus()) != 1 {
t.Errorf("DownHandler didn't add an error check.")
}
}
// TestPOSTUpHandlerChangeStatus ensures the endpoint /debug/health/up changes
// the status code of the response to 200
func TestPOSTUpHandlerChangeStatus(t *testing.T) {
recorder := httptest.NewRecorder()
req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/up", nil)
if err != nil {
t.Errorf("Failed to create request.")
}
UpHandler(recorder, req)
if recorder.Code != 200 {
t.Errorf("Did not get a 200.")
}
if len(health.CheckStatus()) != 0 {
t.Errorf("UpHandler didn't remove the error check.")
}
}


@ -0,0 +1,62 @@
package checks
import (
"errors"
"net"
"net/http"
"os"
"strconv"
"time"
"github.com/docker/distribution/health"
)
// FileChecker checks the existence of a file and returns an error
// if the file exists.
func FileChecker(f string) health.Checker {
return health.CheckFunc(func() error {
if _, err := os.Stat(f); err == nil {
return errors.New("file exists")
}
return nil
})
}
// HTTPChecker does a HEAD request and verifies that the HTTP status code
// returned matches statusCode.
func HTTPChecker(r string, statusCode int, timeout time.Duration, headers http.Header) health.Checker {
return health.CheckFunc(func() error {
client := http.Client{
Timeout: timeout,
}
req, err := http.NewRequest("HEAD", r, nil)
if err != nil {
return errors.New("error creating request: " + r)
}
for headerName, headerValues := range headers {
for _, headerValue := range headerValues {
req.Header.Add(headerName, headerValue)
}
}
response, err := client.Do(req)
if err != nil {
return errors.New("error while checking: " + r)
}
defer response.Body.Close()
if response.StatusCode != statusCode {
return errors.New("downstream service returned unexpected status: " + strconv.Itoa(response.StatusCode))
}
return nil
})
}
// TCPChecker attempts to open a TCP connection.
func TCPChecker(addr string, timeout time.Duration) health.Checker {
return health.CheckFunc(func() error {
conn, err := net.DialTimeout("tcp", addr, timeout)
if err != nil {
return errors.New("connection to " + addr + " failed")
}
conn.Close()
return nil
})
}
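A sketch of wiring these checkers into the health package (the address, port,
and periods below are arbitrary examples):
```go
package main

import (
	"net/http"
	"time"

	"github.com/docker/distribution/health"
	"github.com/docker/distribution/health/checks"
)

func main() {
	// Dial a downstream dependency every 10s; /debug/health reports the most
	// recent result without blocking on the dial itself.
	health.Register("backend_tcp", health.PeriodicChecker(
		checks.TCPChecker("localhost:6379", 5*time.Second), 10*time.Second))
	// The health package's init already registered /debug/health on the
	// default mux, so serving it is enough.
	http.ListenAndServe(":5001", nil)
}
```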


@ -0,0 +1,25 @@
package checks
import (
"testing"
)
func TestFileChecker(t *testing.T) {
if err := FileChecker("/tmp").Check(); err == nil {
t.Errorf("/tmp was expected as exists")
}
if err := FileChecker("NoSuchFileFromMoon").Check(); err != nil {
t.Errorf("NoSuchFileFromMoon was expected as not exists, error:%v", err)
}
}
func TestHTTPChecker(t *testing.T) {
if err := HTTPChecker("https://www.google.cybertron", 200, 0, nil).Check(); err == nil {
t.Errorf("Google on Cybertron was expected as not exists")
}
if err := HTTPChecker("https://www.google.pt", 200, 0, nil).Check(); err != nil {
t.Errorf("Google at Portugal was expected as exists, error:%v", err)
}
}

vendor/github.com/docker/distribution/health/doc.go

@ -0,0 +1,130 @@
// Package health provides a generic health checking framework.
// The health package works expvar style. By importing the package the debug
// server is getting a "/debug/health" endpoint that returns the current
// status of the application.
// If there are no errors, "/debug/health" will return an HTTP 200 status,
// together with an empty JSON reply "{}". If there are any checks
// with errors, the JSON reply will include all the failed checks, and the
// response will have an HTTP 503 status.
//
// A Check can either be run synchronously, or asynchronously. We recommend
// that most checks are registered as an asynchronous check, so a call to the
// "/debug/health" endpoint always returns immediately. This pattern is
// particularly useful for checks that verify upstream connectivity or
// database status, since they might take a long time to return/timeout.
//
// Installing
//
// To install health, just import it in your application:
//
// import "github.com/docker/distribution/health"
//
// You can also (optionally) import "health/api" that will add two convenience
// endpoints: "/debug/health/down" and "/debug/health/up". These endpoints add
// "manual" checks that allow the service to quickly be brought in/out of
// rotation.
//
// import _ "github.com/docker/distribution/registry/health/api"
//
// # curl localhost:5001/debug/health
// {}
// # curl -X POST localhost:5001/debug/health/down
// # curl localhost:5001/debug/health
// {"manual_http_status":"Manual Check"}
//
// After importing these packages to your main application, you can start
// registering checks.
//
// Registering Checks
//
// The recommended way of registering checks is using a periodic Check.
// PeriodicChecks run on a certain schedule and asynchronously update the
// status of the check. This allows CheckStatus to return without blocking
// on an expensive check.
//
// A trivial example of a check that runs every 5 seconds and shuts down our
// server if the current minute is even, could be added as follows:
//
// func currentMinuteEvenCheck() error {
// m := time.Now().Minute()
// if m%2 == 0 {
// return errors.New("Current minute is even!")
// }
// return nil
// }
//
// health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5)
//
// Alternatively, you can also make use of "RegisterPeriodicThresholdFunc" to
// implement the exact same check, but add a threshold of failures after which
// the check will be unhealthy. This is particularly useful for flaky Checks,
// ensuring some stability of the service when handling them.
//
// health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4)
//
// The lowest-level way to interact with the health package is calling
// "Register" directly. Register allows you to pass in an arbitrary string and
// something that implements "Checker" and runs your check. If your check
// returns nil, it is considered healthy; if it returns an error, the health
// check endpoint "/debug/health" will start returning a 503 and list the
// specific check that failed.
//
// Assuming you wish to register a method called "currentMinuteEvenCheck()
// error" you could do that by doing:
//
// health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck))
//
// CheckFunc is a convenience type that implements Checker.
//
// Another way of registering a check could be by using an anonymous function
// and the convenience method RegisterFunc. An example that makes the status
// endpoint always return an error:
//
// health.RegisterFunc("my_check", func() error {
// return errors.New("This is an error!")
// })
//
// Examples
//
// You could also use the health checker mechanism to ensure your application
// only comes up if certain conditions are met, or to allow the developer to
// take the service out of rotation immediately. An example that checks
// database connectivity and immediately takes the server out of rotation on
// err:
//
// updater = health.NewStatusUpdater()
// health.RegisterFunc("database_check", func() error {
// return updater.Check()
// })
//
// conn, err := Connect(...) // database call here
// if err != nil {
// updater.Update(errors.New("Error connecting to the database: " + err.Error()))
// }
//
// You can also use the predefined Checkers that come included with the health
// package. First, import the checks:
//
// import "github.com/docker/distribution/health/checks
//
// After that you can make use of any of the provided checks. An example of
// using a `FileChecker` to take the application out of rotation if a certain
// file exists can be done as follows:
//
// health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5))
//
// After registering the check, it is trivial to take an application out of
// rotation from the console:
//
// # curl localhost:5001/debug/health
// {}
// # touch /tmp/disable
// # curl localhost:5001/debug/health
// {"fileChecker":"file exists"}
//
// You could also test the connectivity to a downstream service by using a
// "HTTPChecker", but ensure that you only mark the test unhealthy if there
// are a minimum of two failures in a row:
//
// health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2))
package health

vendor/github.com/docker/distribution/health/health.go

@ -0,0 +1,306 @@
package health
import (
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/api/errcode"
)
// A Registry is a collection of checks. Most applications will use the global
// registry defined in DefaultRegistry. However, unit tests may need to create
// separate registries to isolate themselves from other tests.
type Registry struct {
mu sync.RWMutex
registeredChecks map[string]Checker
}
// NewRegistry creates a new registry. This isn't necessary for normal use of
// the package, but may be useful for unit tests so individual tests have their
// own set of checks.
func NewRegistry() *Registry {
return &Registry{
registeredChecks: make(map[string]Checker),
}
}
// DefaultRegistry is the default registry where checks are registered. It is
// the registry used by the HTTP handler.
var DefaultRegistry *Registry
// Checker is the interface for a Health Checker
type Checker interface {
// Check returns nil if the service is okay.
Check() error
}
// CheckFunc is a convenience type to create functions that implement
// the Checker interface
type CheckFunc func() error
// Check implements the Checker interface, allowing any func() error method
// to be passed as a Checker
func (cf CheckFunc) Check() error {
return cf()
}
// Updater implements a health check that is explicitly set.
type Updater interface {
Checker
// Update updates the current status of the health check.
Update(status error)
}
// updater implements Checker and Updater, providing an asynchronous Update
// method.
// This allows us to have a Checker that returns the Check() call immediately
// not blocking on a potentially expensive check.
type updater struct {
mu sync.Mutex
status error
}
// Check implements the Checker interface
func (u *updater) Check() error {
u.mu.Lock()
defer u.mu.Unlock()
return u.status
}
// Update implements the Updater interface, allowing asynchronous access to
// the status of a Checker.
func (u *updater) Update(status error) {
u.mu.Lock()
defer u.mu.Unlock()
u.status = status
}
// NewStatusUpdater returns a new updater
func NewStatusUpdater() Updater {
return &updater{}
}
// thresholdUpdater implements Checker and Updater, providing an asynchronous Update
// method.
// This allows us to have a Checker that returns the Check() call immediately
// not blocking on a potentially expensive check.
type thresholdUpdater struct {
mu sync.Mutex
status error
threshold int
count int
}
// Check implements the Checker interface
func (tu *thresholdUpdater) Check() error {
tu.mu.Lock()
defer tu.mu.Unlock()
if tu.count >= tu.threshold {
return tu.status
}
return nil
}
// Update implements the Updater interface, allowing asynchronous
// access to the status of a Checker.
func (tu *thresholdUpdater) Update(status error) {
tu.mu.Lock()
defer tu.mu.Unlock()
if status == nil {
tu.count = 0
} else if tu.count < tu.threshold {
tu.count++
}
tu.status = status
}
// NewThresholdStatusUpdater returns a new thresholdUpdater
func NewThresholdStatusUpdater(t int) Updater {
return &thresholdUpdater{threshold: t}
}
// PeriodicChecker wraps an updater to provide a periodic checker
func PeriodicChecker(check Checker, period time.Duration) Checker {
u := NewStatusUpdater()
go func() {
t := time.NewTicker(period)
for {
<-t.C
u.Update(check.Check())
}
}()
return u
}
// PeriodicThresholdChecker wraps an updater to provide a periodic checker that
// uses a threshold before it changes status
func PeriodicThresholdChecker(check Checker, period time.Duration, threshold int) Checker {
tu := NewThresholdStatusUpdater(threshold)
go func() {
t := time.NewTicker(period)
for {
<-t.C
tu.Update(check.Check())
}
}()
return tu
}
// CheckStatus returns a map with all the current health check errors
func (registry *Registry) CheckStatus() map[string]string { // TODO(stevvooe) this needs a proper type
registry.mu.RLock()
defer registry.mu.RUnlock()
statusKeys := make(map[string]string)
for k, v := range registry.registeredChecks {
err := v.Check()
if err != nil {
statusKeys[k] = err.Error()
}
}
return statusKeys
}
// CheckStatus returns a map with all the current health check errors from the
// default registry.
func CheckStatus() map[string]string {
return DefaultRegistry.CheckStatus()
}
// Register associates the checker with the provided name.
func (registry *Registry) Register(name string, check Checker) {
if registry == nil {
registry = DefaultRegistry
}
registry.mu.Lock()
defer registry.mu.Unlock()
_, ok := registry.registeredChecks[name]
if ok {
panic("Check already exists: " + name)
}
registry.registeredChecks[name] = check
}
// Register associates the checker with the provided name in the default
// registry.
func Register(name string, check Checker) {
DefaultRegistry.Register(name, check)
}
// RegisterFunc allows the convenience of registering a checker directly from
// an arbitrary func() error.
func (registry *Registry) RegisterFunc(name string, check func() error) {
registry.Register(name, CheckFunc(check))
}
// RegisterFunc allows the convenience of registering a checker in the default
// registry directly from an arbitrary func() error.
func RegisterFunc(name string, check func() error) {
DefaultRegistry.RegisterFunc(name, check)
}
// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker
// from an arbitrary func() error.
func (registry *Registry) RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) {
registry.Register(name, PeriodicChecker(CheckFunc(check), period))
}
// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker
// in the default registry from an arbitrary func() error.
func RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) {
DefaultRegistry.RegisterPeriodicFunc(name, period, check)
}
// RegisterPeriodicThresholdFunc allows the convenience of registering a
// PeriodicChecker from an arbitrary func() error.
func (registry *Registry) RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) {
registry.Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold))
}
// RegisterPeriodicThresholdFunc allows the convenience of registering a
// PeriodicChecker in the default registry from an arbitrary func() error.
func RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) {
DefaultRegistry.RegisterPeriodicThresholdFunc(name, period, threshold, check)
}
// StatusHandler returns a JSON blob with all the currently registered Health Checks
// and their corresponding status.
// Returns 503 if any Error status exists, 200 otherwise
func StatusHandler(w http.ResponseWriter, r *http.Request) {
if r.Method == "GET" {
checks := CheckStatus()
status := http.StatusOK
// If there is an error, return 503
if len(checks) != 0 {
status = http.StatusServiceUnavailable
}
statusResponse(w, r, status, checks)
} else {
http.NotFound(w, r)
}
}
// Handler returns a handler that will return 503 response code if the health
// checks have failed. If everything is okay with the health checks, the
// handler will pass through to the provided handler. Use this handler to
// disable a web application when the health checks fail.
func Handler(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
checks := CheckStatus()
if len(checks) != 0 {
errcode.ServeJSON(w, errcode.ErrorCodeUnavailable.
WithDetail("health check failed: please see /debug/health"))
return
}
handler.ServeHTTP(w, r) // pass through
})
}
// statusResponse completes the request with a response describing the health
// of the service.
func statusResponse(w http.ResponseWriter, r *http.Request, status int, checks map[string]string) {
p, err := json.Marshal(checks)
if err != nil {
context.GetLogger(context.Background()).Errorf("error serializing health status: %v", err)
p, err = json.Marshal(struct {
ServerError string `json:"server_error"`
}{
ServerError: "Could not parse error message",
})
status = http.StatusInternalServerError
if err != nil {
context.GetLogger(context.Background()).Errorf("error serializing health status failure message: %v", err)
return
}
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Header().Set("Content-Length", fmt.Sprint(len(p)))
w.WriteHeader(status)
if _, err := w.Write(p); err != nil {
context.GetLogger(context.Background()).Errorf("error writing health status response body: %v", err)
}
}
// init registers the global /debug/health API endpoint and creates the default registry
func init() {
DefaultRegistry = NewRegistry()
http.HandleFunc("/debug/health", StatusHandler)
}
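A sketch of using `Handler` to gate an entire application on check status (the
mux and port are illustrative):
```go
package main

import (
	"net/http"

	"github.com/docker/distribution/health"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	// Traffic flows only while all registered checks pass; otherwise the
	// wrapper responds 503 and points clients at /debug/health.
	http.ListenAndServe(":8080", health.Handler(mux))
}
```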


@ -0,0 +1,107 @@
package health
import (
"errors"
"fmt"
"net/http"
"net/http/httptest"
"testing"
)
// TestReturns200IfThereAreNoChecks ensures that the result code of the health
// endpoint is 200 if there are not currently registered checks.
func TestReturns200IfThereAreNoChecks(t *testing.T) {
recorder := httptest.NewRecorder()
req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil)
if err != nil {
t.Errorf("Failed to create request.")
}
StatusHandler(recorder, req)
if recorder.Code != 200 {
t.Errorf("Did not get a 200.")
}
}
// TestReturns503IfThereAreErrorChecks ensures that the result code of the
// health endpoint is 503 if there are health checks with errors
func TestReturns503IfThereAreErrorChecks(t *testing.T) {
recorder := httptest.NewRecorder()
req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil)
if err != nil {
t.Errorf("Failed to create request.")
}
// Create a manual error
Register("some_check", CheckFunc(func() error {
return errors.New("This Check did not succeed")
}))
StatusHandler(recorder, req)
if recorder.Code != 503 {
t.Errorf("Did not get a 503.")
}
}
// TestHealthHandler ensures that our handler implementation correctly protects
// the web application when things aren't so healthy.
func TestHealthHandler(t *testing.T) {
// clear out existing checks.
DefaultRegistry = NewRegistry()
// protect an http server
handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent)
}))
// wrap it in our health handler
handler = Handler(handler)
// use this to swap check status
updater := NewStatusUpdater()
Register("test_check", updater)
// now, create a test server
server := httptest.NewServer(handler)
checkUp := func(t *testing.T, message string) {
resp, err := http.Get(server.URL)
if err != nil {
t.Fatalf("error getting success status: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusNoContent {
t.Fatalf("unexpected response code from server when %s: %d != %d", message, resp.StatusCode, http.StatusNoContent)
}
// NOTE(stevvooe): we really don't care about the body -- the format is
// not standardized or supported, yet.
}
checkDown := func(t *testing.T, message string) {
resp, err := http.Get(server.URL)
if err != nil {
t.Fatalf("error getting down status: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusServiceUnavailable {
t.Fatalf("unexpected response code from server when %s: %d != %d", message, resp.StatusCode, http.StatusServiceUnavailable)
}
}
// server should be up
checkUp(t, "initial health check")
// now, we fail the health check
updater.Update(fmt.Errorf("the server is now out of commission"))
checkDown(t, "server should be down") // should be down
// bring server back up
updater.Update(nil)
checkUp(t, "when server is back up") // now we should be back up.
}


@ -0,0 +1 @@
package manifest


@ -0,0 +1,155 @@
package manifestlist
import (
"encoding/json"
"errors"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
)
// MediaTypeManifestList specifies the mediaType for manifest lists.
const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
var SchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: MediaTypeManifestList,
}
func init() {
manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
m := new(DeserializedManifestList)
err := m.UnmarshalJSON(b)
if err != nil {
return nil, distribution.Descriptor{}, err
}
dgst := digest.FromBytes(b)
return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err
}
err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
}
// PlatformSpec specifies a platform where a particular image manifest is
// applicable.
type PlatformSpec struct {
// Architecture field specifies the CPU architecture, for example
// `amd64` or `ppc64`.
Architecture string `json:"architecture"`
// OS specifies the operating system, for example `linux` or `windows`.
OS string `json:"os"`
// OSVersion is an optional field specifying the operating system
// version, for example `10.0.10586`.
OSVersion string `json:"os.version,omitempty"`
// OSFeatures is an optional field specifying an array of strings,
// each listing a required OS feature (for example on Windows `win32k`).
OSFeatures []string `json:"os.features,omitempty"`
// Variant is an optional field specifying a variant of the CPU, for
// example `ppc64le` to specify a little-endian version of a PowerPC CPU.
Variant string `json:"variant,omitempty"`
// Features is an optional field specifying an array of strings, each
// listing a required CPU feature (for example `sse4` or `aes`).
Features []string `json:"features,omitempty"`
}
// A ManifestDescriptor references a platform-specific manifest.
type ManifestDescriptor struct {
distribution.Descriptor
// Platform specifies which platform the manifest pointed to by the
// descriptor runs on.
Platform PlatformSpec `json:"platform"`
}
// ManifestList references manifests for various platforms.
type ManifestList struct {
manifest.Versioned
// Config references the image configuration as a blob.
Manifests []ManifestDescriptor `json:"manifests"`
}
// References returns the distribution descriptors for the referenced image
// manifests.
func (m ManifestList) References() []distribution.Descriptor {
dependencies := make([]distribution.Descriptor, len(m.Manifests))
for i := range m.Manifests {
dependencies[i] = m.Manifests[i].Descriptor
}
return dependencies
}
// DeserializedManifestList wraps ManifestList with a copy of the original
// JSON.
type DeserializedManifestList struct {
ManifestList
// canonical is the canonical byte representation of the Manifest.
canonical []byte
}
// FromDescriptors takes a slice of descriptors, and returns a
// DeserializedManifestList which contains the resulting manifest list
// and its JSON representation.
func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) {
m := ManifestList{
Versioned: SchemaVersion,
}
m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors))
copy(m.Manifests, descriptors)
deserialized := DeserializedManifestList{
ManifestList: m,
}
var err error
deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
return &deserialized, err
}
// UnmarshalJSON populates a new ManifestList struct from JSON data.
func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error {
m.canonical = make([]byte, len(b), len(b))
// store manifest list in canonical
copy(m.canonical, b)
// Unmarshal canonical JSON into ManifestList object
var manifestList ManifestList
if err := json.Unmarshal(m.canonical, &manifestList); err != nil {
return err
}
m.ManifestList = manifestList
return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty,
// marshals the inner contents.
func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) {
if len(m.canonical) > 0 {
return m.canonical, nil
}
return nil, errors.New("JSON representation not initialized in DeserializedManifestList")
}
// Payload returns the raw content of the manifest list. The contents can be
// used to calculate the content identifier.
func (m DeserializedManifestList) Payload() (string, []byte, error) {
return m.MediaType, m.canonical, nil
}


@ -0,0 +1,111 @@
package manifestlist
import (
"bytes"
"encoding/json"
"reflect"
"testing"
"github.com/docker/distribution"
)
var expectedManifestListSerialization = []byte(`{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
"manifests": [
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 985,
"digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
"platform": {
"architecture": "amd64",
"os": "linux",
"features": [
"sse4"
]
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 2392,
"digest": "sha256:6346340964309634683409684360934680934608934608934608934068934608",
"platform": {
"architecture": "sun4m",
"os": "sunos"
}
}
]
}`)
func TestManifestList(t *testing.T) {
manifestDescriptors := []ManifestDescriptor{
{
Descriptor: distribution.Descriptor{
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
Size: 985,
MediaType: "application/vnd.docker.distribution.manifest.v2+json",
},
Platform: PlatformSpec{
Architecture: "amd64",
OS: "linux",
Features: []string{"sse4"},
},
},
{
Descriptor: distribution.Descriptor{
Digest: "sha256:6346340964309634683409684360934680934608934608934608934068934608",
Size: 2392,
MediaType: "application/vnd.docker.distribution.manifest.v2+json",
},
Platform: PlatformSpec{
Architecture: "sun4m",
OS: "sunos",
},
},
}
deserialized, err := FromDescriptors(manifestDescriptors)
if err != nil {
t.Fatalf("error creating DeserializedManifestList: %v", err)
}
mediaType, canonical, err := deserialized.Payload()
if err != nil {
t.Fatalf("error getting payload: %v", err)
}
if mediaType != MediaTypeManifestList {
t.Fatalf("unexpected media type: %s", mediaType)
}
// Check that the canonical field is the same as json.MarshalIndent
// with these parameters.
p, err := json.MarshalIndent(&deserialized.ManifestList, "", " ")
if err != nil {
t.Fatalf("error marshaling manifest list: %v", err)
}
if !bytes.Equal(p, canonical) {
t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p))
}
// Check that the canonical field has the expected value.
if !bytes.Equal(expectedManifestListSerialization, canonical) {
t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestListSerialization))
}
var unmarshalled DeserializedManifestList
if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil {
t.Fatalf("error unmarshaling manifest: %v", err)
}
if !reflect.DeepEqual(&unmarshalled, deserialized) {
t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized)
}
references := deserialized.References()
if len(references) != 2 {
t.Fatalf("unexpected number of references: %d", len(references))
}
for i := range references {
if !reflect.DeepEqual(references[i], manifestDescriptors[i].Descriptor) {
t.Fatalf("unexpected value %d returned by References: %v", i, references[i])
}
}
}


@ -0,0 +1,282 @@
package schema1
import (
"crypto/sha512"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/reference"
"github.com/docker/libtrust"
)
type diffID digest.Digest
// gzippedEmptyTar is a gzip-compressed version of an empty tar file
// (1024 NULL bytes)
var gzippedEmptyTar = []byte{
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}
// digestSHA256GzippedEmptyTar is the canonical sha256 digest of
// gzippedEmptyTar
const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
// configManifestBuilder is a type for constructing manifests from an image
// configuration and generic descriptors.
type configManifestBuilder struct {
// bs is a BlobService used to create empty layer tars in the
// blob store if necessary.
bs distribution.BlobService
// pk is the libtrust private key used to sign the final manifest.
pk libtrust.PrivateKey
// configJSON is configuration supplied when the ManifestBuilder was
// created.
configJSON []byte
// ref contains the name and optional tag provided to NewConfigManifestBuilder.
ref reference.Named
// descriptors is the set of descriptors referencing the layers.
descriptors []distribution.Descriptor
// emptyTarDigest is set to a valid digest if an empty tar has been
// put in the blob store; otherwise it is empty.
emptyTarDigest digest.Digest
}
// NewConfigManifestBuilder is used to build new manifests for the current
// schema version from an image configuration and a set of descriptors.
// It takes a BlobService so that it can add an empty tar to the blob store
// if the resulting manifest needs empty layers.
func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder {
return &configManifestBuilder{
bs: bs,
pk: pk,
configJSON: configJSON,
ref: ref,
}
}
// Build produces a final manifest from the given references
func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) {
type imageRootFS struct {
Type string `json:"type"`
DiffIDs []diffID `json:"diff_ids,omitempty"`
BaseLayer string `json:"base_layer,omitempty"`
}
type imageHistory struct {
Created time.Time `json:"created"`
Author string `json:"author,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
Comment string `json:"comment,omitempty"`
EmptyLayer bool `json:"empty_layer,omitempty"`
}
type imageConfig struct {
RootFS *imageRootFS `json:"rootfs,omitempty"`
History []imageHistory `json:"history,omitempty"`
Architecture string `json:"architecture,omitempty"`
}
var img imageConfig
if err := json.Unmarshal(mb.configJSON, &img); err != nil {
return nil, err
}
if len(img.History) == 0 {
return nil, errors.New("empty history when trying to create schema1 manifest")
}
if len(img.RootFS.DiffIDs) != len(mb.descriptors) {
return nil, fmt.Errorf("number of descriptors and number of layers in rootfs must match: len(%v) != len(%v)", img.RootFS.DiffIDs, mb.descriptors)
}
// Generate IDs for each layer
// For non-top-level layers, create fake V1Compatibility strings that
// fit the format and don't collide with anything else, but don't
// result in runnable images on their own.
type v1Compatibility struct {
ID string `json:"id"`
Parent string `json:"parent,omitempty"`
Comment string `json:"comment,omitempty"`
Created time.Time `json:"created"`
ContainerConfig struct {
Cmd []string
} `json:"container_config,omitempty"`
Author string `json:"author,omitempty"`
ThrowAway bool `json:"throwaway,omitempty"`
}
fsLayerList := make([]FSLayer, len(img.History))
history := make([]History, len(img.History))
parent := ""
layerCounter := 0
for i, h := range img.History[:len(img.History)-1] {
var blobsum digest.Digest
if h.EmptyLayer {
if blobsum, err = mb.emptyTar(ctx); err != nil {
return nil, err
}
} else {
if len(img.RootFS.DiffIDs) <= layerCounter {
return nil, errors.New("too many non-empty layers in History section")
}
blobsum = mb.descriptors[layerCounter].Digest
layerCounter++
}
v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex()
if i == 0 && img.RootFS.BaseLayer != "" {
// windows-only baselayer setup
baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer))
parent = fmt.Sprintf("%x", baseID[:32])
}
v1Compatibility := v1Compatibility{
ID: v1ID,
Parent: parent,
Comment: h.Comment,
Created: h.Created,
Author: h.Author,
}
v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
if h.EmptyLayer {
v1Compatibility.ThrowAway = true
}
jsonBytes, err := json.Marshal(&v1Compatibility)
if err != nil {
return nil, err
}
reversedIndex := len(img.History) - i - 1
history[reversedIndex].V1Compatibility = string(jsonBytes)
fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum}
parent = v1ID
}
latestHistory := img.History[len(img.History)-1]
var blobsum digest.Digest
if latestHistory.EmptyLayer {
if blobsum, err = mb.emptyTar(ctx); err != nil {
return nil, err
}
} else {
if len(img.RootFS.DiffIDs) <= layerCounter {
return nil, errors.New("too many non-empty layers in History section")
}
blobsum = mb.descriptors[layerCounter].Digest
}
fsLayerList[0] = FSLayer{BlobSum: blobsum}
dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON)))
// Top-level v1compatibility string should be a modified version of the
// image config.
transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer)
if err != nil {
return nil, err
}
history[0].V1Compatibility = string(transformedConfig)
tag := ""
if tagged, isTagged := mb.ref.(reference.Tagged); isTagged {
tag = tagged.Tag()
}
mfst := Manifest{
Versioned: manifest.Versioned{
SchemaVersion: 1,
},
Name: mb.ref.Name(),
Tag: tag,
Architecture: img.Architecture,
FSLayers: fsLayerList,
History: history,
}
return Sign(&mfst, mb.pk)
}
// emptyTar pushes a compressed empty tar to the blob store if one doesn't
// already exist, and returns its blobsum.
func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) {
if mb.emptyTarDigest != "" {
// Already put an empty tar
return mb.emptyTarDigest, nil
}
descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar)
switch err {
case nil:
mb.emptyTarDigest = descriptor.Digest
return descriptor.Digest, nil
case distribution.ErrBlobUnknown:
// nop
default:
return "", err
}
// Add gzipped empty tar to the blob store
descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar)
if err != nil {
return "", err
}
mb.emptyTarDigest = descriptor.Digest
return descriptor.Digest, nil
}
// AppendReference adds a reference to the current ManifestBuilder
func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error {
// todo: verification here?
mb.descriptors = append(mb.descriptors, d.Descriptor())
return nil
}
// References returns the current references added to this builder
func (mb *configManifestBuilder) References() []distribution.Descriptor {
return mb.descriptors
}
// MakeV1ConfigFromConfig creates a legacy V1 image config from image config JSON
func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
// Top-level v1compatibility string should be a modified version of the
// image config.
var configAsMap map[string]*json.RawMessage
if err := json.Unmarshal(configJSON, &configAsMap); err != nil {
return nil, err
}
// Delete fields that didn't exist in old manifest
delete(configAsMap, "rootfs")
delete(configAsMap, "history")
configAsMap["id"] = rawJSON(v1ID)
if parentV1ID != "" {
configAsMap["parent"] = rawJSON(parentV1ID)
}
if throwaway {
configAsMap["throwaway"] = rawJSON(true)
}
return json.Marshal(configAsMap)
}
func rawJSON(value interface{}) *json.RawMessage {
jsonval, err := json.Marshal(value)
if err != nil {
return nil
}
return (*json.RawMessage)(&jsonval)
}


@ -0,0 +1,272 @@
package schema1
import (
"bytes"
"compress/gzip"
"io"
"reflect"
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/docker/libtrust"
)
type mockBlobService struct {
descriptors map[digest.Digest]distribution.Descriptor
}
func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
if descriptor, ok := bs.descriptors[dgst]; ok {
return descriptor, nil
}
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
panic("not implemented")
}
func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
panic("not implemented")
}
func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
d := distribution.Descriptor{
Digest: digest.FromBytes(p),
Size: int64(len(p)),
MediaType: mediaType,
}
bs.descriptors[d.Digest] = d
return d, nil
}
func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
panic("not implemented")
}
func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
panic("not implemented")
}
func TestEmptyTar(t *testing.T) {
// Confirm that gzippedEmptyTar expands to 1024 NULL bytes.
var decompressed [2048]byte
gzipReader, err := gzip.NewReader(bytes.NewReader(gzippedEmptyTar))
if err != nil {
t.Fatalf("NewReader returned error: %v", err)
}
n, err := gzipReader.Read(decompressed[:])
if n != 1024 {
t.Fatalf("read returned %d bytes; expected 1024", n)
}
n, err = gzipReader.Read(decompressed[1024:])
if n != 0 {
t.Fatalf("read returned %d bytes; expected 0", n)
}
if err != io.EOF {
t.Fatal("read did not return io.EOF")
}
gzipReader.Close()
for _, b := range decompressed[:1024] {
if b != 0 {
t.Fatal("nonzero byte in decompressed tar")
}
}
// Confirm that digestSHA256EmptyTar is the digest of gzippedEmptyTar.
dgst := digest.FromBytes(gzippedEmptyTar)
if dgst != digestSHA256GzippedEmptyTar {
t.Fatalf("digest mismatch for empty tar: expected %s got %s", digestSHA256GzippedEmptyTar, dgst)
}
}
func TestConfigBuilder(t *testing.T) {
imgJSON := `{
"architecture": "amd64",
"config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/bin/sh",
"-c",
"echo hi"
],
"Domainname": "",
"Entrypoint": null,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"derived=true",
"asdf=true"
],
"Hostname": "23304fc829f9",
"Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
"Labels": {},
"OnBuild": [],
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": null,
"WorkingDir": ""
},
"container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001",
"container_config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/bin/sh",
"-c",
"#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"
],
"Domainname": "",
"Entrypoint": null,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"derived=true",
"asdf=true"
],
"Hostname": "23304fc829f9",
"Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
"Labels": {},
"OnBuild": [],
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": null,
"WorkingDir": ""
},
"created": "2015-11-04T23:06:32.365666163Z",
"docker_version": "1.9.0-dev",
"history": [
{
"created": "2015-10-31T22:22:54.690851953Z",
"created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
},
{
"created": "2015-10-31T22:22:55.613815829Z",
"created_by": "/bin/sh -c #(nop) CMD [\"sh\"]"
},
{
"created": "2015-11-04T23:06:30.934316144Z",
"created_by": "/bin/sh -c #(nop) ENV derived=true",
"empty_layer": true
},
{
"created": "2015-11-04T23:06:31.192097572Z",
"created_by": "/bin/sh -c #(nop) ENV asdf=true",
"empty_layer": true
},
{
"author": "Alyssa P. Hacker \u003calyspdev@example.com\u003e",
"created": "2015-11-04T23:06:32.083868454Z",
"created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"
},
{
"created": "2015-11-04T23:06:32.365666163Z",
"created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]",
"empty_layer": true
}
],
"os": "linux",
"rootfs": {
"diff_ids": [
"sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
"sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
],
"type": "layers"
}
}`
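// Layer descriptors to append, one per rootfs diff_id above; the digests are
// test values, and the first happens to be the digest of the gzipped empty tar.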
descriptors := []distribution.Descriptor{
{Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
{Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
{Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
}
pk, err := libtrust.GenerateECP256PrivateKey()
if err != nil {
t.Fatalf("could not generate key for testing: %v", err)
}
bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)}
ref, err := reference.ParseNamed("testrepo:testtag")
if err != nil {
t.Fatalf("could not parse reference: %v", err)
}
builder := NewConfigManifestBuilder(bs, pk, ref, []byte(imgJSON))
for _, d := range descriptors {
if err := builder.AppendReference(d); err != nil {
t.Fatalf("AppendReference returned error: %v", err)
}
}
signed, err := builder.Build(context.Background())
if err != nil {
t.Fatalf("Build returned error: %v", err)
}
// Check that the gzipped empty layer tar was put in the blob store
_, err = bs.Stat(context.Background(), digestSHA256GzippedEmptyTar)
if err != nil {
t.Fatal("gzipped empty tar was not put in the blob store")
}
manifest := signed.(*SignedManifest).Manifest
if manifest.Versioned.SchemaVersion != 1 {
t.Fatal("SchemaVersion != 1")
}
if manifest.Name != "testrepo" {
t.Fatal("incorrect name in manifest")
}
if manifest.Tag != "testtag" {
t.Fatal("incorrect tag in manifest")
}
if manifest.Architecture != "amd64" {
t.Fatal("incorrect arch in manifest")
}
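// fsLayers are listed newest-first, mirroring the history entries: rows
// flagged empty_layer resolve to the gzipped empty tar digest (sha256:a3ed95...),
// while the rest consume the appended descriptors in reverse order.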
expectedFSLayers := []FSLayer{
{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
{BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
}
if len(manifest.FSLayers) != len(expectedFSLayers) {
t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers))
}
if !reflect.DeepEqual(manifest.FSLayers, expectedFSLayers) {
t.Fatal("wrong FSLayers list")
}
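// Expected v1Compatibility strings, newest-first; the id and parent values
// are derived deterministically by the builder from the blob sums and history.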
expectedV1Compatibility := []string{
`{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"69e5c1bfadad697fdb6db59f6326648fa119e0c031a0eda33b8cfadcab54ba7f","os":"linux","parent":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","throwaway":true}`,
`{"id":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","parent":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]},"author":"Alyssa P. Hacker \u003calyspdev@example.com\u003e"}`,
`{"id":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","parent":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`,
`{"id":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`,
`{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`,
`{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`,
}
if len(manifest.History) != len(expectedV1Compatibility) {
t.Fatalf("wrong number of history entries: %d", len(manifest.History))
}
for i := range expectedV1Compatibility {
if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] {
t.Errorf("wrong V1Compatibility %d. expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility)
}
}
}

Some files were not shown because too many files have changed in this diff.