forked from TrueCloudLab/distribution

Compare commits: main ... release/2.

51 commits:

3b7b534569, afe85428bb, f7365390ef, 97f6daced4, 4313c14723, 9a3ff11330,
10ade61de9, 691e62e7ef, 19b573a6f7, c5679da3a1, 61e7e20823, d836b23fc2,
18230b7b34, 51636a6711, 09109ab50a, 89e6568e34, 3c64ff10bb, f807afbf85,
9142de99fa, cc341b0110, cc866a5bf3, 3fe1d67ace, 6300300270, f1bd655119,
d7362d7e3a, cf8615dedf, 70e0022e42, 48eeac88e9, a45a401e97, e2f006ac2b,
0a1e4a57e2, bdf503a444, be75da0ef2, afa91463d6, fad36ed1a1, cfd1309845,
a85caead04, f999f540d3, c636ed788a, 5883e2d935, 269d18d9a8, a3c027e626,
2461543d98, 5b98226afe, 2eab12df9b, 445ef068dd, cbc30be414, bf74e4f91d,
62994fdd12, e702d95cfd, caf43bbcc2

112 changed files with 1438 additions and 566 deletions
.github/workflows/ci.yml (vendored, new file, 50 additions)

@@ -0,0 +1,50 @@
name: CI

on:
  push:
  pull_request:

jobs:

  build:
    runs-on: ubuntu-latest
    env:
      DOCKER_BUILDTAGS: "include_oss include_gcs"
      CGO_ENABLED: 1
      GO111MODULE: "auto"
      GOPATH: ${{ github.workspace }}
      GOOS: linux
      COMMIT_RANGE: ${{ github.event_name == 'pull_request' && format('{0}..{1}',github.event.pull_request.base.sha, github.event.pull_request.head.sha) || github.sha }}

    steps:
      - uses: actions/checkout@v2
        with:
          path: src/github.com/docker/distribution
          fetch-depth: 50

      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16.*

      - name: Dependencies
        run: |
          sudo apt-get -q update
          sudo -E apt-get -yq --no-install-suggests --no-install-recommends install python2-minimal
          cd /tmp && go get -u github.com/vbatts/git-validation

      - name: Build
        working-directory: ./src/github.com/docker/distribution
        run: |
          DCO_VERBOSITY=-q script/validate/dco
          GO111MODULE=on script/setup/install-dev-tools
          script/validate/vendor
          go build -i .
          make check
          make build
          make binaries
          if [ "$GOOS" = "linux" ]; then make coverage ; fi

      - uses: codecov/codecov-action@v1
        with:
          directory: ./src/github.com/docker/distribution
.golangci.yml (new file, 20 additions)

@@ -0,0 +1,20 @@
linters:
  enable:
    - structcheck
    - varcheck
    - staticcheck
    - unconvert
    - gofmt
    - goimports
    - golint
    - ineffassign
    - vet
    - unused
    - misspell
  disable:
    - errcheck

run:
  deadline: 2m
  skip-dirs:
    - vendor

.gometalinter.json (removed file, 16 deletions)

@@ -1,16 +0,0 @@
{
  "Vendor": true,
  "Deadline": "2m",
  "Sort": ["linter", "severity", "path", "line"],
  "EnableGC": true,
  "Enable": [
    "structcheck",
    "staticcheck",
    "unconvert",

    "gofmt",
    "goimports",
    "golint",
    "vet"
  ]
}
.travis.yml (removed file, 51 deletions)

@@ -1,51 +0,0 @@
dist: trusty
sudo: required
# setup travis so that we can run containers for integration tests
services:
  - docker

language: go

go:
  - "1.11.x"

go_import_path: github.com/docker/distribution

addons:
  apt:
    packages:
      - python-minimal


env:
  - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1

before_install:
  - uname -r
  - sudo apt-get -q update

install:
  - go get -u github.com/vbatts/git-validation
  # TODO: Add enforcement of license
  # - go get -u github.com/kunalkushwaha/ltag
  - cd $TRAVIS_BUILD_DIR

script:
  - export GOOS=$TRAVIS_GOOS
  - export CGO_ENABLED=$TRAVIS_CGO_ENABLED
  - DCO_VERBOSITY=-q script/validate/dco
  - GOOS=linux script/setup/install-dev-tools
  - script/validate/vendor
  - go build -i .
  - make check
  - make build
  - make binaries
  # Currently takes too long
  #- if [ "$GOOS" = "linux" ]; then make test-race ; fi
  - if [ "$GOOS" = "linux" ]; then make coverage ; fi

after_success:
  - bash <(curl -s https://codecov.io/bash) -F linux

before_deploy:
  # Run tests with storage driver configurations

Dockerfile
@@ -1,7 +1,8 @@
-FROM golang:1.11-alpine AS build
+FROM golang:1.16-alpine AS build
 
+ENV GO111MODULE=auto
 ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
-ENV DOCKER_BUILDTAGS include_oss include_gcs
+ENV BUILDTAGS include_oss include_gcs
 
 ARG GOOS=linux
 ARG GOARCH=amd64
Makefile (2 changes)

@@ -50,7 +50,7 @@ version/version.go:
 
 check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck")
 	@echo "$(WHALE) $@"
-	gometalinter --config .gometalinter.json ./...
+	golangci-lint run
 
 test: ## run tests, except integration test with test.short
 	@echo "$(WHALE) $@"

blobs.go (2 changes)

@@ -10,7 +10,7 @@ import (
 
 	"github.com/docker/distribution/reference"
 	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/image-spec/specs-go/v1"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 var (

@@ -21,7 +21,7 @@ import (
 	"text/template"
 
 	"github.com/docker/distribution/registry/api/errcode"
-	"github.com/docker/distribution/registry/api/v2"
+	v2 "github.com/docker/distribution/registry/api/v2"
 )
 
 var spaceRegex = regexp.MustCompile(`\n\s*`)

@@ -108,6 +108,12 @@ type Configuration struct {
 		// A file may contain multiple CA certificates encoded as PEM
 		ClientCAs []string `yaml:"clientcas,omitempty"`
 
+		// Specifies the lowest TLS version allowed
+		MinimumTLS string `yaml:"minimumtls,omitempty"`
+
+		// Specifies a list of cipher suites allowed
+		CipherSuites []string `yaml:"ciphersuites,omitempty"`
+
 		// LetsEncrypt is used to configuration setting up TLS through
 		// Let's Encrypt instead of manually specifying certificate and
 		// key. If a TLS certificate is specified, the Let's Encrypt
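The two new fields above are ordinary YAML-tagged struct members, so `minimumtls` and `ciphersuites` values placed under `http.tls` in the registry configuration file are picked up when the configuration is unmarshalled. Below is a minimal sketch of that mechanism, using a stand-alone struct and gopkg.in/yaml.v2 rather than the real `Configuration` type; the struct tags are the ones from the diff, everything else is illustrative.

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// tlsOptions mirrors only the two new fields; the real Configuration
// struct nests them under HTTP.TLS alongside certificate and key paths.
type tlsOptions struct {
	MinimumTLS   string   `yaml:"minimumtls,omitempty"`
	CipherSuites []string `yaml:"ciphersuites,omitempty"`
}

func main() {
	raw := []byte("minimumtls: tls1.2\nciphersuites:\n  - TLS_AES_128_GCM_SHA256\n")

	var opts tlsOptions
	if err := yaml.Unmarshal(raw, &opts); err != nil {
		panic(err)
	}
	fmt.Println(opts.MinimumTLS, opts.CipherSuites) // tls1.2 [TLS_AES_128_GCM_SHA256]
}
```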
@@ -388,7 +394,7 @@ func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error
 	switch loglevelString {
 	case "error", "warn", "info", "debug":
 	default:
-		return fmt.Errorf("Invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString)
+		return fmt.Errorf("invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString)
 	}
 
 	*loglevel = Loglevel(loglevelString)

@@ -463,7 +469,7 @@ func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		}
 
 		if len(types) > 1 {
-			return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types)
+			return fmt.Errorf("must provide exactly one storage type. Provided: %v", types)
 		}
 	}
 	*storage = storageMap

@@ -665,11 +671,11 @@ func Parse(rd io.Reader) (*Configuration, error) {
 				v0_1.Loglevel = Loglevel("")
 			}
 			if v0_1.Storage.Type() == "" {
-				return nil, errors.New("No storage configuration provided")
+				return nil, errors.New("no storage configuration provided")
 			}
 			return (*Configuration)(v0_1), nil
 		}
-		return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c)
+		return nil, fmt.Errorf("expected *v0_1Configuration, received %#v", c)
 	},
 },
})
@ -80,10 +80,12 @@ var configStruct = Configuration{
|
|||
RelativeURLs bool `yaml:"relativeurls,omitempty"`
|
||||
DrainTimeout time.Duration `yaml:"draintimeout,omitempty"`
|
||||
TLS struct {
|
||||
Certificate string `yaml:"certificate,omitempty"`
|
||||
Key string `yaml:"key,omitempty"`
|
||||
ClientCAs []string `yaml:"clientcas,omitempty"`
|
||||
LetsEncrypt struct {
|
||||
Certificate string `yaml:"certificate,omitempty"`
|
||||
Key string `yaml:"key,omitempty"`
|
||||
ClientCAs []string `yaml:"clientcas,omitempty"`
|
||||
MinimumTLS string `yaml:"minimumtls,omitempty"`
|
||||
CipherSuites []string `yaml:"ciphersuites,omitempty"`
|
||||
LetsEncrypt struct {
|
||||
CacheFile string `yaml:"cachefile,omitempty"`
|
||||
Email string `yaml:"email,omitempty"`
|
||||
Hosts []string `yaml:"hosts,omitempty"`
|
||||
|
@ -102,10 +104,12 @@ var configStruct = Configuration{
|
|||
} `yaml:"http2,omitempty"`
|
||||
}{
|
||||
TLS: struct {
|
||||
Certificate string `yaml:"certificate,omitempty"`
|
||||
Key string `yaml:"key,omitempty"`
|
||||
ClientCAs []string `yaml:"clientcas,omitempty"`
|
||||
LetsEncrypt struct {
|
||||
Certificate string `yaml:"certificate,omitempty"`
|
||||
Key string `yaml:"key,omitempty"`
|
||||
ClientCAs []string `yaml:"clientcas,omitempty"`
|
||||
MinimumTLS string `yaml:"minimumtls,omitempty"`
|
||||
CipherSuites []string `yaml:"ciphersuites,omitempty"`
|
||||
LetsEncrypt struct {
|
||||
CacheFile string `yaml:"cachefile,omitempty"`
|
||||
Email string `yaml:"email,omitempty"`
|
||||
Hosts []string `yaml:"hosts,omitempty"`
|
||||
|
@ -540,9 +544,7 @@ func copyConfig(config Configuration) *Configuration {
|
|||
}
|
||||
|
||||
configCopy.Notifications = Notifications{Endpoints: []Endpoint{}}
|
||||
for _, v := range config.Notifications.Endpoints {
|
||||
configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, v)
|
||||
}
|
||||
configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, config.Notifications.Endpoints...)
|
||||
|
||||
configCopy.HTTP.Headers = make(http.Header)
|
||||
for k, v := range config.HTTP.Headers {
|
||||
|
|
|
@@ -122,7 +122,7 @@ func (p *Parser) Parse(in []byte, v interface{}) error {
 
 	parseInfo, ok := p.mapping[versionedStruct.Version]
 	if !ok {
-		return fmt.Errorf("Unsupported version: %q", versionedStruct.Version)
+		return fmt.Errorf("unsupported version: %q", versionedStruct.Version)
 	}
 
 	parseAs := reflect.New(parseInfo.ParseAs)

@@ -2,9 +2,10 @@ package main
 
 import (
 	"context"
+	"crypto/rand"
 	"encoding/json"
 	"flag"
-	"math/rand"
+	"math/big"
 	"net/http"
 	"strconv"
 	"strings"

@@ -141,8 +142,15 @@ const refreshTokenLength = 15
 
 func newRefreshToken() string {
 	s := make([]rune, refreshTokenLength)
+	max := int64(len(refreshCharacters))
 	for i := range s {
-		s[i] = refreshCharacters[rand.Intn(len(refreshCharacters))]
+		randInt, err := rand.Int(rand.Reader, big.NewInt(max))
+		// let '0' serve the failure case
+		if err != nil {
+			logrus.Infof("Error on making refresh token: %v", err)
+			randInt = big.NewInt(0)
+		}
+		s[i] = refreshCharacters[randInt.Int64()]
 	}
 	return string(s)
 }
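The interesting part of the hunk above is the switch from math/rand to crypto/rand: refresh tokens are now drawn from a cryptographically secure source, and a failed read falls back to index 0 of the character set. Here is a self-contained sketch of the same pattern; the alphabet and length are illustrative, not the token server's actual values.

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

const alphabet = "abcdefghijklmnopqrstuvwxyz0123456789" // illustrative alphabet

// randomToken builds an n-character token from crypto/rand.
// On a failed read it falls back to index 0, mirroring the diff above.
func randomToken(n int) string {
	s := make([]rune, n)
	max := big.NewInt(int64(len(alphabet)))
	for i := range s {
		idx, err := rand.Int(rand.Reader, max)
		if err != nil {
			idx = big.NewInt(0)
		}
		s[i] = rune(alphabet[idx.Int64()])
	}
	return string(s)
}

func main() {
	fmt.Println(randomToken(15))
}
```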
@@ -703,15 +703,20 @@ interpretation of the options.
 | `baseurl` | yes | The `SCHEME://HOST[/PATH]` at which Cloudfront is served. |
 | `privatekey` | yes | The private key for Cloudfront, provided by AWS. |
 | `keypairid` | yes | The key pair ID provided by AWS. |
-| `duration` | no | An integer and unit for the duration of the Cloudfront session. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, or `h`. For example, `3000s` is valid, but `3000 s` is not. If you do not specify a `duration` or you specify an integer without a time unit, the duration defaults to `20m` (20 minutes).|
-|`ipfilteredby`|no | A string with the following value `none|aws|awsregion`. |
-|`awsregion`|no | A comma separated string of AWS regions, only available when `ipfilteredby` is `awsregion`. For example, `us-east-1, us-west-2`|
-|`updatefrenquency`|no | The frequency to update AWS IP regions, default: `12h`|
-|`iprangesurl`|no | The URL contains the AWS IP ranges information, default: `https://ip-ranges.amazonaws.com/ip-ranges.json`|
-Then value of ipfilteredby:
-`none`: default, do not filter by IP
-`aws`: IP from AWS goes to S3 directly
-`awsregion`: IP from certain AWS regions goes to S3 directly, use together with `awsregion`
+| `duration` | no | An integer and unit for the duration of the Cloudfront session. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, or `h`. For example, `3000s` is valid, but `3000 s` is not. If you do not specify a `duration` or you specify an integer without a time unit, the duration defaults to `20m` (20 minutes). |
+| `ipfilteredby` | no | A string with the following value `none`, `aws` or `awsregion`. |
+| `awsregion` | no | A comma separated string of AWS regions, only available when `ipfilteredby` is `awsregion`. For example, `us-east-1, us-west-2` |
+| `updatefrenquency` | no | The frequency to update AWS IP regions, default: `12h` |
+| `iprangesurl` | no | The URL contains the AWS IP ranges information, default: `https://ip-ranges.amazonaws.com/ip-ranges.json` |
+
+Value of `ipfilteredby` can be:
+
+| Value | Description |
+|-------------|------------------------------------|
+| `none` | default, do not filter by IP |
+| `aws` | IP from AWS goes to S3 directly |
+| `awsregion` | IP from certain AWS regions goes to S3 directly, use together with `awsregion`. |
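The `duration` rule quoted above (`3000s` is valid, `3000 s` is not, and a bare integer falls back to the default) matches Go's `time.ParseDuration` syntax, which is presumably how the middleware reads the value. A quick illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	d, err := time.ParseDuration("3000s") // valid: integer immediately followed by a unit
	fmt.Println(d, err)                   // 50m0s <nil>

	_, err = time.ParseDuration("3000 s") // non-nil error: the space breaks the unit
	fmt.Println(err != nil)               // true
}
```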
### `redirect`

@@ -777,6 +782,10 @@ http:
     clientcas:
       - /path/to/ca.pem
       - /path/to/another/ca.pem
+    minimumtls: tls1.2
+    ciphersuites:
+      - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
+      - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
     letsencrypt:
       cachefile: /path/to/cache-file
       email: emailused@letsencrypt.com

@@ -812,9 +821,49 @@ and proxy connections to the registry server.
 | Parameter | Required | Description |
 |-----------|----------|-------------------------------------------------------|
 | `certificate` | yes | Absolute path to the x509 certificate file. |
 | `key` | yes | Absolute path to the x509 private key file. |
 | `clientcas` | no | An array of absolute paths to x509 CA files. |
+| `minimumtls` | no | Minimum TLS version allowed (tls1.0, tls1.1, tls1.2, tls1.3). Defaults to tls1.2 |
+| `ciphersuites` | no | Cipher suites allowed. Please see below for allowed values and default. |
+
+Available cipher suites:
+- TLS_RSA_WITH_RC4_128_SHA
+- TLS_RSA_WITH_3DES_EDE_CBC_SHA
+- TLS_RSA_WITH_AES_128_CBC_SHA
+- TLS_RSA_WITH_AES_256_CBC_SHA
+- TLS_RSA_WITH_AES_128_CBC_SHA256
+- TLS_RSA_WITH_AES_128_GCM_SHA256
+- TLS_RSA_WITH_AES_256_GCM_SHA384
+- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
+- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
+- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
+- TLS_ECDHE_RSA_WITH_RC4_128_SHA
+- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
+- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
+- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
+- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
+- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
+- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
+- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
+- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
+- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
+- TLS_AES_128_GCM_SHA256
+- TLS_AES_256_GCM_SHA384
+- TLS_CHACHA20_POLY1305_SHA256
+
+Default cipher suites:
+- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
+- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
+- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
+- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
+- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
+- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+- TLS_AES_128_GCM_SHA256
+- TLS_CHACHA20_POLY1305_SHA256
+- TLS_AES_256_GCM_SHA384
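These documented names line up with Go's crypto/tls constants: the `minimumtls` values correspond to the `tls.VersionTLS*` constants, and the cipher-suite names to the `tls.TLS_*` constants (the registry.go hunk near the end of this compare adds exactly such a name-to-constant map for the cipher suites). A hedged sketch of how the version half could be resolved; the map and helper names here are hypothetical, not the registry's actual identifiers.

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// tlsVersions is an illustrative lookup table for the documented
// minimumtls values; the registry keeps an equivalent mapping internally.
var tlsVersions = map[string]uint16{
	"tls1.0": tls.VersionTLS10,
	"tls1.1": tls.VersionTLS11,
	"tls1.2": tls.VersionTLS12,
	"tls1.3": tls.VersionTLS13,
}

// minVersion resolves a configured minimumtls string, defaulting to
// TLS 1.2 as the documentation above states.
func minVersion(configured string) (uint16, error) {
	if configured == "" {
		return tls.VersionTLS12, nil
	}
	v, ok := tlsVersions[configured]
	if !ok {
		return 0, fmt.Errorf("unknown minimum TLS version %q", configured)
	}
	return v, nil
}

func main() {
	v, err := minVersion("tls1.2")
	fmt.Println(v == tls.VersionTLS12, err) // true <nil>
}
```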
### `letsencrypt`

@@ -2,6 +2,8 @@
 title: "HTTP API V2"
 description: "Specification for the Registry API."
 keywords: registry, on-prem, images, tags, repository, distribution, api, advanced
+redirect_from:
+  - /reference/api/registry_api/
 ---
 
 # Docker Registry HTTP API V2

@@ -2,6 +2,8 @@
 title: "HTTP API V2"
 description: "Specification for the Registry API."
 keywords: registry, on-prem, images, tags, repository, distribution, api, advanced
+redirect_from:
+  - /reference/api/registry_api/
 ---
 
 # Docker Registry HTTP API V2
docs/spec/deprecated-schema-v1.md (new file, 41 additions)

@@ -0,0 +1,41 @@
---
title: Update deprecated schema image manifest version 2, v1 images
description: Update deprecated schema v1 images
keywords: registry, on-prem, images, tags, repository, distribution, api, advanced, manifest
---

## Image manifest version 2, schema 1

With the release of image manifest version 2, schema 2, image manifest version 2, schema 1 has been deprecated. This could lead to compatibility and vulnerability issues in images that haven't been updated to image manifest version 2, schema 2.

This page contains information on how to update from image manifest version 2, schema 1. However, these instructions will not ensure your new image will run successfully. There may be several other issues to troubleshoot that are associated with the deprecated image manifest that will block your image from running successfully. A list of possible methods to help update your image is also included below.

### Update to image manifest version 2, schema 2

One way to upgrade an image from image manifest version 2, schema 1 to schema 2 is to `docker pull` the image and then `docker push` the image with a current version of Docker. Doing so will automatically convert the image to use the latest image manifest specification.

Converting an image to image manifest version 2, schema 2 converts the manifest format, but does not update the contents within the image. Images using manifest version 2, schema 1 may contain unpatched vulnerabilities. We recommend looking for an alternative image or rebuilding it.

### Update FROM statement

You can rebuild the image by updating the `FROM` statement in your `Dockerfile`. If your image manifest is out-of-date, there is a chance the image pulled from your `FROM` statement in your `Dockerfile` is also out-of-date. See the [Dockerfile reference](https://docs.docker.com/engine/reference/builder/#from) and the [Dockerfile best practices guide](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/) for more information on how to update the `FROM` statement in your `Dockerfile`.

@@ -220,7 +220,7 @@ image. It's the direct replacement for the schema-1 manifest.
 - **`urls`** *array*
 
   Provides a list of URLs from which the content may be fetched. Content
-  should be verified against the `digest` and `size`. This field is
+  must be verified against the `digest` and `size`. This field is
   optional and uncommon.
 
 ## Example Image Manifest

@@ -14,7 +14,7 @@ var (
 // DownHandler registers a manual_http_status that always returns an Error
 func DownHandler(w http.ResponseWriter, r *http.Request) {
 	if r.Method == "POST" {
-		updater.Update(errors.New("Manual Check"))
+		updater.Update(errors.New("manual Check"))
 	} else {
 		w.WriteHeader(http.StatusNotFound)
 	}
@ -8,7 +8,7 @@ import (
|
|||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -54,6 +54,9 @@ func init() {
|
|||
}
|
||||
|
||||
imageIndexFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
|
||||
if err := validateIndex(b); err != nil {
|
||||
return nil, distribution.Descriptor{}, err
|
||||
}
|
||||
m := new(DeserializedManifestList)
|
||||
err := m.UnmarshalJSON(b)
|
||||
if err != nil {
|
||||
|
@ -163,7 +166,7 @@ func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType st
|
|||
},
|
||||
}
|
||||
|
||||
m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors))
|
||||
m.Manifests = make([]ManifestDescriptor, len(descriptors))
|
||||
copy(m.Manifests, descriptors)
|
||||
|
||||
deserialized := DeserializedManifestList{
|
||||
|
@ -177,7 +180,7 @@ func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType st
|
|||
|
||||
// UnmarshalJSON populates a new ManifestList struct from JSON data.
|
||||
func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error {
|
||||
m.canonical = make([]byte, len(b), len(b))
|
||||
m.canonical = make([]byte, len(b))
|
||||
// store manifest list in canonical
|
||||
copy(m.canonical, b)
|
||||
|
||||
|
@ -214,3 +217,23 @@ func (m DeserializedManifestList) Payload() (string, []byte, error) {
|
|||
|
||||
return mediaType, m.canonical, nil
|
||||
}
|
||||
|
||||
// unknownDocument represents a manifest, manifest list, or index that has not
|
||||
// yet been validated
|
||||
type unknownDocument struct {
|
||||
Config interface{} `json:"config,omitempty"`
|
||||
Layers interface{} `json:"layers,omitempty"`
|
||||
}
|
||||
|
||||
// validateIndex returns an error if the byte slice is invalid JSON or if it
|
||||
// contains fields that belong to a manifest
|
||||
func validateIndex(b []byte) error {
|
||||
var doc unknownDocument
|
||||
if err := json.Unmarshal(b, &doc); err != nil {
|
||||
return err
|
||||
}
|
||||
if doc.Config != nil || doc.Layers != nil {
|
||||
return errors.New("index: expected index but found manifest")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -7,7 +7,9 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/docker/distribution/manifest/ocischema"
|
||||
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
var expectedManifestListSerialization = []byte(`{
|
||||
|
@ -303,3 +305,33 @@ func TestMediaTypes(t *testing.T) {
|
|||
mediaTypeTest(t, v1.MediaTypeImageIndex, v1.MediaTypeImageIndex, false)
|
||||
mediaTypeTest(t, v1.MediaTypeImageIndex, v1.MediaTypeImageIndex+"XXX", true)
|
||||
}
|
||||
|
||||
func TestValidateManifest(t *testing.T) {
|
||||
manifest := ocischema.Manifest{
|
||||
Config: distribution.Descriptor{Size: 1},
|
||||
Layers: []distribution.Descriptor{{Size: 2}},
|
||||
}
|
||||
index := ManifestList{
|
||||
Manifests: []ManifestDescriptor{
|
||||
{Descriptor: distribution.Descriptor{Size: 3}},
|
||||
},
|
||||
}
|
||||
t.Run("valid", func(t *testing.T) {
|
||||
b, err := json.Marshal(index)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected error marshaling index", err)
|
||||
}
|
||||
if err := validateIndex(b); err != nil {
|
||||
t.Error("index should be valid", err)
|
||||
}
|
||||
})
|
||||
t.Run("invalid", func(t *testing.T) {
|
||||
b, err := json.Marshal(manifest)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected error marshaling manifest", err)
|
||||
}
|
||||
if err := validateIndex(b); err == nil {
|
||||
t.Error("manifest should not be valid")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
|
|||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// Builder is a type for constructing manifests.
|
||||
|
@ -48,7 +48,7 @@ func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotati
|
|||
// valid media type for oci image manifests currently: "" or "application/vnd.oci.image.manifest.v1+json"
|
||||
func (mb *Builder) SetMediaType(mediaType string) error {
|
||||
if mediaType != "" && mediaType != v1.MediaTypeImageManifest {
|
||||
return errors.New("Invalid media type for OCI image manifest")
|
||||
return errors.New("invalid media type for OCI image manifest")
|
||||
}
|
||||
|
||||
mb.mediaType = mediaType
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
|
|||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
type mockBlobService struct {
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -22,6 +22,9 @@ var (
|
|||
|
||||
func init() {
|
||||
ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
|
||||
if err := validateManifest(b); err != nil {
|
||||
return nil, distribution.Descriptor{}, err
|
||||
}
|
||||
m := new(DeserializedManifest)
|
||||
err := m.UnmarshalJSON(b)
|
||||
if err != nil {
|
||||
|
@ -87,7 +90,7 @@ func FromStruct(m Manifest) (*DeserializedManifest, error) {
|
|||
|
||||
// UnmarshalJSON populates a new Manifest struct from JSON data.
|
||||
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
|
||||
m.canonical = make([]byte, len(b), len(b))
|
||||
m.canonical = make([]byte, len(b))
|
||||
// store manifest in canonical
|
||||
copy(m.canonical, b)
|
||||
|
||||
|
@ -122,3 +125,22 @@ func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
|
|||
func (m DeserializedManifest) Payload() (string, []byte, error) {
|
||||
return v1.MediaTypeImageManifest, m.canonical, nil
|
||||
}
|
||||
|
||||
// unknownDocument represents a manifest, manifest list, or index that has not
|
||||
// yet been validated
|
||||
type unknownDocument struct {
|
||||
Manifests interface{} `json:"manifests,omitempty"`
|
||||
}
|
||||
|
||||
// validateManifest returns an error if the byte slice is invalid JSON or if it
|
||||
// contains fields that belong to a index
|
||||
func validateManifest(b []byte) error {
|
||||
var doc unknownDocument
|
||||
if err := json.Unmarshal(b, &doc); err != nil {
|
||||
return err
|
||||
}
|
||||
if doc.Manifests != nil {
|
||||
return errors.New("ocimanifest: expected manifest but found index")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -8,7 +8,9 @@ import (
|
|||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/docker/distribution/manifest/manifestlist"
|
||||
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
var expectedManifestSerialization = []byte(`{
|
||||
|
@ -182,3 +184,33 @@ func TestMediaTypes(t *testing.T) {
|
|||
mediaTypeTest(t, v1.MediaTypeImageManifest, false)
|
||||
mediaTypeTest(t, v1.MediaTypeImageManifest+"XXX", true)
|
||||
}
|
||||
|
||||
func TestValidateManifest(t *testing.T) {
|
||||
manifest := Manifest{
|
||||
Config: distribution.Descriptor{Size: 1},
|
||||
Layers: []distribution.Descriptor{{Size: 2}},
|
||||
}
|
||||
index := manifestlist.ManifestList{
|
||||
Manifests: []manifestlist.ManifestDescriptor{
|
||||
{Descriptor: distribution.Descriptor{Size: 3}},
|
||||
},
|
||||
}
|
||||
t.Run("valid", func(t *testing.T) {
|
||||
b, err := json.Marshal(manifest)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected error marshaling manifest", err)
|
||||
}
|
||||
if err := validateManifest(b); err != nil {
|
||||
t.Error("manifest should be valid", err)
|
||||
}
|
||||
})
|
||||
t.Run("invalid", func(t *testing.T) {
|
||||
b, err := json.Marshal(index)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected error marshaling index", err)
|
||||
}
|
||||
if err := validateManifest(b); err == nil {
|
||||
t.Error("index should not be valid")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
@ -108,7 +108,7 @@ type SignedManifest struct {
|
|||
|
||||
// UnmarshalJSON populates a new SignedManifest struct from JSON data.
|
||||
func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
|
||||
sm.all = make([]byte, len(b), len(b))
|
||||
sm.all = make([]byte, len(b))
|
||||
// store manifest and signatures in all
|
||||
copy(sm.all, b)
|
||||
|
||||
|
@ -124,7 +124,7 @@ func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
|
|||
}
|
||||
|
||||
// sm.Canonical stores the canonical manifest JSON
|
||||
sm.Canonical = make([]byte, len(bytes), len(bytes))
|
||||
sm.Canonical = make([]byte, len(bytes))
|
||||
copy(sm.Canonical, bytes)
|
||||
|
||||
// Unmarshal canonical JSON into Manifest object
|
||||
|
|
|
@ -58,7 +58,7 @@ func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Man
|
|||
func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error {
|
||||
r, ok := d.(Reference)
|
||||
if !ok {
|
||||
return fmt.Errorf("Unable to add non-reference type to v1 builder")
|
||||
return fmt.Errorf("unable to add non-reference type to v1 builder")
|
||||
}
|
||||
|
||||
// Entries need to be prepended
|
||||
|
|
|
@ -106,7 +106,7 @@ func FromStruct(m Manifest) (*DeserializedManifest, error) {
|
|||
|
||||
// UnmarshalJSON populates a new Manifest struct from JSON data.
|
||||
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
|
||||
m.canonical = make([]byte, len(b), len(b))
|
||||
m.canonical = make([]byte, len(b))
|
||||
// store manifest in canonical
|
||||
copy(m.canonical, b)
|
||||
|
||||
|
|
|
@ -87,7 +87,7 @@ func ManifestMediaTypes() (mediaTypes []string) {
|
|||
// UnmarshalFunc implements manifest unmarshalling a given MediaType
|
||||
type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
|
||||
|
||||
var mappings = make(map[string]UnmarshalFunc, 0)
|
||||
var mappings = make(map[string]UnmarshalFunc)
|
||||
|
||||
// UnmarshalManifest looks up manifest unmarshal functions based on
|
||||
// MediaType
|
||||
|
|
|
@ -125,15 +125,6 @@ func (b *bridge) RepoDeleted(repo reference.Named) error {
|
|||
return b.sink.Write(*event)
|
||||
}
|
||||
|
||||
func (b *bridge) createManifestEventAndWrite(action string, repo reference.Named, sm distribution.Manifest) error {
|
||||
manifestEvent, err := b.createManifestEvent(action, repo, sm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return b.sink.Write(*manifestEvent)
|
||||
}
|
||||
|
||||
func (b *bridge) createManifestDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error {
|
||||
event := b.createEvent(action)
|
||||
event.Target.Repository = repo.Name()
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
|
|||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/uuid"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
|
|
|
@ -114,8 +114,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
|
|||
prototype.Request.UserAgent = "test/0.1"
|
||||
prototype.Source.Addr = "hostname.local:port"
|
||||
|
||||
var manifestPush Event
|
||||
manifestPush = prototype
|
||||
var manifestPush = prototype
|
||||
manifestPush.ID = "asdf-asdf-asdf-asdf-0"
|
||||
manifestPush.Target.Digest = "sha256:0123456789abcdef0"
|
||||
manifestPush.Target.Length = 1
|
||||
|
@ -124,8 +123,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
|
|||
manifestPush.Target.Repository = "library/test"
|
||||
manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest"
|
||||
|
||||
var layerPush0 Event
|
||||
layerPush0 = prototype
|
||||
var layerPush0 = prototype
|
||||
layerPush0.ID = "asdf-asdf-asdf-asdf-1"
|
||||
layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
|
||||
layerPush0.Target.Length = 2
|
||||
|
@ -134,8 +132,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
|
|||
layerPush0.Target.Repository = "library/test"
|
||||
layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest"
|
||||
|
||||
var layerPush1 Event
|
||||
layerPush1 = prototype
|
||||
var layerPush1 = prototype
|
||||
layerPush1.ID = "asdf-asdf-asdf-asdf-2"
|
||||
layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6"
|
||||
layerPush1.Target.Length = 3
|
||||
|
|
|
@ -133,8 +133,7 @@ type headerRoundTripper struct {
|
|||
}
|
||||
|
||||
func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
var nreq http.Request
|
||||
nreq = *req
|
||||
var nreq = *req
|
||||
nreq.Header = make(http.Header)
|
||||
|
||||
merge := func(headers http.Header) {
|
||||
|
|
|
@ -136,11 +136,10 @@ func checkExerciseRepository(t *testing.T, repository distribution.Repository, r
|
|||
var blobDigests []digest.Digest
|
||||
blobs := repository.Blobs(ctx)
|
||||
for i := 0; i < 2; i++ {
|
||||
rs, ds, err := testutil.CreateRandomTarFile()
|
||||
rs, dgst, err := testutil.CreateRandomTarFile()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating test layer: %v", err)
|
||||
}
|
||||
dgst := digest.Digest(ds)
|
||||
blobDigests = append(blobDigests, dgst)
|
||||
|
||||
wr, err := blobs.Create(ctx)
|
||||
|
|
|
@ -284,11 +284,6 @@ type retryingSink struct {
|
|||
}
|
||||
}
|
||||
|
||||
type retryingSinkListener interface {
|
||||
active(events ...Event)
|
||||
retry(events ...Event)
|
||||
}
|
||||
|
||||
// TODO(stevvooe): We are using circuit break here, which actually doesn't
|
||||
// make a whole lot of sense for this use case, since we always retry. Move
|
||||
// this to use bounded exponential backoff.
|
||||
|
|
|
@@ -56,6 +56,35 @@ func ParseNormalizedNamed(s string) (Named, error) {
 	return named, nil
 }
 
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The reference returned can only be either tagged or digested. For reference contains both tag
+// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+	named, err := ParseNormalizedNamed(ref)
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := named.(NamedTagged); ok {
+		if canonical, ok := named.(Canonical); ok {
+			// The reference is both tagged and digested, only
+			// return digested.
+			newNamed, err := WithName(canonical.Name())
+			if err != nil {
+				return nil, err
+			}
+			newCanonical, err := WithDigest(newNamed, canonical.Digest())
+			if err != nil {
+				return nil, err
+			}
+			return newCanonical, nil
+		}
+	}
+	return TagNameOnly(named), nil
+}
+
 // splitDockerDomain splits a repository name to domain and remotename string.
 // If no valid domain is found, the default domain is used. Repository name
 // needs to be already validated before.
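A short usage sketch of the new helper; the inputs and expected outputs mirror the table-driven test that follows, and the import path is the one this repository already uses.

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// A bare name is normalized with the default domain, path, and tag.
	named, err := reference.ParseDockerRef("busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // docker.io/library/busybox:latest

	// When both a tag and a digest are present, only the digest is kept.
	named, err = reference.ParseDockerRef(
		"gcr.io/library/busybox:latest@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582
}
```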
|
@ -623,3 +623,83 @@ func TestMatch(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseDockerRef(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "nothing",
|
||||
input: "busybox",
|
||||
expected: "docker.io/library/busybox:latest",
|
||||
},
|
||||
{
|
||||
name: "tag only",
|
||||
input: "busybox:latest",
|
||||
expected: "docker.io/library/busybox:latest",
|
||||
},
|
||||
{
|
||||
name: "digest only",
|
||||
input: "busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
|
||||
expected: "docker.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
|
||||
},
|
||||
{
|
||||
name: "path only",
|
||||
input: "library/busybox",
|
||||
expected: "docker.io/library/busybox:latest",
|
||||
},
|
||||
{
|
||||
name: "hostname only",
|
||||
input: "docker.io/busybox",
|
||||
expected: "docker.io/library/busybox:latest",
|
||||
},
|
||||
{
|
||||
name: "no tag",
|
||||
input: "docker.io/library/busybox",
|
||||
expected: "docker.io/library/busybox:latest",
|
||||
},
|
||||
{
|
||||
name: "no path",
|
||||
input: "docker.io/busybox:latest",
|
||||
expected: "docker.io/library/busybox:latest",
|
||||
},
|
||||
{
|
||||
name: "no hostname",
|
||||
input: "library/busybox:latest",
|
||||
expected: "docker.io/library/busybox:latest",
|
||||
},
|
||||
{
|
||||
name: "full reference with tag",
|
||||
input: "docker.io/library/busybox:latest",
|
||||
expected: "docker.io/library/busybox:latest",
|
||||
},
|
||||
{
|
||||
name: "gcr reference without tag",
|
||||
input: "gcr.io/library/busybox",
|
||||
expected: "gcr.io/library/busybox:latest",
|
||||
},
|
||||
{
|
||||
name: "both tag and digest",
|
||||
input: "gcr.io/library/busybox:latest@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
|
||||
expected: "gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
|
||||
},
|
||||
}
|
||||
for _, test := range testcases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
normalized, err := ParseDockerRef(test.input)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
output := normalized.String()
|
||||
if output != test.expected {
|
||||
t.Fatalf("expected %q to be parsed as %v, got %v", test.input, test.expected, output)
|
||||
}
|
||||
_, err = Parse(output)
|
||||
if err != nil {
|
||||
t.Fatalf("%q should be a valid reference, but got an error: %v", output, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -205,7 +205,7 @@ func Parse(s string) (Reference, error) {
|
|||
var repo repository
|
||||
|
||||
nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
|
||||
if nameMatch != nil && len(nameMatch) == 3 {
|
||||
if len(nameMatch) == 3 {
|
||||
repo.domain = nameMatch[1]
|
||||
repo.path = nameMatch[2]
|
||||
} else {
|
||||
|
|
|
@ -639,7 +639,7 @@ func TestParseNamed(t *testing.T) {
|
|||
failf("error parsing name: %s", err)
|
||||
continue
|
||||
} else if err == nil && testcase.err != nil {
|
||||
failf("parsing succeded: expected error %v", testcase.err)
|
||||
failf("parsing succeeded: expected error %v", testcase.err)
|
||||
continue
|
||||
} else if err != testcase.err {
|
||||
failf("unexpected error %v, expected %v", err, testcase.err)
|
||||
|
|
|
@ -207,11 +207,11 @@ func (errs Errors) MarshalJSON() ([]byte, error) {
|
|||
for _, daErr := range errs {
|
||||
var err Error
|
||||
|
||||
switch daErr.(type) {
|
||||
switch daErr := daErr.(type) {
|
||||
case ErrorCode:
|
||||
err = daErr.(ErrorCode).WithDetail(nil)
|
||||
err = daErr.WithDetail(nil)
|
||||
case Error:
|
||||
err = daErr.(Error)
|
||||
err = daErr
|
||||
default:
|
||||
err = ErrorCodeUnknown.WithDetail(daErr)
|
||||
|
||||
|
|
|
@ -252,15 +252,3 @@ func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
|
|||
u.RawQuery = merged.Encode()
|
||||
return u
|
||||
}
|
||||
|
||||
// appendValues appends the parameters to the url. Panics if the string is not
|
||||
// a url.
|
||||
func appendValues(u string, values ...url.Values) string {
|
||||
up, err := url.Parse(u)
|
||||
|
||||
if err != nil {
|
||||
panic(err) // should never happen
|
||||
}
|
||||
|
||||
return appendValuesURL(up, values...).String()
|
||||
}
|
||||
|
|
|
@ -182,11 +182,6 @@ func TestURLBuilderWithPrefix(t *testing.T) {
|
|||
doTest(false)
|
||||
}
|
||||
|
||||
type builderFromRequestTestCase struct {
|
||||
request *http.Request
|
||||
base string
|
||||
}
|
||||
|
||||
func TestBuilderFromRequest(t *testing.T) {
|
||||
u, err := url.Parse("http://example.com")
|
||||
if err != nil {
|
||||
|
|
|
@@ -162,11 +162,14 @@ func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) {
 
 	opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3]
 
-	autoRedirect, ok := options["autoredirect"].(bool)
-	if !ok {
-		return opts, fmt.Errorf("token auth requires a valid option bool: autoredirect")
+	autoRedirectVal, ok := options["autoredirect"]
+	if ok {
+		autoRedirect, ok := autoRedirectVal.(bool)
+		if !ok {
+			return opts, fmt.Errorf("token auth requires a valid option bool: autoredirect")
+		}
+		opts.autoRedirect = autoRedirect
 	}
-	opts.autoRedirect = autoRedirect
 
 	return opts, nil
 }
@ -117,8 +117,8 @@ func init() {
|
|||
var t octetType
|
||||
isCtl := c <= 31 || c == 127
|
||||
isChar := 0 <= c && c <= 127
|
||||
isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
|
||||
if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
|
||||
isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
|
||||
if strings.ContainsRune(" \t\r\n", rune(c)) {
|
||||
t |= isSpace
|
||||
}
|
||||
if isChar && !isCtl && !isSeparator {
|
||||
|
|
|
@ -466,7 +466,7 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) {
|
|||
},
|
||||
})
|
||||
|
||||
authenicate1 := fmt.Sprintf("Basic realm=localhost")
|
||||
authenicate1 := "Basic realm=localhost"
|
||||
basicCheck := func(a string) bool {
|
||||
return a == fmt.Sprintf("Basic %s", basicAuth(username, password))
|
||||
}
|
||||
|
@ -546,7 +546,7 @@ func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) {
|
|||
},
|
||||
})
|
||||
|
||||
authenicate1 := fmt.Sprintf("Basic realm=localhost")
|
||||
authenicate1 := "Basic realm=localhost"
|
||||
tokenExchanges := 0
|
||||
basicCheck := func(a string) bool {
|
||||
tokenExchanges = tokenExchanges + 1
|
||||
|
@ -706,7 +706,7 @@ func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) {
|
|||
},
|
||||
})
|
||||
|
||||
authenicate1 := fmt.Sprintf("Basic realm=localhost")
|
||||
authenicate1 := "Basic realm=localhost"
|
||||
tokenExchanges := 0
|
||||
basicCheck := func(a string) bool {
|
||||
tokenExchanges = tokenExchanges + 1
|
||||
|
@ -835,7 +835,7 @@ func TestEndpointAuthorizeBasic(t *testing.T) {
|
|||
|
||||
username := "user1"
|
||||
password := "funSecretPa$$word"
|
||||
authenicate := fmt.Sprintf("Basic realm=localhost")
|
||||
authenicate := "Basic realm=localhost"
|
||||
validCheck := func(a string) bool {
|
||||
return a == fmt.Sprintf("Basic %s", basicAuth(username, password))
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/testutil"
|
||||
)
|
||||
|
||||
|
|
|
@ -16,7 +16,7 @@ import (
|
|||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
"github.com/docker/distribution/registry/storage/cache"
|
||||
"github.com/docker/distribution/registry/storage/cache/memory"
|
||||
|
@ -736,7 +736,12 @@ func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateO
|
|||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := bs.client.Post(u, "", nil)
|
||||
req, err := http.NewRequest("POST", u, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := bs.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@ import (
|
|||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/testutil"
|
||||
"github.com/docker/distribution/uuid"
|
||||
"github.com/docker/libtrust"
|
||||
|
@ -152,7 +152,7 @@ func TestBlobFetch(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if bytes.Compare(b, b1) != 0 {
|
||||
if !bytes.Equal(b, b1) {
|
||||
t.Fatalf("Wrong bytes values fetched: [%d]byte != [%d]byte", len(b), len(b1))
|
||||
}
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@ import (
|
|||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
storagedriver "github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/docker/distribution/registry/storage/driver/factory"
|
||||
_ "github.com/docker/distribution/registry/storage/driver/testdriver"
|
||||
|
@ -959,7 +959,6 @@ func testManifestWithStorageError(t *testing.T, env *testEnv, imageName referenc
|
|||
defer resp.Body.Close()
|
||||
checkResponse(t, "getting non-existent manifest", resp, expectedStatusCode)
|
||||
checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, expectedErrorCode)
|
||||
return
|
||||
}
|
||||
|
||||
func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs {
|
||||
|
@ -1066,12 +1065,11 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name
|
|||
expectedLayers := make(map[digest.Digest]io.ReadSeeker)
|
||||
|
||||
for i := range unsignedManifest.FSLayers {
|
||||
rs, dgstStr, err := testutil.CreateRandomTarFile()
|
||||
rs, dgst, err := testutil.CreateRandomTarFile()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("error creating random layer %d: %v", i, err)
|
||||
}
|
||||
dgst := digest.Digest(dgstStr)
|
||||
|
||||
expectedLayers[dgst] = rs
|
||||
unsignedManifest.FSLayers[i].BlobSum = dgst
|
||||
|
@ -1405,12 +1403,11 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name
|
|||
expectedLayers := make(map[digest.Digest]io.ReadSeeker)
|
||||
|
||||
for i := range manifest.Layers {
|
||||
rs, dgstStr, err := testutil.CreateRandomTarFile()
|
||||
rs, dgst, err := testutil.CreateRandomTarFile()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("error creating random layer %d: %v", i, err)
|
||||
}
|
||||
dgst := digest.Digest(dgstStr)
|
||||
|
||||
expectedLayers[dgst] = rs
|
||||
manifest.Layers[i].Digest = dgst
|
||||
|
@ -2357,7 +2354,7 @@ func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, error
|
|||
// Ensure that counts of expected errors were all non-zero
|
||||
for code := range expected {
|
||||
if counts[code] == 0 {
|
||||
t.Fatalf("expected error code %v not encounterd during %s: %s", code, msg, string(p))
|
||||
t.Fatalf("expected error code %v not encountered during %s: %s", code, msg, string(p))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2432,11 +2429,10 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string)
|
|||
expectedLayers := make(map[digest.Digest]io.ReadSeeker)
|
||||
|
||||
for i := range unsignedManifest.FSLayers {
|
||||
rs, dgstStr, err := testutil.CreateRandomTarFile()
|
||||
rs, dgst, err := testutil.CreateRandomTarFile()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating random layer %d: %v", i, err)
|
||||
}
|
||||
dgst := digest.Digest(dgstStr)
|
||||
|
||||
expectedLayers[dgst] = rs
|
||||
unsignedManifest.FSLayers[i].BlobSum = dgst
|
||||
|
|
|
@ -2,10 +2,11 @@ package handlers
|
|||
|
||||
import (
|
||||
"context"
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/rand"
|
||||
"expvar"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"math"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
@ -24,7 +25,7 @@ import (
|
|||
"github.com/docker/distribution/notifications"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/auth"
|
||||
registrymiddleware "github.com/docker/distribution/registry/middleware/registry"
|
||||
repositorymiddleware "github.com/docker/distribution/registry/middleware/repository"
|
||||
|
@ -610,7 +611,7 @@ func (app *App) configureLogHook(configuration *configuration.Configuration) {
|
|||
func (app *App) configureSecret(configuration *configuration.Configuration) {
|
||||
if configuration.HTTP.Secret == "" {
|
||||
var secretBytes [randomSecretSize]byte
|
||||
if _, err := cryptorand.Read(secretBytes[:]); err != nil {
|
||||
if _, err := rand.Read(secretBytes[:]); err != nil {
|
||||
panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err))
|
||||
}
|
||||
configuration.HTTP.Secret = string(secretBytes[:])
|
||||
|
@ -753,20 +754,18 @@ func (app *App) logError(ctx context.Context, errors errcode.Errors) {
|
|||
for _, e1 := range errors {
|
||||
var c context.Context
|
||||
|
||||
switch e1.(type) {
|
||||
switch e := e1.(type) {
|
||||
case errcode.Error:
|
||||
e, _ := e1.(errcode.Error)
|
||||
c = context.WithValue(ctx, errCodeKey{}, e.Code)
|
||||
c = context.WithValue(c, errMessageKey{}, e.Message)
|
||||
c = context.WithValue(c, errDetailKey{}, e.Detail)
|
||||
case errcode.ErrorCode:
|
||||
e, _ := e1.(errcode.ErrorCode)
|
||||
c = context.WithValue(ctx, errCodeKey{}, e)
|
||||
c = context.WithValue(c, errMessageKey{}, e.Message())
|
||||
default:
|
||||
// just normal go 'error'
|
||||
c = context.WithValue(ctx, errCodeKey{}, errcode.ErrorCodeUnknown)
|
||||
c = context.WithValue(c, errMessageKey{}, e1.Error())
|
||||
c = context.WithValue(c, errMessageKey{}, e.Error())
|
||||
}
|
||||
|
||||
c = dcontext.WithLogger(c, dcontext.GetLogger(c,
|
||||
|
@ -1062,8 +1061,13 @@ func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageD
|
|||
}
|
||||
|
||||
go func() {
|
||||
rand.Seed(time.Now().Unix())
|
||||
jitter := time.Duration(rand.Int()%60) * time.Minute
|
||||
randInt, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64))
|
||||
if err != nil {
|
||||
log.Infof("Failed to generate random jitter: %v", err)
|
||||
// sleep 30min for failure case
|
||||
randInt = big.NewInt(30)
|
||||
}
|
||||
jitter := time.Duration(randInt.Int64()%60) * time.Minute
|
||||
log.Infof("Starting upload purge in %s", jitter)
|
||||
time.Sleep(jitter)
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ import (
|
|||
"github.com/docker/distribution/configuration"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/auth"
|
||||
_ "github.com/docker/distribution/registry/auth/silly"
|
||||
"github.com/docker/distribution/registry/storage"
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
|
|||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/gorilla/handlers"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
dcontext "github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/storage"
|
||||
"github.com/gorilla/handlers"
|
||||
"github.com/opencontainers/go-digest"
|
||||
|
@ -172,7 +172,7 @@ func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Reque
|
|||
|
||||
ct := r.Header.Get("Content-Type")
|
||||
if ct != "" && ct != "application/octet-stream" {
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type")))
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("bad Content-Type")))
|
||||
// TODO(dmcgowan): encode error
|
||||
return
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
"github.com/docker/distribution"
|
||||
dcontext "github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/auth"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
|
|
@ -20,7 +20,7 @@ type logHook struct {
|
|||
func (hook *logHook) Fire(entry *logrus.Entry) error {
|
||||
addr := strings.Split(hook.Mail.Addr, ":")
|
||||
if len(addr) != 2 {
|
||||
return errors.New("Invalid Mail Address")
|
||||
return errors.New("invalid Mail Address")
|
||||
}
|
||||
host := addr[0]
|
||||
subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message)
|
||||
|
@ -37,7 +37,7 @@ func (hook *logHook) Fire(entry *logrus.Entry) error {
|
|||
if err := t.Execute(b, entry); err != nil {
|
||||
return err
|
||||
}
|
||||
body := fmt.Sprintf("%s", b)
|
||||
body := b.String()
|
||||
|
||||
return hook.Mail.sendMail(subject, body)
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ type mailer struct {
|
|||
func (mail *mailer) sendMail(subject, message string) error {
|
||||
addr := strings.Split(mail.Addr, ":")
|
||||
if len(addr) != 2 {
|
||||
return errors.New("Invalid Mail Address")
|
||||
return errors.New("invalid Mail Address")
|
||||
}
|
||||
host := addr[0]
|
||||
msg := []byte("To:" + strings.Join(mail.To, ";") +
|
||||
|
|
|
@ -14,11 +14,11 @@ import (
|
|||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/auth"
|
||||
"github.com/gorilla/handlers"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// These constants determine which architecture and OS to choose from a
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
|
|||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/gorilla/handlers"
|
||||
)
|
||||
|
||||
|
|
|
@ -6,7 +6,6 @@ import (
|
|||
"net/http"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
dcontext "github.com/docker/distribution/context"
|
||||
|
@ -15,9 +14,6 @@ import (
|
|||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// todo(richardscothern): from cache control header or config file
|
||||
const blobTTL = 24 * 7 * time.Hour
|
||||
|
||||
type proxyBlobStore struct {
|
||||
localStore distribution.BlobStore
|
||||
remoteStore distribution.BlobService
|
||||
|
|
|
@ -193,7 +193,7 @@ func makeTestEnv(t *testing.T, name string) *testEnv {
|
|||
}
|
||||
|
||||
func makeBlob(size int) []byte {
|
||||
blob := make([]byte, size, size)
|
||||
blob := make([]byte, size)
|
||||
for i := 0; i < size; i++ {
|
||||
blob[i] = byte('A' + rand.Int()%48)
|
||||
}
|
||||
|
@ -204,16 +204,6 @@ func init() {
|
|||
rand.Seed(42)
|
||||
}
|
||||
|
||||
func perm(m []distribution.Descriptor) []distribution.Descriptor {
|
||||
for i := 0; i < len(m); i++ {
|
||||
j := rand.Intn(i + 1)
|
||||
tmp := m[i]
|
||||
m[i] = m[j]
|
||||
m[j] = tmp
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) {
|
||||
var inRemote []distribution.Descriptor
|
||||
|
||||
|
|
|
@@ -165,11 +165,10 @@ func populateRepo(ctx context.Context, t *testing.T, repository distribution.Rep
 		t.Fatalf("unexpected error creating test upload: %v", err)
 	}
 
-	rs, ts, err := testutil.CreateRandomTarFile()
+	rs, dgst, err := testutil.CreateRandomTarFile()
 	if err != nil {
 		t.Fatalf("unexpected error generating test layer file")
 	}
-	dgst := digest.Digest(ts)
 	if _, err := io.Copy(wr, rs); err != nil {
 		t.Fatalf("unexpected error copying to upload: %v", err)
 	}
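As the hunk above shows, testutil.CreateRandomTarFile on this branch hands back a digest.Digest directly, so callers no longer convert a string themselves. A hedged sketch of how a caller consumes the new shape; the helper body below is a stand-in for illustration, not the real testutil implementation:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/opencontainers/go-digest"
)

// createRandomPayload is a stand-in for testutil.CreateRandomTarFile: it
// returns a ReadSeeker plus the digest of its contents.
func createRandomPayload() (io.ReadSeeker, digest.Digest, error) {
	payload := []byte("not really a tar file, just example bytes")
	return bytes.NewReader(payload), digest.FromBytes(payload), nil
}

func main() {
	rs, dgst, err := createRandomPayload()
	if err != nil {
		panic(err)
	}
	n, _ := io.Copy(io.Discard, rs)
	fmt.Printf("copied %d bytes with digest %s\n", n, dgst)
}
```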
@@ -118,7 +118,7 @@ func (ttles *TTLExpirationScheduler) Start() error {
 	}
 
 	if !ttles.stopped {
-		return fmt.Errorf("Scheduler already started")
+		return fmt.Errorf("scheduler already started")
 	}
 
 	dcontext.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...")
@@ -126,7 +126,7 @@ func (ttles *TTLExpirationScheduler) Start() error {
 
 	// Start timer for each deserialized entry
 	for _, entry := range ttles.entries {
-		entry.timer = ttles.startTimer(entry, entry.Expiry.Sub(time.Now()))
+		entry.timer = ttles.startTimer(entry, time.Until(entry.Expiry))
 	}
 
 	// Start a ticker to periodically save the entries index
@@ -164,7 +164,7 @@ func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duratio
 		Expiry:    time.Now().Add(ttl),
 		EntryType: eType,
 	}
-	dcontext.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now()))
+	dcontext.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, time.Until(entry.Expiry))
 	if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil {
 		oldEntry.timer.Stop()
 	}
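Both scheduler hunks swap `entry.Expiry.Sub(time.Now())` for `time.Until(entry.Expiry)`. The two expressions compute the same duration; time.Until has existed since Go 1.8 and is what the gosimple check S1024 suggests. A quick sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	expiry := time.Now().Add(24 * 7 * time.Hour)

	// The old spelling and the replacement compute the same remaining duration.
	oldForm := expiry.Sub(time.Now())
	newForm := time.Until(expiry)

	// Any difference is only the wall-clock time elapsed between the two calls.
	fmt.Println(oldForm-newForm < time.Millisecond) // true
}
```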
@ -9,12 +9,14 @@ import (
|
|||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"rsc.io/letsencrypt"
|
||||
|
||||
"github.com/Shopify/logrus-bugsnag"
|
||||
logrus_bugsnag "github.com/Shopify/logrus-bugsnag"
|
||||
|
||||
logstash "github.com/bshuster-repo/logrus-logstash-hook"
|
||||
"github.com/bugsnag/bugsnag-go"
|
||||
"github.com/docker/distribution/configuration"
|
||||
|
@ -31,6 +33,60 @@ import (
|
|||
"github.com/yvasiyarov/gorelic"
|
||||
)
|
||||
|
||||
// a map of TLS cipher suite names to constants in https://golang.org/pkg/crypto/tls/#pkg-constants
|
||||
var cipherSuites = map[string]uint16{
|
||||
// TLS 1.0 - 1.2 cipher suites
|
||||
"TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
|
||||
"TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
|
||||
"TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
|
||||
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
|
||||
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
|
||||
// TLS 1.3 cipher suites
|
||||
"TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256,
|
||||
"TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384,
|
||||
"TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256,
|
||||
}
|
||||
|
||||
// a list of default ciphersuites to utilize
|
||||
var defaultCipherSuites = []uint16{
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_AES_128_GCM_SHA256,
|
||||
tls.TLS_CHACHA20_POLY1305_SHA256,
|
||||
tls.TLS_AES_256_GCM_SHA384,
|
||||
}
|
||||
|
||||
// maps tls version strings to constants
|
||||
var defaultTLSVersionStr = "tls1.2"
|
||||
var tlsVersions = map[string]uint16{
|
||||
// user specified values
|
||||
"tls1.0": tls.VersionTLS10,
|
||||
"tls1.1": tls.VersionTLS11,
|
||||
"tls1.2": tls.VersionTLS12,
|
||||
"tls1.3": tls.VersionTLS13,
|
||||
}
|
||||
|
||||
// this channel gets notified when process receives signal. It is global to ease unit testing
|
||||
var quit = make(chan os.Signal, 1)
|
||||
|
||||
|
@ -125,6 +181,35 @@ func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Reg
|
|||
}, nil
|
||||
}
|
||||
|
||||
// takes a list of cipher suites and converts it to a list of respective tls constants
|
||||
// if an empty list is provided, then the defaults will be used
|
||||
func getCipherSuites(names []string) ([]uint16, error) {
|
||||
if len(names) == 0 {
|
||||
return defaultCipherSuites, nil
|
||||
}
|
||||
cipherSuiteConsts := make([]uint16, len(names))
|
||||
for i, name := range names {
|
||||
cipherSuiteConst, ok := cipherSuites[name]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown TLS cipher suite '%s' specified for http.tls.cipherSuites", name)
|
||||
}
|
||||
cipherSuiteConsts[i] = cipherSuiteConst
|
||||
}
|
||||
return cipherSuiteConsts, nil
|
||||
}
|
||||
|
||||
// takes a list of cipher suite ids and converts it to a list of respective names
|
||||
func getCipherSuiteNames(ids []uint16) []string {
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
names := make([]string, len(ids))
|
||||
for i, id := range ids {
|
||||
names[i] = tls.CipherSuiteName(id)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
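A small sketch of how the two helpers introduced above fit together: cipher suite names from configuration are mapped to crypto/tls constants, and tls.CipherSuiteName (available since Go 1.14) turns the constants back into strings for logging. The standalone functions below re-implement the same idea for illustration only:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"strings"
)

// A trimmed-down version of the name-to-constant table used by the registry.
var suites = map[string]uint16{
	"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
	"TLS_AES_128_GCM_SHA256":                tls.TLS_AES_128_GCM_SHA256,
}

func toConstants(names []string) ([]uint16, error) {
	out := make([]uint16, len(names))
	for i, name := range names {
		id, ok := suites[name]
		if !ok {
			return nil, fmt.Errorf("unknown TLS cipher suite '%s'", name)
		}
		out[i] = id
	}
	return out, nil
}

func main() {
	ids, err := toConstants([]string{"TLS_AES_128_GCM_SHA256"})
	if err != nil {
		panic(err)
	}
	names := make([]string, len(ids))
	for i, id := range ids {
		names[i] = tls.CipherSuiteName(id) // reverse mapping for log output
	}
	fmt.Println(strings.Join(names, ","))
}
```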
||||
// ListenAndServe runs the registry's HTTP server.
|
||||
func (registry *Registry) ListenAndServe() error {
|
||||
config := registry.config
|
||||
|
@ -135,19 +220,27 @@ func (registry *Registry) ListenAndServe() error {
|
|||
}
|
||||
|
||||
if config.HTTP.TLS.Certificate != "" || config.HTTP.TLS.LetsEncrypt.CacheFile != "" {
|
||||
if config.HTTP.TLS.MinimumTLS == "" {
|
||||
config.HTTP.TLS.MinimumTLS = defaultTLSVersionStr
|
||||
}
|
||||
tlsMinVersion, ok := tlsVersions[config.HTTP.TLS.MinimumTLS]
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown minimum TLS level '%s' specified for http.tls.minimumtls", config.HTTP.TLS.MinimumTLS)
|
||||
}
|
||||
dcontext.GetLogger(registry.app).Infof("restricting TLS version to %s or higher", config.HTTP.TLS.MinimumTLS)
|
||||
|
||||
tlsCipherSuites, err := getCipherSuites(config.HTTP.TLS.CipherSuites)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dcontext.GetLogger(registry.app).Infof("restricting TLS cipher suites to: %s", strings.Join(getCipherSuiteNames(tlsCipherSuites), ","))
|
||||
|
||||
tlsConf := &tls.Config{
|
||||
ClientAuth: tls.NoClientCert,
|
||||
NextProtos: nextProtos(config),
|
||||
MinVersion: tls.VersionTLS10,
|
||||
MinVersion: tlsMinVersion,
|
||||
PreferServerCipherSuites: true,
|
||||
CipherSuites: []uint16{
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
},
|
||||
CipherSuites: tlsCipherSuites,
|
||||
}
|
||||
|
||||
if config.HTTP.TLS.LetsEncrypt.CacheFile != "" {
|
||||
|
@ -185,7 +278,7 @@ func (registry *Registry) ListenAndServe() error {
|
|||
}
|
||||
|
||||
if ok := pool.AppendCertsFromPEM(caPem); !ok {
|
||||
return fmt.Errorf("Could not add CA to pool")
|
||||
return fmt.Errorf("could not add CA to pool")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
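With this change ListenAndServe takes the minimum TLS version and the allowed cipher suites from configuration instead of hard-coding them, defaulting the minimum to tls1.2 when unset. A hedged example of what the corresponding `http.tls` section could look like; the key names are inferred from the error messages in the hunk above ("http.tls.minimumtls", "http.tls.cipherSuites"), so treat the exact spelling as an assumption:

```yaml
http:
  addr: :5000
  tls:
    certificate: /etc/docker/registry/server.pem
    key: /etc/docker/registry/server.key
    # Illustrative values only.
    minimumtls: tls1.2
    ciphersuites:
      - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
      - TLS_AES_128_GCM_SHA256
```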
|
@ -3,12 +3,24 @@ package registry
|
|||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -38,18 +50,30 @@ func TestNextProtos(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func setupRegistry() (*Registry, error) {
|
||||
type registryTLSConfig struct {
|
||||
cipherSuites []string
|
||||
certificatePath string
|
||||
privateKeyPath string
|
||||
certificate *tls.Certificate
|
||||
}
|
||||
|
||||
func setupRegistry(tlsCfg *registryTLSConfig, addr string) (*Registry, error) {
|
||||
config := &configuration.Configuration{}
|
||||
// TODO: this needs to change to something ephemeral as the test will fail if there is any server
|
||||
// already listening on port 5000
|
||||
config.HTTP.Addr = ":5000"
|
||||
config.HTTP.Addr = addr
|
||||
config.HTTP.DrainTimeout = time.Duration(10) * time.Second
|
||||
if tlsCfg != nil {
|
||||
config.HTTP.TLS.CipherSuites = tlsCfg.cipherSuites
|
||||
config.HTTP.TLS.Certificate = tlsCfg.certificatePath
|
||||
config.HTTP.TLS.Key = tlsCfg.privateKeyPath
|
||||
}
|
||||
config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}}
|
||||
return NewRegistry(context.Background(), config)
|
||||
}
|
||||
|
||||
func TestGracefulShutdown(t *testing.T) {
|
||||
registry, err := setupRegistry()
|
||||
registry, err := setupRegistry(nil, ":5000")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -98,3 +122,227 @@ func TestGracefulShutdown(t *testing.T) {
|
|||
t.Error("Body is not {}; ", string(body))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCipherSuite(t *testing.T) {
|
||||
resp, err := getCipherSuites([]string{"TLS_RSA_WITH_AES_128_CBC_SHA"})
|
||||
if err != nil || len(resp) != 1 || resp[0] != tls.TLS_RSA_WITH_AES_128_CBC_SHA {
|
||||
t.Errorf("expected cipher suite %q, got %q",
|
||||
"TLS_RSA_WITH_AES_128_CBC_SHA",
|
||||
strings.Join(getCipherSuiteNames(resp), ","),
|
||||
)
|
||||
}
|
||||
|
||||
resp, err = getCipherSuites([]string{"TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_AES_128_GCM_SHA256"})
|
||||
if err != nil || len(resp) != 2 ||
|
||||
resp[0] != tls.TLS_RSA_WITH_AES_128_CBC_SHA || resp[1] != tls.TLS_AES_128_GCM_SHA256 {
|
||||
t.Errorf("expected cipher suites %q, got %q",
|
||||
"TLS_RSA_WITH_AES_128_CBC_SHA,TLS_AES_128_GCM_SHA256",
|
||||
strings.Join(getCipherSuiteNames(resp), ","),
|
||||
)
|
||||
}
|
||||
|
||||
_, err = getCipherSuites([]string{"TLS_RSA_WITH_AES_128_CBC_SHA", "bad_input"})
|
||||
if err == nil {
|
||||
t.Error("did not return expected error about unknown cipher suite")
|
||||
}
|
||||
}
|
||||
|
||||
func buildRegistryTLSConfig(name, keyType string, cipherSuites []string) (*registryTLSConfig, error) {
|
||||
var priv interface{}
|
||||
var pub crypto.PublicKey
|
||||
var err error
|
||||
switch keyType {
|
||||
case "rsa":
|
||||
priv, err = rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create rsa private key: %v", err)
|
||||
}
|
||||
rsaKey := priv.(*rsa.PrivateKey)
|
||||
pub = rsaKey.Public()
|
||||
case "ecdsa":
|
||||
priv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create ecdsa private key: %v", err)
|
||||
}
|
||||
ecdsaKey := priv.(*ecdsa.PrivateKey)
|
||||
pub = ecdsaKey.Public()
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported key type: %v", keyType)
|
||||
}
|
||||
|
||||
notBefore := time.Now()
|
||||
notAfter := notBefore.Add(time.Minute)
|
||||
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
|
||||
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create serial number: %v", err)
|
||||
}
|
||||
cert := x509.Certificate{
|
||||
SerialNumber: serialNumber,
|
||||
Subject: pkix.Name{
|
||||
Organization: []string{"registry_test"},
|
||||
},
|
||||
NotBefore: notBefore,
|
||||
NotAfter: notAfter,
|
||||
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
BasicConstraintsValid: true,
|
||||
IPAddresses: []net.IP{net.ParseIP("127.0.0.1")},
|
||||
DNSNames: []string{"localhost"},
|
||||
IsCA: true,
|
||||
}
|
||||
derBytes, err := x509.CreateCertificate(rand.Reader, &cert, &cert, pub, priv)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create certificate: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(os.TempDir()); os.IsNotExist(err) {
|
||||
os.Mkdir(os.TempDir(), 1777)
|
||||
}
|
||||
|
||||
certPath := path.Join(os.TempDir(), name+".pem")
|
||||
certOut, err := os.Create(certPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create pem: %v", err)
|
||||
}
|
||||
if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
|
||||
return nil, fmt.Errorf("failed to write data to %s: %v", certPath, err)
|
||||
}
|
||||
if err := certOut.Close(); err != nil {
|
||||
return nil, fmt.Errorf("error closing %s: %v", certPath, err)
|
||||
}
|
||||
|
||||
keyPath := path.Join(os.TempDir(), name+".key")
|
||||
keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open %s for writing: %v", keyPath, err)
|
||||
}
|
||||
privBytes, err := x509.MarshalPKCS8PrivateKey(priv)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to marshal private key: %v", err)
|
||||
}
|
||||
if err := pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil {
|
||||
return nil, fmt.Errorf("failed to write data to key.pem: %v", err)
|
||||
}
|
||||
if err := keyOut.Close(); err != nil {
|
||||
return nil, fmt.Errorf("error closing %s: %v", keyPath, err)
|
||||
}
|
||||
|
||||
tlsCert := tls.Certificate{
|
||||
Certificate: [][]byte{derBytes},
|
||||
PrivateKey: priv,
|
||||
}
|
||||
|
||||
tlsTestCfg := registryTLSConfig{
|
||||
cipherSuites: cipherSuites,
|
||||
certificatePath: certPath,
|
||||
privateKeyPath: keyPath,
|
||||
certificate: &tlsCert,
|
||||
}
|
||||
|
||||
return &tlsTestCfg, nil
|
||||
}
|
||||
|
||||
func TestRegistrySupportedCipherSuite(t *testing.T) {
|
||||
name := "registry_test_server_supported_cipher"
|
||||
cipherSuites := []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"}
|
||||
serverTLS, err := buildRegistryTLSConfig(name, "rsa", cipherSuites)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
registry, err := setupRegistry(serverTLS, ":5001")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// run registry server
|
||||
var errchan chan error
|
||||
go func() {
|
||||
errchan <- registry.ListenAndServe()
|
||||
}()
|
||||
select {
|
||||
case err = <-errchan:
|
||||
t.Fatalf("Error listening: %v", err)
|
||||
default:
|
||||
}
|
||||
|
||||
// Wait for some unknown random time for server to start listening
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// send tls request with server supported cipher suite
|
||||
clientCipherSuites, err := getCipherSuites(cipherSuites)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clientTLS := tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
CipherSuites: clientCipherSuites,
|
||||
}
|
||||
dialer := net.Dialer{
|
||||
Timeout: time.Second * 5,
|
||||
}
|
||||
conn, err := tls.DialWithDialer(&dialer, "tcp", "127.0.0.1:5001", &clientTLS)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fmt.Fprintf(conn, "GET /v2/ HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n")
|
||||
|
||||
resp, err := http.ReadResponse(bufio.NewReader(conn), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.Status != "200 OK" {
|
||||
t.Error("response status is not 200 OK: ", resp.Status)
|
||||
}
|
||||
if body, err := ioutil.ReadAll(resp.Body); err != nil || string(body) != "{}" {
|
||||
t.Error("Body is not {}; ", string(body))
|
||||
}
|
||||
|
||||
// send stop signal
|
||||
quit <- os.Interrupt
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
func TestRegistryUnsupportedCipherSuite(t *testing.T) {
|
||||
name := "registry_test_server_unsupported_cipher"
|
||||
serverTLS, err := buildRegistryTLSConfig(name, "rsa", []string{"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA358"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
registry, err := setupRegistry(serverTLS, ":5002")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// run registry server
|
||||
var errchan chan error
|
||||
go func() {
|
||||
errchan <- registry.ListenAndServe()
|
||||
}()
|
||||
select {
|
||||
case err = <-errchan:
|
||||
t.Fatalf("Error listening: %v", err)
|
||||
default:
|
||||
}
|
||||
|
||||
// Wait for some unknown random time for server to start listening
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// send tls request with server unsupported cipher suite
|
||||
clientTLS := tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256},
|
||||
}
|
||||
dialer := net.Dialer{
|
||||
Timeout: time.Second * 5,
|
||||
}
|
||||
_, err = tls.DialWithDialer(&dialer, "tcp", "127.0.0.1:5002", &clientTLS)
|
||||
if err == nil {
|
||||
t.Error("expected TLS connection to timeout")
|
||||
}
|
||||
|
||||
// send stop signal
|
||||
quit <- os.Interrupt
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
|
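The new TLS tests above start the registry in a goroutine, check an error channel with a non-blocking select, and then sleep for a fixed interval before dialing. A small, hypothetical variant of that startup pattern, using a buffered error channel (so the goroutine can never block on send) and a dial-based readiness probe instead of a fixed sleep; the address is an arbitrary example:

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{Addr: "127.0.0.1:5003", Handler: http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, "{}") })}

	// Buffered so the goroutine can hand back the error even if nobody is
	// receiving yet; an unbuffered, nil, or unread channel would block it.
	errchan := make(chan error, 1)
	go func() { errchan <- srv.ListenAndServe() }()

	// Probe the port instead of sleeping for a fixed interval.
	for i := 0; i < 50; i++ {
		conn, err := net.DialTimeout("tcp", "127.0.0.1:5003", 100*time.Millisecond)
		if err == nil {
			conn.Close()
			break
		}
		time.Sleep(100 * time.Millisecond)
	}

	select {
	case err := <-errchan:
		fmt.Println("server failed to start:", err)
	default:
		fmt.Println("server is accepting connections")
	}
	srv.Close()
}
```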
|
@ -418,7 +418,7 @@ func TestBlobMount(t *testing.T) {
|
|||
|
||||
bs := repository.Blobs(ctx)
|
||||
// Test destination for existence.
|
||||
statDesc, err = bs.Stat(ctx, desc.Digest)
|
||||
_, err = bs.Stat(ctx, desc.Digest)
|
||||
if err == nil {
|
||||
t.Fatalf("unexpected non-error stating unmounted blob: %v", desc)
|
||||
}
|
||||
|
@ -478,12 +478,12 @@ func TestBlobMount(t *testing.T) {
|
|||
t.Fatalf("Unexpected error deleting blob")
|
||||
}
|
||||
|
||||
d, err := bs.Stat(ctx, desc.Digest)
|
||||
_, err = bs.Stat(ctx, desc.Digest)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error stating blob deleted from source repository: %v", err)
|
||||
}
|
||||
|
||||
d, err = sbs.Stat(ctx, desc.Digest)
|
||||
d, err := sbs.Stat(ctx, desc.Digest)
|
||||
if err == nil {
|
||||
t.Fatalf("unexpected non-error stating deleted blob: %v", d)
|
||||
}
|
||||
|
|
|
@ -152,16 +152,6 @@ func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest,
|
|||
return linked, nil
|
||||
}
|
||||
|
||||
// resolve reads the digest link at path and returns the blob store path.
|
||||
func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) {
|
||||
dgst, err := bs.readlink(ctx, path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return bs.path(dgst)
|
||||
}
|
||||
|
||||
type blobStatter struct {
|
||||
driver driver.StorageDriver
|
||||
}
|
||||
|
|
registry/storage/cache/cachecheck/suite.go (vendored, 3 changes)
|
@ -173,8 +173,7 @@ func checkBlobDescriptorCacheClear(ctx context.Context, t *testing.T, provider c
|
|||
t.Error(err)
|
||||
}
|
||||
|
||||
desc, err = cache.Stat(ctx, localDigest)
|
||||
if err == nil {
|
||||
if _, err = cache.Stat(ctx, localDigest); err == nil {
|
||||
t.Fatalf("expected error statting deleted blob: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -55,17 +55,17 @@ func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (st
|
|||
func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
||||
accountName, ok := parameters[paramAccountName]
|
||||
if !ok || fmt.Sprint(accountName) == "" {
|
||||
return nil, fmt.Errorf("No %s parameter provided", paramAccountName)
|
||||
return nil, fmt.Errorf("no %s parameter provided", paramAccountName)
|
||||
}
|
||||
|
||||
accountKey, ok := parameters[paramAccountKey]
|
||||
if !ok || fmt.Sprint(accountKey) == "" {
|
||||
return nil, fmt.Errorf("No %s parameter provided", paramAccountKey)
|
||||
return nil, fmt.Errorf("no %s parameter provided", paramAccountKey)
|
||||
}
|
||||
|
||||
container, ok := parameters[paramContainer]
|
||||
if !ok || fmt.Sprint(container) == "" {
|
||||
return nil, fmt.Errorf("No %s parameter provided", paramContainer)
|
||||
return nil, fmt.Errorf("no %s parameter provided", paramContainer)
|
||||
}
|
||||
|
||||
realm, ok := parameters[paramRealm]
|
||||
|
|
|
@@ -36,7 +36,7 @@ func init() {
 func TestFromParametersImpl(t *testing.T) {
 
 	tests := []struct {
-		params   map[string]interface{} // techincally the yaml can contain anything
+		params   map[string]interface{} // technically the yaml can contain anything
 		expected DriverParameters
 		pass     bool
 	}{
|
|
@ -252,20 +252,6 @@ func (d *dir) delete(p string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// dump outputs a primitive directory structure to stdout.
|
||||
func (d *dir) dump(indent string) {
|
||||
fmt.Println(indent, d.name()+"/")
|
||||
|
||||
for _, child := range d.children {
|
||||
if child.isdir() {
|
||||
child.(*dir).dump(indent + "\t")
|
||||
} else {
|
||||
fmt.Println(indent, child.name())
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dir) String() string {
|
||||
return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children)
|
||||
}
|
||||
|
|
|
@ -16,7 +16,7 @@ import (
|
|||
"github.com/aws/aws-sdk-go/service/cloudfront/sign"
|
||||
dcontext "github.com/docker/distribution/context"
|
||||
storagedriver "github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/docker/distribution/registry/storage/driver/middleware"
|
||||
storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
|
||||
)
|
||||
|
||||
// cloudFrontStorageMiddleware provides a simple implementation of layerHandler that
|
||||
|
@ -138,27 +138,33 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o
|
|||
|
||||
// parse ipfilteredby
|
||||
var awsIPs *awsIPs
|
||||
if ipFilteredBy := options["ipfilteredby"].(string); ok {
|
||||
switch strings.ToLower(strings.TrimSpace(ipFilteredBy)) {
|
||||
case "", "none":
|
||||
awsIPs = nil
|
||||
case "aws":
|
||||
newAWSIPs(ipRangesURL, updateFrequency, nil)
|
||||
case "awsregion":
|
||||
var awsRegion []string
|
||||
if regions, ok := options["awsregion"].(string); ok {
|
||||
for _, awsRegions := range strings.Split(regions, ",") {
|
||||
awsRegion = append(awsRegion, strings.ToLower(strings.TrimSpace(awsRegions)))
|
||||
if i, ok := options["ipfilteredby"]; ok {
|
||||
if ipFilteredBy, ok := i.(string); ok {
|
||||
switch strings.ToLower(strings.TrimSpace(ipFilteredBy)) {
|
||||
case "", "none":
|
||||
awsIPs = nil
|
||||
case "aws":
|
||||
awsIPs = newAWSIPs(ipRangesURL, updateFrequency, nil)
|
||||
case "awsregion":
|
||||
var awsRegion []string
|
||||
if i, ok := options["awsregion"]; ok {
|
||||
if regions, ok := i.(string); ok {
|
||||
for _, awsRegions := range strings.Split(regions, ",") {
|
||||
awsRegion = append(awsRegion, strings.ToLower(strings.TrimSpace(awsRegions)))
|
||||
}
|
||||
awsIPs = newAWSIPs(ipRangesURL, updateFrequency, awsRegion)
|
||||
} else {
|
||||
return nil, fmt.Errorf("awsRegion must be a comma separated string of valid aws regions")
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("awsRegion is not defined")
|
||||
}
|
||||
awsIPs = newAWSIPs(ipRangesURL, updateFrequency, awsRegion)
|
||||
} else {
|
||||
return nil, fmt.Errorf("awsRegion must be a comma separated string of valid aws regions")
|
||||
default:
|
||||
return nil, fmt.Errorf("ipfilteredby only allows a string the following value: none|aws|awsregion")
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("ipfilteredby only allows a string the following value: none|aws|awsregion")
|
||||
} else {
|
||||
return nil, fmt.Errorf("ipfilteredby only allows a string with the following value: none|aws|awsregion")
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("ipfilteredby only allows a string with the following value: none|aws|awsregion")
|
||||
}
|
||||
|
||||
return &cloudFrontStorageMiddleware{
|
||||
|
|
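The rewritten block above replaces a direct `options["ipfilteredby"].(string)` assertion, which panics when the value is not a string, with an explicit two-step lookup: first check that the key exists, then check that the value is a string, and only then switch on it. A self-contained sketch of that pattern; the option name comes from the hunk, the rest (including how a missing key is reported) is simplified for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// parseIPFilteredBy mirrors the checked lookup pattern used in the fixed
// middleware: non-string values produce errors instead of panicking.
func parseIPFilteredBy(options map[string]interface{}) (string, error) {
	i, ok := options["ipfilteredby"]
	if !ok {
		return "", fmt.Errorf("ipfilteredby is not set")
	}
	s, ok := i.(string)
	if !ok {
		return "", fmt.Errorf("ipfilteredby only allows a string with the following value: none|aws|awsregion")
	}
	switch v := strings.ToLower(strings.TrimSpace(s)); v {
	case "", "none", "aws", "awsregion":
		return v, nil
	default:
		return "", fmt.Errorf("ipfilteredby only allows a string with the following value: none|aws|awsregion")
	}
}

func main() {
	for _, opts := range []map[string]interface{}{
		{"ipfilteredby": "AWS "},
		{"ipfilteredby": 42},
		{},
	} {
		v, err := parseIPFilteredBy(opts)
		fmt.Printf("value=%q err=%v\n", v, err)
	}
}
```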
|
@ -188,19 +188,19 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
|
||||
regionName := parameters["region"]
|
||||
if regionName == nil || fmt.Sprint(regionName) == "" {
|
||||
return nil, fmt.Errorf("No region parameter provided")
|
||||
return nil, fmt.Errorf("no region parameter provided")
|
||||
}
|
||||
region := fmt.Sprint(regionName)
|
||||
// Don't check the region value if a custom endpoint is provided.
|
||||
if regionEndpoint == "" {
|
||||
if _, ok := validRegions[region]; !ok {
|
||||
return nil, fmt.Errorf("Invalid region provided: %v", region)
|
||||
return nil, fmt.Errorf("invalid region provided: %v", region)
|
||||
}
|
||||
}
|
||||
|
||||
bucket := parameters["bucket"]
|
||||
if bucket == nil || fmt.Sprint(bucket) == "" {
|
||||
return nil, fmt.Errorf("No bucket parameter provided")
|
||||
return nil, fmt.Errorf("no bucket parameter provided")
|
||||
}
|
||||
|
||||
encryptBool := false
|
||||
|
@ -209,7 +209,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
case string:
|
||||
b, err := strconv.ParseBool(encrypt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("The encrypt parameter should be a boolean")
|
||||
return nil, fmt.Errorf("the encrypt parameter should be a boolean")
|
||||
}
|
||||
encryptBool = b
|
||||
case bool:
|
||||
|
@ -217,7 +217,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
case nil:
|
||||
// do nothing
|
||||
default:
|
||||
return nil, fmt.Errorf("The encrypt parameter should be a boolean")
|
||||
return nil, fmt.Errorf("the encrypt parameter should be a boolean")
|
||||
}
|
||||
|
||||
secureBool := true
|
||||
|
@ -226,7 +226,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
case string:
|
||||
b, err := strconv.ParseBool(secure)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("The secure parameter should be a boolean")
|
||||
return nil, fmt.Errorf("the secure parameter should be a boolean")
|
||||
}
|
||||
secureBool = b
|
||||
case bool:
|
||||
|
@ -234,7 +234,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
case nil:
|
||||
// do nothing
|
||||
default:
|
||||
return nil, fmt.Errorf("The secure parameter should be a boolean")
|
||||
return nil, fmt.Errorf("the secure parameter should be a boolean")
|
||||
}
|
||||
|
||||
skipVerifyBool := false
|
||||
|
@ -243,7 +243,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
case string:
|
||||
b, err := strconv.ParseBool(skipVerify)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("The skipVerify parameter should be a boolean")
|
||||
return nil, fmt.Errorf("the skipVerify parameter should be a boolean")
|
||||
}
|
||||
skipVerifyBool = b
|
||||
case bool:
|
||||
|
@ -251,7 +251,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
case nil:
|
||||
// do nothing
|
||||
default:
|
||||
return nil, fmt.Errorf("The skipVerify parameter should be a boolean")
|
||||
return nil, fmt.Errorf("the skipVerify parameter should be a boolean")
|
||||
}
|
||||
|
||||
v4Bool := true
|
||||
|
@ -260,7 +260,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
case string:
|
||||
b, err := strconv.ParseBool(v4auth)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("The v4auth parameter should be a boolean")
|
||||
return nil, fmt.Errorf("the v4auth parameter should be a boolean")
|
||||
}
|
||||
v4Bool = b
|
||||
case bool:
|
||||
|
@ -268,7 +268,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
case nil:
|
||||
// do nothing
|
||||
default:
|
||||
return nil, fmt.Errorf("The v4auth parameter should be a boolean")
|
||||
return nil, fmt.Errorf("the v4auth parameter should be a boolean")
|
||||
}
|
||||
|
||||
keyID := parameters["keyid"]
|
||||
|
@ -306,7 +306,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
if storageClassParam != nil {
|
||||
storageClassString, ok := storageClassParam.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid",
|
||||
return nil, fmt.Errorf("the storageclass parameter must be one of %v, %v invalid",
|
||||
[]string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam)
|
||||
}
|
||||
// All valid storage class parameters are UPPERCASE, so be a bit more flexible here
|
||||
|
@ -314,7 +314,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
if storageClassString != noStorageClass &&
|
||||
storageClassString != s3.StorageClassStandard &&
|
||||
storageClassString != s3.StorageClassReducedRedundancy {
|
||||
return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid",
|
||||
return nil, fmt.Errorf("the storageclass parameter must be one of %v, %v invalid",
|
||||
[]string{noStorageClass, s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam)
|
||||
}
|
||||
storageClass = storageClassString
|
||||
|
@ -330,11 +330,11 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
if objectACLParam != nil {
|
||||
objectACLString, ok := objectACLParam.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam)
|
||||
return nil, fmt.Errorf("invalid value for objectacl parameter: %v", objectACLParam)
|
||||
}
|
||||
|
||||
if _, ok = validObjectACLs[objectACLString]; !ok {
|
||||
return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam)
|
||||
return nil, fmt.Errorf("invalid value for objectacl parameter: %v", objectACLParam)
|
||||
}
|
||||
objectACL = objectACLString
|
||||
}
|
||||
|
@ -366,7 +366,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
return New(params)
|
||||
}
|
||||
|
||||
// getParameterAsInt64 converts paramaters[name] to an int64 value (using
|
||||
// getParameterAsInt64 converts parameters[name] to an int64 value (using
|
||||
// defaultt if nil), verifies it is no smaller than min, and returns it.
|
||||
func getParameterAsInt64(parameters map[string]interface{}, name string, defaultt int64, min int64, max int64) (int64, error) {
|
||||
rv := defaultt
|
||||
|
@ -389,7 +389,7 @@ func getParameterAsInt64(parameters map[string]interface{}, name string, default
|
|||
}
|
||||
|
||||
if rv < min || rv > max {
|
||||
return 0, fmt.Errorf("The %s %#v parameter should be a number between %d and %d (inclusive)", name, rv, min, max)
|
||||
return 0, fmt.Errorf("the %s %#v parameter should be a number between %d and %d (inclusive)", name, rv, min, max)
|
||||
}
|
||||
|
||||
return rv, nil
|
||||
|
@ -401,7 +401,7 @@ func New(params DriverParameters) (*Driver, error) {
|
|||
if !params.V4Auth &&
|
||||
(params.RegionEndpoint == "" ||
|
||||
strings.Contains(params.RegionEndpoint, "s3.amazonaws.com")) {
|
||||
return nil, fmt.Errorf("On Amazon S3 this storage driver can only be used with v4 authentication")
|
||||
return nil, fmt.Errorf("on Amazon S3 this storage driver can only be used with v4 authentication")
|
||||
}
|
||||
|
||||
awsConfig := aws.NewConfig()
|
||||
|
@ -878,7 +878,7 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int
|
|||
if ok {
|
||||
et, ok := expires.(time.Time)
|
||||
if ok {
|
||||
expiresIn = et.Sub(time.Now())
|
||||
expiresIn = time.Until(et)
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -970,8 +970,19 @@ func (d *driver) doWalk(parentCtx context.Context, objectCount *int64, path, pre
 	defer done("s3aws.ListObjectsV2Pages(%s)", path)
 	listObjectErr := d.S3.ListObjectsV2PagesWithContext(ctx, listObjectsInput, func(objects *s3.ListObjectsV2Output, lastPage bool) bool {
 
-		*objectCount += *objects.KeyCount
-		walkInfos := make([]walkInfoContainer, 0, *objects.KeyCount)
+		var count int64
+		// KeyCount was introduced with version 2 of the GET Bucket operation in S3.
+		// Some S3 implementations don't support V2 now, so we fall back to manual
+		// calculation of the key count if required
+		if objects.KeyCount != nil {
+			count = *objects.KeyCount
+			*objectCount += *objects.KeyCount
+		} else {
+			count = int64(len(objects.Contents) + len(objects.CommonPrefixes))
+			*objectCount += count
+		}
+
+		walkInfos := make([]walkInfoContainer, 0, count)
 
 		for _, dir := range objects.CommonPrefixes {
 			commonPrefix := *dir.Prefix
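The reworked callback guards the dereference of `objects.KeyCount`, which, per the comment in the hunk, some S3 implementations that lack the V2 listing operation leave nil, and falls back to counting the returned keys and common prefixes. The standalone sketch below exercises just that counting logic, with the response type trimmed to the fields the calculation needs:

```go
package main

import "fmt"

// page mirrors only the fields of an S3 ListObjectsV2 response that the
// fallback needs.
type page struct {
	KeyCount       *int64
	Contents       []string
	CommonPrefixes []string
}

// keyCount prefers the server-reported KeyCount and otherwise derives the
// count from the page contents, as the hunk above does.
func keyCount(p page) int64 {
	if p.KeyCount != nil {
		return *p.KeyCount
	}
	return int64(len(p.Contents) + len(p.CommonPrefixes))
}

func main() {
	reported := int64(3)
	fmt.Println(keyCount(page{KeyCount: &reported})) // 3
	fmt.Println(keyCount(page{
		Contents:       []string{"a", "b"},
		CommonPrefixes: []string{"p/"},
	})) // 3
}
```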
@ -39,12 +39,6 @@ import (
|
|||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
signatureVersion = "2"
|
||||
signatureMethod = "HmacSHA1"
|
||||
timeFormat = "2006-01-02T15:04:05Z"
|
||||
)
|
||||
|
||||
type signer struct {
|
||||
// Values that must be populated from the request
|
||||
Request *http.Request
|
||||
|
|
|
@ -160,23 +160,23 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||
}
|
||||
|
||||
if params.Username == "" {
|
||||
return nil, fmt.Errorf("No username parameter provided")
|
||||
return nil, fmt.Errorf("no username parameter provided")
|
||||
}
|
||||
|
||||
if params.Password == "" {
|
||||
return nil, fmt.Errorf("No password parameter provided")
|
||||
return nil, fmt.Errorf("no password parameter provided")
|
||||
}
|
||||
|
||||
if params.AuthURL == "" {
|
||||
return nil, fmt.Errorf("No authurl parameter provided")
|
||||
return nil, fmt.Errorf("no authurl parameter provided")
|
||||
}
|
||||
|
||||
if params.Container == "" {
|
||||
return nil, fmt.Errorf("No container parameter provided")
|
||||
return nil, fmt.Errorf("no container parameter provided")
|
||||
}
|
||||
|
||||
if params.ChunkSize < minChunkSize {
|
||||
return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize)
|
||||
return nil, fmt.Errorf("the chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize)
|
||||
}
|
||||
|
||||
return New(params)
|
||||
|
@ -211,15 +211,15 @@ func New(params Parameters) (*Driver, error) {
|
|||
}
|
||||
err := ct.Authenticate()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Swift authentication failed: %s", err)
|
||||
return nil, fmt.Errorf("swift authentication failed: %s", err)
|
||||
}
|
||||
|
||||
if _, _, err := ct.Container(params.Container); err == swift.ContainerNotFound {
|
||||
if err := ct.ContainerCreate(params.Container, nil); err != nil {
|
||||
return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err)
|
||||
return nil, fmt.Errorf("failed to create container %s (%s)", params.Container, err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("Failed to retrieve info about container %s (%s)", params.Container, err)
|
||||
return nil, fmt.Errorf("failed to retrieve info about container %s (%s)", params.Container, err)
|
||||
}
|
||||
|
||||
d := &driver{
|
||||
|
@ -258,7 +258,7 @@ func New(params Parameters) (*Driver, error) {
|
|||
if d.TempURLContainerKey {
|
||||
_, containerHeaders, err := d.Conn.Container(d.Container)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to fetch container info %s (%s)", d.Container, err)
|
||||
return nil, fmt.Errorf("failed to fetch container info %s (%s)", d.Container, err)
|
||||
}
|
||||
|
||||
d.SecretKey = containerHeaders["X-Container-Meta-Temp-Url-Key"]
|
||||
|
@ -273,7 +273,7 @@ func New(params Parameters) (*Driver, error) {
|
|||
// Use the account secret key
|
||||
_, accountHeaders, err := d.Conn.Account()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to fetch account info (%s)", err)
|
||||
return nil, fmt.Errorf("failed to fetch account info (%s)", err)
|
||||
}
|
||||
|
||||
d.SecretKey = accountHeaders["X-Account-Meta-Temp-Url-Key"]
|
||||
|
@ -350,7 +350,7 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
|
|||
}
|
||||
if isDLO && size == 0 {
|
||||
if time.Now().Add(waitingTime).After(endTime) {
|
||||
return nil, fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path)
|
||||
return nil, fmt.Errorf("timeout expired while waiting for segments of %s to show up", path)
|
||||
}
|
||||
time.Sleep(waitingTime)
|
||||
waitingTime *= 2
|
||||
|
@ -456,7 +456,7 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo,
|
|||
_, isDLO := headers["X-Object-Manifest"]
|
||||
if isDLO && info.Bytes == 0 {
|
||||
if time.Now().Add(waitingTime).After(endTime) {
|
||||
return nil, fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path)
|
||||
return nil, fmt.Errorf("timeout expired while waiting for segments of %s to show up", path)
|
||||
}
|
||||
time.Sleep(waitingTime)
|
||||
waitingTime *= 2
|
||||
|
@ -755,7 +755,7 @@ func chunkFilenames(slice []string, maxSize int) (chunks [][]string, err error)
|
|||
chunks = append(chunks, slice[offset:offset+chunkSize])
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("Max chunk size must be > 0")
|
||||
return nil, fmt.Errorf("max chunk size must be > 0")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -894,7 +894,7 @@ func (w *writer) waitForSegmentsToShowUp() error {
|
|||
if info.Bytes == w.size {
|
||||
break
|
||||
}
|
||||
err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", w.path)
|
||||
err = fmt.Errorf("timeout expired while waiting for segments of %s to show up", w.path)
|
||||
}
|
||||
if time.Now().Add(waitingTime).After(endTime) {
|
||||
break
|
||||
|
|
|
@ -98,15 +98,13 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
|
|||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
// In certain situations such as unfinished uploads, deleting all
|
||||
// tags in S3 or removing the _manifests folder manually, this
|
||||
// error may be of type PathNotFound.
|
||||
//
|
||||
// In these cases we can continue marking other manifests safely.
|
||||
if _, ok := err.(driver.PathNotFoundError); ok {
|
||||
return nil
|
||||
}
|
||||
// In certain situations such as unfinished uploads, deleting all
|
||||
// tags in S3 or removing the _manifests folder manually, this
|
||||
// error may be of type PathNotFound.
|
||||
//
|
||||
// In these cases we can continue marking other manifests safely.
|
||||
if _, ok := err.(driver.PathNotFoundError); ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
|
|
|
@ -18,6 +18,7 @@ func getContent(ctx context.Context, driver driver.StorageDriver, p string) ([]b
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
return readAllLimited(r, maxBlobGetSize)
|
||||
}
|
||||
|
|
|
@ -27,13 +27,12 @@ func TestLinkedBlobStoreCreateWithMountFrom(t *testing.T) {
|
|||
// readseekers for upload later.
|
||||
testLayers := map[digest.Digest]io.ReadSeeker{}
|
||||
for i := 0; i < 2; i++ {
|
||||
rs, ds, err := testutil.CreateRandomTarFile()
|
||||
rs, dgst, err := testutil.CreateRandomTarFile()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error generating test layer file")
|
||||
}
|
||||
dgst := digest.Digest(ds)
|
||||
|
||||
testLayers[digest.Digest(dgst)] = rs
|
||||
testLayers[dgst] = rs
|
||||
}
|
||||
|
||||
// upload the layers to foo/bar
|
||||
|
|
|
@ -13,7 +13,7 @@ import (
|
|||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// A ManifestHandler gets and puts manifests of a particular type.
|
||||
|
|
|
@ -19,7 +19,7 @@ import (
|
|||
"github.com/docker/distribution/testutil"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
type manifestStoreTestEnv struct {
|
||||
|
@ -91,13 +91,12 @@ func testManifestStorage(t *testing.T, schema1Enabled bool, options ...RegistryO
|
|||
// readseekers for upload later.
|
||||
testLayers := map[digest.Digest]io.ReadSeeker{}
|
||||
for i := 0; i < 2; i++ {
|
||||
rs, ds, err := testutil.CreateRandomTarFile()
|
||||
rs, dgst, err := testutil.CreateRandomTarFile()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error generating test layer file")
|
||||
}
|
||||
dgst := digest.Digest(ds)
|
||||
|
||||
testLayers[digest.Digest(dgst)] = rs
|
||||
testLayers[dgst] = rs
|
||||
m.FSLayers = append(m.FSLayers, schema1.FSLayer{
|
||||
BlobSum: dgst,
|
||||
})
|
||||
|
@ -414,11 +413,10 @@ func testOCIManifestStorage(t *testing.T, testname string, includeMediaTypes boo
|
|||
|
||||
// Add some layers
|
||||
for i := 0; i < 2; i++ {
|
||||
rs, ds, err := testutil.CreateRandomTarFile()
|
||||
rs, dgst, err := testutil.CreateRandomTarFile()
|
||||
if err != nil {
|
||||
t.Fatalf("%s: unexpected error generating test layer file", testname)
|
||||
}
|
||||
dgst := digest.Digest(ds)
|
||||
|
||||
wr, err := env.repository.Blobs(env.ctx).Create(env.ctx)
|
||||
if err != nil {
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
dcontext "github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/manifest/ocischema"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
//ocischemaManifestHandler is a ManifestHandler that covers ocischema manifests.
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/manifest/ocischema"
|
||||
"github.com/docker/distribution/registry/storage/driver/inmemory"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
func TestVerifyOCIManifestNonDistributableLayer(t *testing.T) {
|
||||
|
|
|
@ -133,10 +133,7 @@ func pathFor(spec pathSpec) (string, error) {
|
|||
|
||||
return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil
|
||||
case manifestRevisionLinkPathSpec:
|
||||
root, err := pathFor(manifestRevisionPathSpec{
|
||||
name: v.name,
|
||||
revision: v.revision,
|
||||
})
|
||||
root, err := pathFor(manifestRevisionPathSpec(v))
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -156,10 +153,7 @@ func pathFor(spec pathSpec) (string, error) {
|
|||
|
||||
return path.Join(root, v.tag), nil
|
||||
case manifestTagCurrentPathSpec:
|
||||
root, err := pathFor(manifestTagPathSpec{
|
||||
name: v.name,
|
||||
tag: v.tag,
|
||||
})
|
||||
root, err := pathFor(manifestTagPathSpec(v))
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -167,10 +161,7 @@ func pathFor(spec pathSpec) (string, error) {
|
|||
|
||||
return path.Join(root, "current", "link"), nil
|
||||
case manifestTagIndexPathSpec:
|
||||
root, err := pathFor(manifestTagPathSpec{
|
||||
name: v.name,
|
||||
tag: v.tag,
|
||||
})
|
||||
root, err := pathFor(manifestTagPathSpec(v))
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -178,11 +169,7 @@ func pathFor(spec pathSpec) (string, error) {
|
|||
|
||||
return path.Join(root, "index"), nil
|
||||
case manifestTagIndexEntryLinkPathSpec:
|
||||
root, err := pathFor(manifestTagIndexEntryPathSpec{
|
||||
name: v.name,
|
||||
tag: v.tag,
|
||||
revision: v.revision,
|
||||
})
|
||||
root, err := pathFor(manifestTagIndexEntryPathSpec(v))
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
|
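The pathFor cleanup above relies on Go's conversion rule for struct types: two struct types whose fields have identical names and types are convertible, so a `manifestRevisionLinkPathSpec` value can be converted to a `manifestRevisionPathSpec` in one expression instead of copying fields one by one. A minimal illustration with hypothetical spec types, not the registry's real ones:

```go
package main

import "fmt"

// Two path-spec shapes with identical fields, loosely modelled on the
// registry's pathSpec structs (names here are illustrative).
type revisionLinkSpec struct {
	name     string
	revision string
}

type revisionSpec struct {
	name     string
	revision string
}

func revisionRoot(s revisionSpec) string {
	return "repositories/" + s.name + "/_manifests/revisions/" + s.revision
}

func main() {
	link := revisionLinkSpec{name: "library/alpine", revision: "sha256:abc123"}

	// Field-identical struct types convert directly; no field-by-field copy needed.
	fmt.Println(revisionRoot(revisionSpec(link)))
}
```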
|
@ -10,7 +10,6 @@ func TestPathMapper(t *testing.T) {
|
|||
for _, testcase := range []struct {
|
||||
spec pathSpec
|
||||
expected string
|
||||
err error
|
||||
}{
|
||||
{
|
||||
spec: manifestRevisionPathSpec{
|
||||
|
|
|
@@ -59,7 +59,7 @@ func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, older
 // file, so gather files by UUID with a date from startedAt.
 func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) {
 	var errors []error
-	uploads := make(map[string]uploadData, 0)
+	uploads := make(map[string]uploadData)
 
 	inUploadDir := false
 	root, err := pathFor(repositoriesRootPathSpec{})
@@ -118,7 +118,7 @@ func TestPurgeOnlyUploads(t *testing.T) {
 		t.Fatalf(err.Error())
 	}
 	nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1)
-	if strings.Index(nonUploadPath, "_upload") != -1 {
+	if strings.Contains(nonUploadPath, "_upload") {
 		t.Fatalf("Non-upload path not created correctly")
 	}
 
@@ -132,7 +132,7 @@ func TestPurgeOnlyUploads(t *testing.T) {
 		t.Error("Unexpected errors", errs)
 	}
 	for _, file := range deleted {
-		if strings.Index(file, "_upload") == -1 {
+		if !strings.Contains(file, "_upload") {
 			t.Errorf("Non-upload file deleted")
 		}
 	}
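Both test hunks above rewrite `strings.Index(...) != -1` and `strings.Index(...) == -1` into `strings.Contains` and `!strings.Contains`; the behaviour is identical, the intent is just clearer. For example:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	path := "repositories/foo/_uploads/1234/data"

	// The two forms always agree; Contains simply states the intent.
	byIndex := strings.Index(path, "_upload") != -1
	byContains := strings.Contains(path, "_upload")

	fmt.Println(byIndex, byContains) // true true
}
```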
@ -14,9 +14,8 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
errUnexpectedURL = errors.New("unexpected URL on layer")
|
||||
errMissingURL = errors.New("missing URL on layer")
|
||||
errInvalidURL = errors.New("invalid URL on layer")
|
||||
errMissingURL = errors.New("missing URL on layer")
|
||||
errInvalidURL = errors.New("invalid URL on layer")
|
||||
)
|
||||
|
||||
//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests.
|
||||
|
@ -93,7 +92,7 @@ func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst sche
|
|||
switch descriptor.MediaType {
|
||||
case schema2.MediaTypeForeignLayer:
|
||||
// Clients download this layer from an external URL, so do not check for
|
||||
// its presense.
|
||||
// its presence.
|
||||
if len(descriptor.URLs) == 0 {
|
||||
err = errMissingURL
|
||||
}
|
||||
|
|
|
@ -50,25 +50,6 @@ func (ts *tagStore) All(ctx context.Context) ([]string, error) {
|
|||
return tags, nil
|
||||
}
|
||||
|
||||
// exists returns true if the specified manifest tag exists in the repository.
|
||||
func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) {
|
||||
tagPath, err := pathFor(manifestTagCurrentPathSpec{
|
||||
name: ts.repository.Named().Name(),
|
||||
tag: tag,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
exists, err := exists(ctx, ts.blobStore.driver, tagPath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return exists, nil
|
||||
}
|
||||
|
||||
// Tag tags the digest with the given tag, updating the the store to point at
|
||||
// the current tag. The digest must point to a manifest.
|
||||
func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
|
||||
|
|
|
@ -1,22 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/distribution/registry/storage/driver"
|
||||
)
|
||||
|
||||
// Exists provides a utility method to test whether or not a path exists in
|
||||
// the given driver.
|
||||
func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) {
|
||||
if _, err := drv.Stat(ctx, path); err != nil {
|
||||
switch err := err.(type) {
|
||||
case driver.PathNotFoundError:
|
||||
return false, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
releases/v2.7.1.toml (new file, 26 lines)
|
@ -0,0 +1,26 @@
|
|||
# commit to be tagged for new release
|
||||
commit = "HEAD"
|
||||
|
||||
project_name = "registry"
|
||||
github_repo = "docker/distribution"
|
||||
|
||||
# previous release
|
||||
previous = "v2.7.0"
|
||||
|
||||
pre_release = false
|
||||
|
||||
preface = """\
|
||||
The first patch release of 2.7 addresses an upgrade issue when
|
||||
using configurations from pre-2.7 registries. When upgrading from
|
||||
2.6 or earlier use this patch release or newer to avoid a failure
|
||||
on startup from not updating the configuration file.
|
||||
|
||||
- Set default for new `autoredirect` option
|
||||
- GCS driver is now included in binary builds using Dockerfile"""
|
||||
|
||||
# notable prs to include in the release notes, 1234 is the pr number
|
||||
[notes]
|
||||
|
||||
[breaking]
|
||||
|
||||
[rename_deps]
|
|
@ -1,11 +1,12 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
GOLANGCI_LINT_VERSION="v1.27.0"
|
||||
|
||||
#
|
||||
# Install developer tools to $GOBIN (or $GOPATH/bin if unset)
|
||||
#
|
||||
set -eu -o pipefail
|
||||
|
||||
go get -u github.com/alecthomas/gometalinter
|
||||
gometalinter --install >/dev/null
|
||||
cd /tmp
|
||||
go get -u github.com/LK4D4/vndr
|
||||
go get -u github.com/cpuguy83/go-md2man
|
||||
go get "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}"
|
||||
|
|
|
@ -9,4 +9,4 @@ if ! command -v git-validation; then
|
|||
fi
|
||||
|
||||
verbosity="${DCO_VERBOSITY--v}"
|
||||
GIT_CHECK_EXCLUDE="./vendor:./script/validate/template" git-validation "$verbosity" -run DCO,short-subject,dangling-whitespace
|
||||
GIT_CHECK_EXCLUDE="./vendor:./script/validate/template" git-validation "$verbosity" -range "5b98226afefa11a06ef0c652af4995177c0efda0..HEAD" -run DCO,short-subject,dangling-whitespace
|
|
@ -8,7 +8,7 @@ github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274
|
|||
github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
|
||||
github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
|
||||
github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2
|
||||
github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04
|
||||
github.com/dgrijalva/jwt-go 4bbdd8ac624fc7a9ef7aec841c43d99b5fe65a29 https://github.com/golang-jwt/jwt.git # v3.2.2
|
||||
github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab
|
||||
github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21
|
||||
github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257
|
||||
|
@ -48,4 +48,4 @@ gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b
|
|||
gopkg.in/yaml.v2 v2.2.1
|
||||
rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git
|
||||
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
|
||||
github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882
|
||||
github.com/opencontainers/image-spec 67d2d5658fe0476ab9bf414cec164077ebff3920 # v1.0.2
|
||||
|
|
vendor/github.com/aws/aws-sdk-go/go.mod (generated, vendored, new file, 6 lines)
|
@ -0,0 +1,6 @@
|
|||
module github.com/aws/aws-sdk-go
|
||||
|
||||
require (
|
||||
github.com/go-ini/ini v1.25.4
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8
|
||||
)
|
vendor/github.com/dgrijalva/jwt-go/LICENSE (generated, vendored, 1 change)
|
@ -1,4 +1,5 @@
|
|||
Copyright (c) 2012 Dave Grijalva
|
||||
Copyright (c) 2021 golang-jwt maintainers
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
|
|
vendor/github.com/dgrijalva/jwt-go/README.md (generated, vendored, 66 changes)
|
@ -1,21 +1,34 @@
|
|||
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
|
||||
# jwt-go
|
||||
|
||||
[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
|
||||
[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
|
||||
[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt)
|
||||
|
||||
**BREAKING CHANGES:*** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
|
||||
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
|
||||
|
||||
**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect.
|
||||
**IMPORT PATH CHANGE:** Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic.
|
||||
|
||||
Future releases will be using the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, where as new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path.
|
||||
|
||||
**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail.
|
||||
|
||||
**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
|
||||
|
||||
### Supported Go versions
|
||||
|
||||
Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy).
|
||||
So we will support a major version of Go until there are two newer major releases.
|
||||
We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities
|
||||
which will not be fixed.
|
||||
|
||||
## What the heck is a JWT?
|
||||
|
||||
JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
|
||||
|
||||
In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
|
||||
In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way.
|
||||
|
||||
The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.
|
||||
|
||||
The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
|
||||
The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own.
|
||||
|
||||
## What's in the box?
|
||||
|
||||
|
@@ -23,31 +36,34 @@ This library supports the parsing and verification as well as the generation and
 
 ## Examples
 
-See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage:
 
-* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
-* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
-* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
+* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac)
+* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac)
+* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples)
 
 ## Extensions
 
 This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
 
-Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
+Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go
 
 ## Compliance
 
-This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
+This library was last reviewed to comply with [RTF 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences:
 
-* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
 
 ## Project Status & Versioning
 
 This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
 
-This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases).
 
-While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning.
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/golang-jwt/jwt.v3`. It will do the right thing WRT semantic versioning.
+
+**BREAKING CHANGES:***
+* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
 
 ## Usage Tips
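The Extensions section in this README says a custom algorithm only needs to implement the `SigningMethod` interface and be registered through `RegisterSigningMethod`. A minimal sketch of what that can look like, written against the `github.com/golang-jwt/jwt` v3 import path the README describes (the vendored copy in this tree still lives under `github.com/dgrijalva/jwt-go`); the `strictHS256` type and the "HS256-STRICT" name are made up for illustration:

package example

import (
    "crypto/hmac"
    "crypto/sha256"
    "crypto/subtle"
    "errors"

    jwt "github.com/golang-jwt/jwt"
)

// strictHS256 is a hypothetical method: plain HMAC-SHA256, but it refuses keys shorter than 32 bytes.
type strictHS256 struct{}

func (m *strictHS256) Alg() string { return "HS256-STRICT" }

func (m *strictHS256) Sign(signingString string, key interface{}) (string, error) {
    k, ok := key.([]byte)
    if !ok || len(k) < 32 {
        return "", errors.New("strictHS256: key must be []byte of at least 32 bytes")
    }
    h := hmac.New(sha256.New, k)
    h.Write([]byte(signingString))
    return jwt.EncodeSegment(h.Sum(nil)), nil
}

func (m *strictHS256) Verify(signingString, signature string, key interface{}) error {
    // Recompute the signature and compare in constant time.
    expected, err := m.Sign(signingString, key)
    if err != nil {
        return err
    }
    if subtle.ConstantTimeCompare([]byte(expected), []byte(signature)) != 1 {
        return jwt.ErrSignatureInvalid
    }
    return nil
}

func init() {
    // Registration makes the method resolvable from the token header's "alg" value.
    jwt.RegisterSigningMethod("HS256-STRICT", func() jwt.SigningMethod { return &strictHS256{} })
}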
@@ -68,18 +84,30 @@ Symmetric signing methods, such as HSA, use only a single secret. This is probab
 
 Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
 
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
+
+* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
+* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+
 ### JWT and OAuth
 
 It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
 
 Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
 
-* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
 * OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
 * Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
 
 ### Troubleshooting
 
 This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types.
 
 ## More
 
-Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
+Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt).
 
-The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in to documentation.
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
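The README's guidance on signing methods and key types (HMAC methods want `[]byte` keys, and callers should check `alg` before handing a key to the parser) translates into roughly the following round trip. This is a sketch against the `github.com/golang-jwt/jwt` v3 API, not code from this repository; the claim values are arbitrary:

package example

import (
    "fmt"
    "time"

    jwt "github.com/golang-jwt/jwt"
)

func hs256RoundTrip() error {
    key := []byte("a-string-secret") // HMAC methods expect []byte keys

    // Build and sign a token with a couple of standard claims.
    claims := jwt.StandardClaims{
        Subject:   "user-123",
        ExpiresAt: time.Now().Add(time.Hour).Unix(),
    }
    signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(key)
    if err != nil {
        return err
    }

    // Parse it back, verifying the alg before returning the key.
    tok, err := jwt.ParseWithClaims(signed, &jwt.StandardClaims{}, func(t *jwt.Token) (interface{}, error) {
        if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
            return nil, fmt.Errorf("unexpected signing method %v", t.Header["alg"])
        }
        return key, nil
    })
    if err != nil || !tok.Valid {
        return fmt.Errorf("token rejected: %v", err)
    }
    return nil
}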
vendor/github.com/dgrijalva/jwt-go/claims.go (generated, vendored, 32 lines changed)

@@ -35,18 +35,18 @@ func (c StandardClaims) Valid() error {
 
     // The claims below are optional, by default, so if they are set to the
     // default value in Go, let's not fail the verification for them.
-    if c.VerifyExpiresAt(now, false) == false {
+    if !c.VerifyExpiresAt(now, false) {
         delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
         vErr.Inner = fmt.Errorf("token is expired by %v", delta)
         vErr.Errors |= ValidationErrorExpired
     }
 
-    if c.VerifyIssuedAt(now, false) == false {
+    if !c.VerifyIssuedAt(now, false) {
         vErr.Inner = fmt.Errorf("Token used before issued")
         vErr.Errors |= ValidationErrorIssuedAt
     }
 
-    if c.VerifyNotBefore(now, false) == false {
+    if !c.VerifyNotBefore(now, false) {
         vErr.Inner = fmt.Errorf("token is not valid yet")
         vErr.Errors |= ValidationErrorNotValidYet
     }

@@ -61,7 +61,7 @@ func (c StandardClaims) Valid() error {
 // Compares the aud claim against cmp.
 // If required is false, this method will return true if the value matches or is unset
 func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
-    return verifyAud(c.Audience, cmp, req)
+    return verifyAud([]string{c.Audience}, cmp, req)
 }
 
 // Compares the exp claim against cmp.

@@ -90,15 +90,27 @@ func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
 
 // ----- helpers
 
-func verifyAud(aud string, cmp string, required bool) bool {
-    if aud == "" {
+func verifyAud(aud []string, cmp string, required bool) bool {
+    if len(aud) == 0 {
         return !required
     }
-    if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
-        return true
-    } else {
-        return false
+    // use a var here to keep constant time compare when looping over a number of claims
+    result := false
+
+    var stringClaims string
+    for _, a := range aud {
+        if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
+            result = true
+        }
+        stringClaims = stringClaims + a
     }
+
+    // case where "" is sent in one or many aud claims
+    if len(stringClaims) == 0 {
+        return !required
+    }
+
+    return result
 }
 
 func verifyExp(exp int64, now int64, required bool) bool {
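The reworked `verifyAud` above compares every audience value and keeps looping even after a match, so the comparison stays constant-time across the whole list. A standalone sketch of the same pattern, with illustrative names and only `crypto/subtle` from the standard library assumed:

package example

import "crypto/subtle"

// audienceMatches mirrors the helper above: it never exits the loop early,
// so timing does not reveal which entry (if any) matched.
func audienceMatches(aud []string, want string, required bool) bool {
    if len(aud) == 0 {
        return !required
    }
    match := false
    seen := ""
    for _, a := range aud {
        if subtle.ConstantTimeCompare([]byte(a), []byte(want)) != 0 {
            match = true
        }
        seen += a
    }
    // Treat a list containing only empty strings the same as an absent claim.
    if len(seen) == 0 {
        return !required
    }
    return match
}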
vendor/github.com/dgrijalva/jwt-go/ecdsa.go (generated, vendored, 25 lines changed)

@@ -14,6 +14,7 @@ var (
 )
 
 // Implements the ECDSA family of signing methods signing methods
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
 type SigningMethodECDSA struct {
     Name string
     Hash crypto.Hash

@@ -87,11 +88,11 @@ func (m *SigningMethodECDSA) Verify(signingString, signature string, key interfa
     hasher.Write([]byte(signingString))
 
     // Verify the signature
-    if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
+    if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
         return nil
-    } else {
-        return ErrECDSAVerification
     }
+
+    return ErrECDSAVerification
 }
 
 // Implements the Sign method from SigningMethod

@@ -127,18 +128,12 @@ func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string
         keyBytes += 1
     }
 
-    // We serialize the outpus (r and s) into big-endian byte arrays and pad
-    // them with zeros on the left to make sure the sizes work out. Both arrays
-    // must be keyBytes long, and the output must be 2*keyBytes long.
-    rBytes := r.Bytes()
-    rBytesPadded := make([]byte, keyBytes)
-    copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
-
-    sBytes := s.Bytes()
-    sBytesPadded := make([]byte, keyBytes)
-    copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
-
-    out := append(rBytesPadded, sBytesPadded...)
+    // We serialize the outputs (r and s) into big-endian byte arrays
+    // padded with zeros on the left to make sure the sizes work out.
+    // Output must be 2*keyBytes long.
+    out := make([]byte, 2*keyBytes)
+    r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
+    s.FillBytes(out[keyBytes:])  // s is assigned to the second half of output.
 
     return EncodeSegment(out), nil
 } else {
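The rewritten signing code relies on `big.Int.FillBytes` (Go 1.15 and later) to left-pad `r` and `s` into fixed-width, big-endian halves of one buffer. A small sketch of just that packing step, with an illustrative function name; note that `FillBytes` panics if a value needs more than `keyBytes` bytes:

package example

import "math/big"

// packSignature writes r and s big-endian into the two halves of a
// 2*keyBytes buffer, left-padded with zeros so short values keep their place.
func packSignature(r, s *big.Int, keyBytes int) []byte {
    out := make([]byte, 2*keyBytes)
    r.FillBytes(out[:keyBytes])
    s.FillBytes(out[keyBytes:])
    return out
}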
vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go (generated, vendored, 4 lines changed)

@@ -25,7 +25,9 @@ func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
     // Parse the key
     var parsedKey interface{}
     if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
-        return nil, err
+        if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+            return nil, err
+        }
     }
 
     var pkey *ecdsa.PrivateKey
vendor/github.com/dgrijalva/jwt-go/ed25519.go (generated, vendored, new file, 81 lines)

@@ -0,0 +1,81 @@
+package jwt
+
+import (
+    "errors"
+
+    "crypto/ed25519"
+)
+
+var (
+    ErrEd25519Verification = errors.New("ed25519: verification error")
+)
+
+// Implements the EdDSA family
+// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
+type SigningMethodEd25519 struct{}
+
+// Specific instance for EdDSA
+var (
+    SigningMethodEdDSA *SigningMethodEd25519
+)
+
+func init() {
+    SigningMethodEdDSA = &SigningMethodEd25519{}
+    RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
+        return SigningMethodEdDSA
+    })
+}
+
+func (m *SigningMethodEd25519) Alg() string {
+    return "EdDSA"
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an ed25519.PublicKey
+func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error {
+    var err error
+    var ed25519Key ed25519.PublicKey
+    var ok bool
+
+    if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
+        return ErrInvalidKeyType
+    }
+
+    if len(ed25519Key) != ed25519.PublicKeySize {
+        return ErrInvalidKey
+    }
+
+    // Decode the signature
+    var sig []byte
+    if sig, err = DecodeSegment(signature); err != nil {
+        return err
+    }
+
+    // Verify the signature
+    if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
+        return ErrEd25519Verification
+    }
+
+    return nil
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an ed25519.PrivateKey
+func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) {
+    var ed25519Key ed25519.PrivateKey
+    var ok bool
+
+    if ed25519Key, ok = key.(ed25519.PrivateKey); !ok {
+        return "", ErrInvalidKeyType
+    }
+
+    // ed25519.Sign panics if private key not equal to ed25519.PrivateKeySize
+    // this allows to avoid recover usage
+    if len(ed25519Key) != ed25519.PrivateKeySize {
+        return "", ErrInvalidKey
+    }
+
+    // Sign the string and return the encoded result
+    sig := ed25519.Sign(ed25519Key, []byte(signingString))
+    return EncodeSegment(sig), nil
+}
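Given the `SigningMethodEd25519` implementation added above, signing and verifying a token with EdDSA looks roughly like this. A sketch assuming the `github.com/golang-jwt/jwt` import path; the vendored path in this tree is still `github.com/dgrijalva/jwt-go`:

package example

import (
    "crypto/ed25519"
    "crypto/rand"

    jwt "github.com/golang-jwt/jwt"
)

func edDSARoundTrip() (bool, error) {
    pub, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        return false, err
    }

    // Sign with the ed25519.PrivateKey, as SigningMethodEd25519.Sign expects.
    signed, err := jwt.NewWithClaims(jwt.SigningMethodEdDSA, jwt.StandardClaims{Subject: "user-123"}).SignedString(priv)
    if err != nil {
        return false, err
    }

    // Verify with the matching ed25519.PublicKey.
    tok, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) { return pub, nil })
    if err != nil {
        return false, err
    }
    return tok.Valid, nil
}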
vendor/github.com/dgrijalva/jwt-go/ed25519_utils.go (generated, vendored, new file, 64 lines)

@@ -0,0 +1,64 @@
+package jwt
+
+import (
+    "crypto"
+    "crypto/ed25519"
+    "crypto/x509"
+    "encoding/pem"
+    "errors"
+)
+
+var (
+    ErrNotEdPrivateKey = errors.New("Key is not a valid Ed25519 private key")
+    ErrNotEdPublicKey  = errors.New("Key is not a valid Ed25519 public key")
+)
+
+// Parse PEM-encoded Edwards curve private key
+func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
+    var err error
+
+    // Parse PEM block
+    var block *pem.Block
+    if block, _ = pem.Decode(key); block == nil {
+        return nil, ErrKeyMustBePEMEncoded
+    }
+
+    // Parse the key
+    var parsedKey interface{}
+    if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+        return nil, err
+    }
+
+    var pkey ed25519.PrivateKey
+    var ok bool
+    if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
+        return nil, ErrNotEdPrivateKey
+    }
+
+    return pkey, nil
+}
+
+// Parse PEM-encoded Edwards curve public key
+func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
+    var err error
+
+    // Parse PEM block
+    var block *pem.Block
+    if block, _ = pem.Decode(key); block == nil {
+        return nil, ErrKeyMustBePEMEncoded
+    }
+
+    // Parse the key
+    var parsedKey interface{}
+    if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+        return nil, err
+    }
+
+    var pkey ed25519.PublicKey
+    var ok bool
+    if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
+        return nil, ErrNotEdPublicKey
+    }
+
+    return pkey, nil
+}
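`ParseEdPrivateKeyFromPEM` above expects a PKCS#8 PEM block, so a key produced with `x509.MarshalPKCS8PrivateKey` round-trips cleanly. A sketch under the same import-path assumption as the previous example, with an illustrative function name:

package example

import (
    "crypto/ed25519"
    "crypto/rand"
    "crypto/x509"
    "encoding/pem"

    jwt "github.com/golang-jwt/jwt"
)

func loadEdKeyFromPEM() (ed25519.PrivateKey, error) {
    _, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        return nil, err
    }

    // Wrap the key in a PKCS#8 PEM block, the format the parser expects.
    der, err := x509.MarshalPKCS8PrivateKey(priv)
    if err != nil {
        return nil, err
    }
    pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: der})

    parsed, err := jwt.ParseEdPrivateKeyFromPEM(pemBytes)
    if err != nil {
        return nil, err
    }
    return parsed.(ed25519.PrivateKey), nil
}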
vendor/github.com/dgrijalva/jwt-go/hmac.go (generated, vendored, 3 lines changed)

@@ -7,6 +7,7 @@ import (
 )
 
 // Implements the HMAC-SHA family of signing methods signing methods
+// Expects key type of []byte for both signing and validation
 type SigningMethodHMAC struct {
     Name string
     Hash crypto.Hash

@@ -90,5 +91,5 @@ func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string,
         return EncodeSegment(hasher.Sum(nil)), nil
     }
 
-    return "", ErrInvalidKey
+    return "", ErrInvalidKeyType
 }
Some files were not shown because too many files have changed in this diff.