Merge branch 'main' into feature/sse-c

Signed-off-by: Richard Bateman <taxilian@gmail.com>
Richard Bateman committed on 2023-03-15 14:52:43 -06:00
commit 7504d26d0d
422 changed files with 30537 additions and 7281 deletions

View file

@ -1,5 +1,9 @@
name: build
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
push:
branches:
@ -8,8 +12,6 @@ on:
tags:
- 'v*'
pull_request:
branches:
- '*'
env:
DOCKERHUB_SLUG: distribution/distribution
@ -49,13 +51,13 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
-
name: Docker meta
id: meta
uses: docker/metadata-action@v3
uses: docker/metadata-action@v4
with:
images: |
${{ env.DOCKERHUB_SLUG }}
@ -80,17 +82,17 @@ jobs:
org.opencontainers.image.description=The toolkit to pack, ship, store, and deliver container content
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v2
-
name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Build artifacts
uses: docker/bake-action@v1
uses: docker/bake-action@v2
with:
targets: artifact-all
-
@ -99,14 +101,14 @@ jobs:
mv ./bin/**/* ./bin/
-
name: Upload artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: registry
path: ./bin/*
if-no-files-found: error
-
name: Build image
uses: docker/bake-action@v1
uses: docker/bake-action@v2
with:
files: |
./docker-bake.hcl

View file

@ -1,63 +1,48 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
name: CodeQL
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
push:
branches: [ main ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ main ]
schedule:
- cron: '41 13 * * 1'
- cron: '0 12 * * 6'
push:
branches:
- 'main'
- 'release/*'
tags:
- 'v*'
pull_request:
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
language:
- go
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
-
name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 2
-
name: Checkout HEAD on PR
if: ${{ github.event_name == 'pull_request' }}
run: |
git checkout HEAD^2
-
name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
-
name: Autobuild
uses: github/codeql-action/autobuild@v2
-
name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2

View file

@ -1,5 +1,9 @@
name: conformance
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
pull_request:
push:
@ -10,12 +14,12 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
-
name: Build image
uses: docker/bake-action@v1
uses: docker/bake-action@v2
with:
targets: image-local
-
@ -42,7 +46,7 @@ jobs:
run: mkdir -p .out/ && mv {report.html,junit.xml} .out/
-
name: Upload test results
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: oci-test-results-${{ github.sha }}
path: .out/

View file

@ -1,12 +1,15 @@
name: e2e
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
push:
branches:
- main
- 'main'
- 'release/*'
pull_request:
branches:
- main
jobs:
run-e2e-test:
@ -14,12 +17,12 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
-
name: Build image
uses: docker/bake-action@v1
uses: docker/bake-action@v2
with:
targets: image-local
-

View file

@ -1,5 +1,9 @@
name: FOSSA License Scanning
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
- pull_request
- push
@ -10,9 +14,9 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Run FOSSA scan and upload build data
uses: fossa-contrib/fossa-action@v1
uses: fossa-contrib/fossa-action@v2
with:
fossa-api-key: cac3dc8d4f2ba86142f6c0f2199a160f

View file

@ -1,5 +1,9 @@
name: validate
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
push:
branches:

View file

@ -1,6 +1,11 @@
Docker Hub https://hub.docker.com/
GitLab Container Registry https://docs.gitlab.com/ee/user/packages/container_registry/
GitHub Container Registry https://docs.github.com/en/free-pro-team@latest/packages/guides/about-github-container-registry
Harbor, CNCF Graduated project https://goharbor.io/
VMware Harbor Registry https://docs.pivotal.io/partners/vmware-harbor/index.html
DigitalOcean Container Registry https://www.digitalocean.com/products/container-registry/

View file

@ -19,19 +19,29 @@ You are expected to know your way around with go & git.
If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.
## Build the development environment
## Configure the development environment
The first prerequisite of properly building distribution targets is to have a Go
development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
environment.
If a Go development environment is setup, one can use `go get` to install the
`registry` command from the current latest:
Next, fetch the code from the repository using git:
go get github.com/distribution/distribution/cmd/registry
git clone https://github.com/distribution/distribution
cd distribution
The above will install the source repository into the `GOPATH`.
If you are planning to create a pull request with changes, you may want to clone directly from your [fork](https://help.github.com/en/articles/about-forks).
## Build and run from source
First, build the binaries:
$ make
+ bin/registry
+ bin/digest
+ bin/registry-api-descriptor-template
+ binaries
Now create the directory for the registry data (this might require you to set permissions properly)
@ -42,75 +52,48 @@ Now create the directory for the registry data (this might require you to set pe
The `registry`
binary can then be run with the following:
$ $GOPATH/bin/registry --version
$GOPATH/bin/registry github.com/distribution/distribution v2.0.0-alpha.1+unknown
$ ./bin/registry --version
./bin/registry github.com/distribution/distribution/v3 v2.7.0-1993-g8857a194
> __NOTE:__ While you do not need to use `go get` to checkout the distribution
> project, for these build instructions to work, the project must be checked
> out in the correct location in the `GOPATH`. This should almost always be
> `$GOPATH/src/github.com/distribution/distribution`.
The registry can be run with the default config using the following
The registry can be run with a development config using the following
incantation:
$ $GOPATH/bin/registry serve $GOPATH/src/github.com/distribution/distribution/cmd/registry/config-example.yml
INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] debug server listening localhost:5001
$ ./bin/registry serve cmd/registry/config-dev.yml
INFO[0000] debug server listening :5001
WARN[0000] No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable. environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194
INFO[0000] endpoint local-5003 disabled, skipping environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194
INFO[0000] endpoint local-8083 disabled, skipping environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194
INFO[0000] using inmemory blob descriptor cache environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194
INFO[0000] providing prometheus metrics on /metrics
INFO[0000] listening on [::]:5000 environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194
If it is working, one should see the above log messages.
### Repeatable Builds
### Build reference
For the full development experience, one should `cd` into
`$GOPATH/src/github.com/distribution/distribution`. From there, the regular `go`
commands, such as `go test`, should work per package (please see
[Developing](#developing) if they don't work).
The regular `go` commands, such as `go test`, should work per package.
A `Makefile` has been provided as a convenience to support repeatable builds.
Please install the following into `GOPATH` for it to work:
go get github.com/golang/lint/golint
Once these commands are available in the `GOPATH`, run `make` to get a full
build:
Run `make` to build the binaries:
$ make
+ clean
+ fmt
+ vet
+ lint
+ build
github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
github.com/sirupsen/logrus
github.com/docker/libtrust
...
github.com/yvasiyarov/gorelic
github.com/distribution/distribution/registry/handlers
github.com/distribution/distribution/cmd/registry
+ test
...
ok github.com/distribution/distribution/digest 7.875s
ok github.com/distribution/distribution/manifest 0.028s
ok github.com/distribution/distribution/notifications 17.322s
? github.com/distribution/distribution/registry [no test files]
ok github.com/distribution/distribution/registry/api/v2 0.101s
? github.com/distribution/distribution/registry/auth [no test files]
ok github.com/distribution/distribution/registry/auth/silly 0.011s
...
+ /Users/sday/go/src/github.com/distribution/distribution/bin/registry
+ /Users/sday/go/src/github.com/distribution/distribution/bin/registry-api-descriptor-template
+ bin/registry
+ bin/digest
+ bin/registry-api-descriptor-template
+ binaries
The above provides a repeatable build using the contents of the vendor
directory. This includes formatting, vetting, linting, building,
testing and generating tagged binaries. We can verify this worked by running
directory. We can verify this worked by running
the registry binary generated in the "./bin" directory:
$ ./bin/registry --version
./bin/registry github.com/distribution/distribution v2.0.0-alpha.2-80-g16d8b2c.m
Run `make test` to run all of the tests.
Run `make validate` to run the validators, including the linter and vendor validation. You must have docker with the buildx plugin installed to run the validators.
### Optional build tags
Optional [build tags](http://golang.org/pkg/go/build/) can be provided using

View file

@ -63,13 +63,13 @@ type Descriptor struct {
// encoded as utf-8.
MediaType string `json:"mediaType,omitempty"`
// Size in bytes of content.
Size int64 `json:"size,omitempty"`
// Digest uniquely identifies the content. A byte stream can be verified
// against this digest.
Digest digest.Digest `json:"digest,omitempty"`
// Size in bytes of content.
Size int64 `json:"size,omitempty"`
// URLs contains the source URLs of this content.
URLs []string `json:"urls,omitempty"`
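
For context on the field reordering above: encoding/json emits struct fields in declaration order, so moving Digest ahead of Size changes the serialized output. A self-contained sketch, using a local stand-in type rather than the package's own Descriptor:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// descriptor is a local stand-in mirroring the json tags shown above.
type descriptor struct {
	MediaType string `json:"mediaType,omitempty"`
	Digest    string `json:"digest,omitempty"`
	Size      int64  `json:"size,omitempty"`
}

func main() {
	d := descriptor{
		MediaType: "application/vnd.docker.distribution.manifest.v2+json",
		Digest:    "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
		Size:      7143,
	}
	out, _ := json.Marshal(d)
	// Fields appear in declaration order: mediaType, digest, size.
	fmt.Println(string(out))
}
```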
@ -142,20 +142,18 @@ type BlobDescriptorServiceFactory interface {
// ReadSeekCloser is the primary reader type for blob data, combining
// io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
io.ReadSeeker
io.Closer
}
//
// Deprecated: use [io.ReadSeekCloser].
type ReadSeekCloser = io.ReadSeekCloser
// BlobProvider describes operations for getting blob data.
type BlobProvider interface {
// Get returns the entire blob identified by digest along with the descriptor.
Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
// Open provides a ReadSeekCloser to the blob identified by the provided
// descriptor. If the blob is not known to the service, an error will be
// returned.
Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
// Open provides an [io.ReadSeekCloser] to the blob identified by the provided
// descriptor. If the blob is not known to the service, an error is returned.
Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error)
}
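
A minimal, self-contained sketch of a provider satisfying the two signatures above, assuming an in-memory map keyed by digest; the interface is redeclared locally for illustration rather than imported from the package:

```go
package main

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"

	"github.com/opencontainers/go-digest"
)

// blobProvider mirrors the BlobProvider interface shown above.
type blobProvider interface {
	Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
	Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error)
}

// memoryBlobs is a toy in-memory provider keyed by digest.
type memoryBlobs map[digest.Digest][]byte

func (m memoryBlobs) Get(_ context.Context, dgst digest.Digest) ([]byte, error) {
	b, ok := m[dgst]
	if !ok {
		return nil, errors.New("blob unknown to provider")
	}
	return b, nil
}

// readSeekNopCloser adds a no-op Close to *bytes.Reader so it
// satisfies io.ReadSeekCloser.
type readSeekNopCloser struct{ *bytes.Reader }

func (readSeekNopCloser) Close() error { return nil }

func (m memoryBlobs) Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error) {
	b, err := m.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}
	return readSeekNopCloser{bytes.NewReader(b)}, nil
}

func main() {
	data := []byte("hello world")
	dgst := digest.FromBytes(data)

	var p blobProvider = memoryBlobs{dgst: data}
	rsc, err := p.Open(context.Background(), dgst)
	if err != nil {
		panic(err)
	}
	defer rsc.Close()

	out, _ := io.ReadAll(rsc)
	fmt.Printf("%s %s\n", dgst, out)
}
```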
// BlobServer can serve blobs via http.

View file

@ -62,7 +62,6 @@ func main() {
if flag.NArg() > 0 {
for _, path := range flag.Args() {
fp, err := os.Open(path)
if err != nil {
log.Printf("%s: %v", path, err)
fail = true

View file

@ -4,7 +4,7 @@
// For example, to generate a new API specification, one would execute the
// following command from the repo root:
//
// $ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md
// $ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md
//
// The templates are passed in the api/v2.APIDescriptor object. Please see the
// package documentation for fields available on that object. The template
@ -27,7 +27,6 @@ import (
var spaceRegex = regexp.MustCompile(`\n\s*`)
func main() {
if len(os.Args) != 2 {
log.Fatalln("please specify a template to execute.")
}
@ -127,5 +126,4 @@ end:
}
return output
}

View file

@ -4,7 +4,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"reflect"
"strings"
@ -204,12 +203,24 @@ type Configuration struct {
// Compatibility is used for configurations of working with older or deprecated features.
Compatibility struct {
// Schema1 configures how schema1 manifests will be handled
// Schema1 configures how schema1 manifests will be handled.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since
// 2015. These options should only be used if you need to provide
// backward compatibility.
Schema1 struct {
// TrustKey is the signing key to use for adding the signature to
// schema1 manifests.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since
// 2015. These options should only be used if you need to provide
// backward compatibility.
TrustKey string `yaml:"signingkeyfile,omitempty"`
// Enabled determines if schema1 manifests should be pullable
// Enabled determines if schema1 manifests should be pullable.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since
// 2015. These options should only be used if you need to provide
// backward compatibility.
Enabled bool `yaml:"enabled,omitempty"`
} `yaml:"schema1,omitempty"`
} `yaml:"compatibility,omitempty"`
@ -589,7 +600,7 @@ type Events struct {
IncludeReferences bool `yaml:"includereferences"` // include reference data in manifest events
}
//Ignore configures mediaTypes and actions of the event, that it won't be propagated
// Ignore configures mediaTypes and actions of the event, that it won't be propagated
type Ignore struct {
MediaTypes []string `yaml:"mediatypes"` // target media types to ignore
Actions []string `yaml:"actions"` // ignore action types
@ -654,7 +665,7 @@ type Proxy struct {
// Configuration.Abc may be replaced by the value of REGISTRY_ABC,
// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth
func Parse(rd io.Reader) (*Configuration, error) {
in, err := ioutil.ReadAll(rd)
in, err := io.ReadAll(rd)
if err != nil {
return nil, err
}
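
A usage sketch of configuration.Parse with an inline YAML document; the storage driver, log level, and listen address are illustrative values, not defaults:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/distribution/distribution/v3/configuration"
)

func main() {
	yml := `
version: 0.1
log:
  level: debug
storage:
  inmemory: {}
http:
  addr: :5000
`
	// Parse reads the YAML from any io.Reader; REGISTRY_* environment
	// variables can still override individual fields as described above.
	cfg, err := configuration.Parse(strings.NewReader(yml))
	if err != nil {
		log.Fatalf("parsing configuration: %v", err)
	}
	fmt.Println(cfg.Storage.Type(), cfg.HTTP.Addr)
}
```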

View file

@ -360,7 +360,6 @@ func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) {
_, err = Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, NotNil)
}
// TestParseWithDifferentEnvReporting validates that environment variables

View file

@ -1,16 +0,0 @@
// +build gofuzz
package configuration
import (
"bytes"
)
// ParserFuzzer implements a fuzzer that targets Parser()
// Export before building
// nolint:deadcode
func parserFuzzer(data []byte) int {
rd := bytes.NewReader(data)
_, _ = Parse(rd)
return 1
}

View file

@ -0,0 +1,15 @@
package configuration
import (
"bytes"
"testing"
)
// ParserFuzzer implements a fuzzer that targets Parser()
// nolint:deadcode
func FuzzConfigurationParse(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
rd := bytes.NewReader(data)
_, _ = Parse(rd)
})
}

View file

@ -23,7 +23,7 @@ func MajorMinorVersion(major, minor uint) Version {
}
func (version Version) major() (uint, error) {
majorPart := strings.Split(string(version), ".")[0]
majorPart, _, _ := strings.Cut(string(version), ".")
major, err := strconv.ParseUint(majorPart, 10, 0)
return uint(major), err
}
@ -35,7 +35,7 @@ func (version Version) Major() uint {
}
func (version Version) minor() (uint, error) {
minorPart := strings.Split(string(version), ".")[1]
_, minorPart, _ := strings.Cut(string(version), ".")
minor, err := strconv.ParseUint(minorPart, 10, 0)
return uint(minor), err
}
@ -89,8 +89,8 @@ func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser {
}
for _, env := range os.Environ() {
envParts := strings.SplitN(env, "=", 2)
p.env = append(p.env, envVar{envParts[0], envParts[1]})
k, v, _ := strings.Cut(env, "=")
p.env = append(p.env, envVar{k, v})
}
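
For reference, strings.Cut (Go 1.18+) splits around the first occurrence of the separator and reports whether it was found, which is why it can stand in for the Split/SplitN calls replaced above; a minimal sketch:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Version string: everything before the first "." is the major part,
	// everything after it is the minor part.
	major, minor, ok := strings.Cut("0.1", ".")
	fmt.Println(major, minor, ok) // 0 1 true

	// Environment entry: split once on "=" into key and value,
	// equivalent to strings.SplitN(env, "=", 2) when "=" is present.
	k, v, _ := strings.Cut("REGISTRY_HTTP_ADDR=:5000", "=")
	fmt.Println(k, v) // REGISTRY_HTTP_ADDR :5000
}
```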
// We must sort the environment variables lexically by name so that
@ -138,7 +138,7 @@ func (p *Parser) Parse(in []byte, v interface{}) error {
err = p.overwriteFields(parseAs, pathStr, path[1:], envVar.value)
if err != nil {
return err
return fmt.Errorf("parsing environment variable %s: %v", pathStr, err)
}
}
}
@ -206,7 +206,6 @@ func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string
fieldVal := reflect.New(sf.Type)
err := yaml.Unmarshal([]byte(payload), fieldVal.Interface())
if err != nil {
logrus.Warnf("Error parsing environment variable %s: %s", fullpath, err)
return err
}
field.Set(reflect.Indirect(fieldVal))

View file

@ -4,68 +4,68 @@
//
// The easiest way to get started is to get the background context:
//
// ctx := context.Background()
// ctx := context.Background()
//
// The returned context should be passed around your application and be the
// root of all other context instances. If the application has a version, this
// line should be called before anything else:
//
// ctx := context.WithVersion(context.Background(), version)
// ctx := context.WithVersion(context.Background(), version)
//
// The above will store the version in the context and will be available to
// the logger.
//
// Logging
// # Logging
//
// The most useful aspect of this package is GetLogger. This function takes
// any context.Context interface and returns the current logger from the
// context. Canonical usage looks like this:
//
// GetLogger(ctx).Infof("something interesting happened")
// GetLogger(ctx).Infof("something interesting happened")
//
// GetLogger also takes optional key arguments. The keys will be looked up in
// the context and reported with the logger. The following example would
// return a logger that prints the version with each log message:
//
// ctx := context.Context(context.Background(), "version", version)
// GetLogger(ctx, "version").Infof("this log message has a version field")
// ctx := context.Context(context.Background(), "version", version)
// GetLogger(ctx, "version").Infof("this log message has a version field")
//
// The above would print out a log message like this:
//
// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
//
// When used with WithLogger, we gain the ability to decorate the context with
// loggers that have information from disparate parts of the call stack.
// Following from the version example, we can build a new context with the
// configured logger such that we always print the version field:
//
// ctx = WithLogger(ctx, GetLogger(ctx, "version"))
// ctx = WithLogger(ctx, GetLogger(ctx, "version"))
//
// Since the logger has been pushed to the context, we can now get the version
// field for free with our log messages. Future calls to GetLogger on the new
// context will have the version field:
//
// GetLogger(ctx).Infof("this log message has a version field")
// GetLogger(ctx).Infof("this log message has a version field")
//
// This becomes more powerful when we start stacking loggers. Let's say we
// have the version logger from above but also want a request id. Using the
// context above, in our request scoped function, we place another logger in
// the context:
//
// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
//
// When GetLogger is called on the new context, "http.request.id" will be
// included as a logger field, along with the original "version" field:
//
// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
//
// Note that this only affects the new context, the previous context, with the
// version field, can be used independently. Put another way, the new logger,
// added to the request context, is unique to that context and can have
// request scoped variables.
//
// HTTP Requests
// # HTTP Requests
//
// This package also contains several methods for working with http requests.
// The concepts are very similar to those described above. We simply place the
@ -73,13 +73,13 @@
// available. GetRequestLogger can then be called to get request specific
// variables in a log line:
//
// ctx = WithRequest(ctx, req)
// GetRequestLogger(ctx).Infof("request variables")
// ctx = WithRequest(ctx, req)
// GetRequestLogger(ctx).Infof("request variables")
//
// Like above, if we want to include the request data in all log messages in
// the context, we push the logger to a new context and use that one:
//
// ctx = WithLogger(ctx, GetRequestLogger(ctx))
// ctx = WithLogger(ctx, GetRequestLogger(ctx))
//
// The concept is fairly powerful and ensures that calls throughout the stack
// can be traced in log messages. Using the fields like "http.request.id", one

View file

@ -32,12 +32,10 @@ func parseIP(ipStr string) net.IP {
// account proxy headers.
func RemoteAddr(r *http.Request) string {
if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
proxies := strings.Split(prior, ",")
if len(proxies) > 0 {
remoteAddr := strings.Trim(proxies[0], " ")
if parseIP(remoteAddr) != nil {
return remoteAddr
}
remoteAddr, _, _ := strings.Cut(prior, ",")
remoteAddr = strings.Trim(remoteAddr, " ")
if parseIP(remoteAddr) != nil {
return remoteAddr
}
}
// X-Real-Ip is less supported, but worth checking in the
@ -189,49 +187,37 @@ type httpRequestContext struct {
// "request.<component>". For example, r.RequestURI
func (ctx *httpRequestContext) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "http.request" {
switch keyStr {
case "http.request":
return ctx.r
}
if !strings.HasPrefix(keyStr, "http.request.") {
goto fallback
}
parts := strings.Split(keyStr, ".")
if len(parts) != 3 {
goto fallback
}
switch parts[2] {
case "uri":
case "http.request.uri":
return ctx.r.RequestURI
case "remoteaddr":
case "http.request.remoteaddr":
return RemoteAddr(ctx.r)
case "method":
case "http.request.method":
return ctx.r.Method
case "host":
case "http.request.host":
return ctx.r.Host
case "referer":
case "http.request.referer":
referer := ctx.r.Referer()
if referer != "" {
return referer
}
case "useragent":
case "http.request.useragent":
return ctx.r.UserAgent()
case "id":
case "http.request.id":
return ctx.id
case "startedat":
case "http.request.startedat":
return ctx.startedAt
case "contenttype":
ct := ctx.r.Header.Get("Content-Type")
if ct != "" {
case "http.request.contenttype":
if ct := ctx.r.Header.Get("Content-Type"); ct != "" {
return ct
}
default:
// no match; fall back to standard behavior below
}
}
fallback:
return ctx.Context.Value(key)
}
@ -245,10 +231,9 @@ func (ctx *muxVarsContext) Value(key interface{}) interface{} {
if keyStr == "vars" {
return ctx.vars
}
keyStr = strings.TrimPrefix(keyStr, "vars.")
if v, ok := ctx.vars[keyStr]; ok {
// TODO(thaJeztah): this considers "vars.FOO" and "FOO" to be equal.
// We need to check if that's intentional (could be a bug).
if v, ok := ctx.vars[strings.TrimPrefix(keyStr, "vars.")]; ok {
return v
}
}
@ -300,36 +285,25 @@ func (irw *instrumentedResponseWriter) Flush() {
func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "http.response" {
switch keyStr {
case "http.response":
return irw
}
if !strings.HasPrefix(keyStr, "http.response.") {
goto fallback
}
parts := strings.Split(keyStr, ".")
if len(parts) != 3 {
goto fallback
}
irw.mu.Lock()
defer irw.mu.Unlock()
switch parts[2] {
case "written":
case "http.response.written":
irw.mu.Lock()
defer irw.mu.Unlock()
return irw.written
case "status":
case "http.response.status":
irw.mu.Lock()
defer irw.mu.Unlock()
return irw.status
case "contenttype":
contentType := irw.Header().Get("Content-Type")
if contentType != "" {
return contentType
case "http.response.contenttype":
if ct := irw.Header().Get("Content-Type"); ct != "" {
return ct
}
default:
// no match; fall back to standard behavior below
}
}
fallback:
return irw.Context.Value(key)
}

View file

@ -24,16 +24,16 @@ import (
//
// Here is an example of the usage:
//
// func timedOperation(ctx Context) {
// ctx, done := WithTrace(ctx)
// defer done("this will be the log message")
// // ... function body ...
// }
// func timedOperation(ctx Context) {
// ctx, done := WithTrace(ctx)
// defer done("this will be the log message")
// // ... function body ...
// }
//
// If the function ran for roughly 1s, such a usage would emit a log message
// as follows:
//
// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/distribution/distribution/context.traceOperation trace.id=<id> ...
// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/distribution/distribution/context.traceOperation trace.id=<id> ...
//
// Notice that the function name is automatically resolved, along with the
// package and a trace id is emitted that can be linked with parent ids.

View file

@ -20,9 +20,7 @@ import (
"github.com/sirupsen/logrus"
)
var (
enforceRepoClass bool
)
var enforceRepoClass bool
func main() {
var (
@ -110,7 +108,6 @@ func main() {
if err != nil {
logrus.Infof("Error serving: %v", err)
}
}
// handlerWithContext wraps the given context-aware handler by setting up the

View file

@ -5,11 +5,10 @@ import (
"crypto/rsa"
"encoding/base64"
"errors"
"strings"
"testing"
"time"
"strings"
"github.com/distribution/distribution/v3/registry/auth"
"github.com/docker/libtrust"
)
@ -49,7 +48,6 @@ func TestCreateJWTSuccessWithEmptyACL(t *testing.T) {
if !strings.Contains(json, "test") {
t.Fatal("Valid token was not generated.")
}
}
func decodeJWT(rawToken string) (string, error) {
@ -74,7 +72,7 @@ func joseBase64Decode(s string) (string, error) {
}
data, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return "", err //errors.New("Error in Decoding base64 String")
return "", err // errors.New("Error in Decoding base64 String")
}
return string(data), nil
}

digestset/deprecated.go (new file, 51 lines added)
View file

@ -0,0 +1,51 @@
package digestset
import (
"github.com/opencontainers/go-digest"
"github.com/opencontainers/go-digest/digestset"
)
// ErrDigestNotFound is used when a matching digest
// could not be found in a set.
//
// Deprecated: use [digestset.ErrDigestNotFound].
var ErrDigestNotFound = digestset.ErrDigestNotFound
// ErrDigestAmbiguous is used when multiple digests
// are found in a set. None of the matching digests
// should be considered valid matches.
//
// Deprecated: use [digestset.ErrDigestAmbiguous].
var ErrDigestAmbiguous = digestset.ErrDigestAmbiguous
// Set is used to hold a unique set of digests which
// may be easily referenced by a string
// representation of the digest as well as short representation.
// The uniqueness of the short representation is based on other
// digests in the set. If digests are omitted from this set,
// collisions in a larger set may not be detected, therefore it
// is important to always do short representation lookups on
// the complete set of digests. To mitigate collisions, an
// appropriately long short code should be used.
//
// Deprecated: use [digestset.Set].
type Set = digestset.Set
// NewSet creates an empty set of digests
// which may have digests added.
//
// Deprecated: use [digestset.NewSet].
func NewSet() *digestset.Set {
return digestset.NewSet()
}
// ShortCodeTable returns a map of Digest to unique short codes. The
// length represents the minimum value, the maximum length may be the
// entire value of digest if uniqueness cannot be achieved without the
// full value. This function will attempt to make short codes as short
// as possible to be unique.
//
// Deprecated: use [digestset.ShortCodeTable].
func ShortCodeTable(dst *digestset.Set, length int) map[digest.Digest]string {
return digestset.ShortCodeTable(dst, length)
}

View file

@ -1,371 +0,0 @@
package digestset
import (
"crypto/sha256"
_ "crypto/sha512"
"encoding/binary"
"math/rand"
"testing"
digest "github.com/opencontainers/go-digest"
)
func assertEqualDigests(t *testing.T, d1, d2 digest.Digest) {
if d1 != d2 {
t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2)
}
}
func TestLookup(t *testing.T) {
digests := []digest.Digest{
"sha256:1234511111111111111111111111111111111111111111111111111111111111",
"sha256:1234111111111111111111111111111111111111111111111111111111111111",
"sha256:1234611111111111111111111111111111111111111111111111111111111111",
"sha256:5432111111111111111111111111111111111111111111111111111111111111",
"sha256:6543111111111111111111111111111111111111111111111111111111111111",
"sha256:6432111111111111111111111111111111111111111111111111111111111111",
"sha256:6542111111111111111111111111111111111111111111111111111111111111",
"sha256:6532111111111111111111111111111111111111111111111111111111111111",
}
dset := NewSet()
for i := range digests {
if err := dset.Add(digests[i]); err != nil {
t.Fatal(err)
}
}
dgst, err := dset.Lookup("54")
if err != nil {
t.Fatal(err)
}
assertEqualDigests(t, dgst, digests[3])
_, err = dset.Lookup("1234")
if err == nil {
t.Fatal("Expected ambiguous error looking up: 1234")
}
if err != ErrDigestAmbiguous {
t.Fatal(err)
}
_, err = dset.Lookup("9876")
if err == nil {
t.Fatal("Expected not found error looking up: 9876")
}
if err != ErrDigestNotFound {
t.Fatal(err)
}
_, err = dset.Lookup("sha256:1234")
if err == nil {
t.Fatal("Expected ambiguous error looking up: sha256:1234")
}
if err != ErrDigestAmbiguous {
t.Fatal(err)
}
dgst, err = dset.Lookup("sha256:12345")
if err != nil {
t.Fatal(err)
}
assertEqualDigests(t, dgst, digests[0])
dgst, err = dset.Lookup("sha256:12346")
if err != nil {
t.Fatal(err)
}
assertEqualDigests(t, dgst, digests[2])
dgst, err = dset.Lookup("12346")
if err != nil {
t.Fatal(err)
}
assertEqualDigests(t, dgst, digests[2])
dgst, err = dset.Lookup("12345")
if err != nil {
t.Fatal(err)
}
assertEqualDigests(t, dgst, digests[0])
}
func TestAddDuplication(t *testing.T) {
digests := []digest.Digest{
"sha256:1234111111111111111111111111111111111111111111111111111111111111",
"sha256:1234511111111111111111111111111111111111111111111111111111111111",
"sha256:1234611111111111111111111111111111111111111111111111111111111111",
"sha256:5432111111111111111111111111111111111111111111111111111111111111",
"sha256:6543111111111111111111111111111111111111111111111111111111111111",
"sha512:65431111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111",
"sha512:65421111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111",
"sha512:65321111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111",
}
dset := NewSet()
for i := range digests {
if err := dset.Add(digests[i]); err != nil {
t.Fatal(err)
}
}
if len(dset.entries) != 8 {
t.Fatal("Invalid dset size")
}
if err := dset.Add(digest.Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil {
t.Fatal(err)
}
if len(dset.entries) != 8 {
t.Fatal("Duplicate digest insert should not increase entries size")
}
if err := dset.Add(digest.Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil {
t.Fatal(err)
}
if len(dset.entries) != 9 {
t.Fatal("Insert with different algorithm should be allowed")
}
}
func TestRemove(t *testing.T) {
digests, err := createDigests(10)
if err != nil {
t.Fatal(err)
}
dset := NewSet()
for i := range digests {
if err := dset.Add(digests[i]); err != nil {
t.Fatal(err)
}
}
dgst, err := dset.Lookup(digests[0].String())
if err != nil {
t.Fatal(err)
}
if dgst != digests[0] {
t.Fatalf("Unexpected digest value:\n\tExpected: %s\n\tActual: %s", digests[0], dgst)
}
if err := dset.Remove(digests[0]); err != nil {
t.Fatal(err)
}
if _, err := dset.Lookup(digests[0].String()); err != ErrDigestNotFound {
t.Fatalf("Expected error %v when looking up removed digest, got %v", ErrDigestNotFound, err)
}
}
func TestAll(t *testing.T) {
digests, err := createDigests(100)
if err != nil {
t.Fatal(err)
}
dset := NewSet()
for i := range digests {
if err := dset.Add(digests[i]); err != nil {
t.Fatal(err)
}
}
all := map[digest.Digest]struct{}{}
for _, dgst := range dset.All() {
all[dgst] = struct{}{}
}
if len(all) != len(digests) {
t.Fatalf("Unexpected number of unique digests found:\n\tExpected: %d\n\tActual: %d", len(digests), len(all))
}
for i, dgst := range digests {
if _, ok := all[dgst]; !ok {
t.Fatalf("Missing element at position %d: %s", i, dgst)
}
}
}
func assertEqualShort(t *testing.T, actual, expected string) {
if actual != expected {
t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual)
}
}
func TestShortCodeTable(t *testing.T) {
digests := []digest.Digest{
"sha256:1234111111111111111111111111111111111111111111111111111111111111",
"sha256:1234511111111111111111111111111111111111111111111111111111111111",
"sha256:1234611111111111111111111111111111111111111111111111111111111111",
"sha256:5432111111111111111111111111111111111111111111111111111111111111",
"sha256:6543111111111111111111111111111111111111111111111111111111111111",
"sha256:6432111111111111111111111111111111111111111111111111111111111111",
"sha256:6542111111111111111111111111111111111111111111111111111111111111",
"sha256:6532111111111111111111111111111111111111111111111111111111111111",
}
dset := NewSet()
for i := range digests {
if err := dset.Add(digests[i]); err != nil {
t.Fatal(err)
}
}
dump := ShortCodeTable(dset, 2)
if len(dump) < len(digests) {
t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests))
}
assertEqualShort(t, dump[digests[0]], "12341")
assertEqualShort(t, dump[digests[1]], "12345")
assertEqualShort(t, dump[digests[2]], "12346")
assertEqualShort(t, dump[digests[3]], "54")
assertEqualShort(t, dump[digests[4]], "6543")
assertEqualShort(t, dump[digests[5]], "64")
assertEqualShort(t, dump[digests[6]], "6542")
assertEqualShort(t, dump[digests[7]], "653")
}
func createDigests(count int) ([]digest.Digest, error) {
r := rand.New(rand.NewSource(25823))
digests := make([]digest.Digest, count)
for i := range digests {
h := sha256.New()
if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil {
return nil, err
}
digests[i] = digest.NewDigest("sha256", h)
}
return digests, nil
}
func benchAddNTable(b *testing.B, n int) {
digests, err := createDigests(n)
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
for j := range digests {
if err = dset.Add(digests[j]); err != nil {
b.Fatal(err)
}
}
}
}
func benchLookupNTable(b *testing.B, n int, shortLen int) {
digests, err := createDigests(n)
if err != nil {
b.Fatal(err)
}
dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
for i := range digests {
if err := dset.Add(digests[i]); err != nil {
b.Fatal(err)
}
}
shorts := make([]string, 0, n)
for _, short := range ShortCodeTable(dset, shortLen) {
shorts = append(shorts, short)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
if _, err = dset.Lookup(shorts[i%n]); err != nil {
b.Fatal(err)
}
}
}
func benchRemoveNTable(b *testing.B, n int) {
digests, err := createDigests(n)
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
b.StopTimer()
for j := range digests {
if err = dset.Add(digests[j]); err != nil {
b.Fatal(err)
}
}
b.StartTimer()
for j := range digests {
if err = dset.Remove(digests[j]); err != nil {
b.Fatal(err)
}
}
}
}
func benchShortCodeNTable(b *testing.B, n int, shortLen int) {
digests, err := createDigests(n)
if err != nil {
b.Fatal(err)
}
dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
for i := range digests {
if err := dset.Add(digests[i]); err != nil {
b.Fatal(err)
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
ShortCodeTable(dset, shortLen)
}
}
func BenchmarkAdd10(b *testing.B) {
benchAddNTable(b, 10)
}
func BenchmarkAdd100(b *testing.B) {
benchAddNTable(b, 100)
}
func BenchmarkAdd1000(b *testing.B) {
benchAddNTable(b, 1000)
}
func BenchmarkRemove10(b *testing.B) {
benchRemoveNTable(b, 10)
}
func BenchmarkRemove100(b *testing.B) {
benchRemoveNTable(b, 100)
}
func BenchmarkRemove1000(b *testing.B) {
benchRemoveNTable(b, 1000)
}
func BenchmarkLookup10(b *testing.B) {
benchLookupNTable(b, 10, 12)
}
func BenchmarkLookup100(b *testing.B) {
benchLookupNTable(b, 100, 12)
}
func BenchmarkLookup1000(b *testing.B) {
benchLookupNTable(b, 1000, 12)
}
func BenchmarkShortCode10(b *testing.B) {
benchShortCodeNTable(b, 10, 12)
}
func BenchmarkShortCode100(b *testing.B) {
benchShortCodeNTable(b, 100, 12)
}
func BenchmarkShortCode1000(b *testing.B) {
benchShortCodeNTable(b, 1000, 12)
}

View file

@ -330,7 +330,7 @@ check before parsing the remainder of the configuration file.
## `log`
The `log` subsection configures the behavior of the logging system. The logging
system outputs everything to stdout. You can adjust the granularity and format
system outputs everything to stderr. You can adjust the granularity and format
with this configuration section.
```none

View file

@ -93,8 +93,8 @@ manifest:
"action": "pull",
"target": {
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 708,
"digest": "sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
"size": 708,
"length": 708,
"repository": "hello-world",
"url": "http://192.168.100.227:5000/v2/hello-world/manifests/sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
@ -171,8 +171,8 @@ Content-Type: application/vnd.docker.distribution.events.v1+json
"action": "push",
"target": {
"mediaType": "application/vnd.docker.distribution.manifest.v1+json",
"length": 1,
"digest": "sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
"length": 1,
"repository": "library/test",
"url": "https://example.com/v2/library/test/manifests/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
},
@ -196,8 +196,8 @@ Content-Type: application/vnd.docker.distribution.events.v1+json
"action": "push",
"target": {
"mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
"length": 2,
"digest": "sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
"length": 2,
"repository": "library/test",
"url": "https://example.com/v2/library/test/blobs/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
},
@ -221,8 +221,8 @@ Content-Type: application/vnd.docker.distribution.events.v1+json
"action": "push",
"target": {
"mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
"length": 3,
"digest": "sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
"length": 3,
"repository": "library/test",
"url": "https://example.com/v2/library/test/blobs/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
},

View file

@ -1495,7 +1495,7 @@ The error codes that may be included in the response body are enumerated below:
##### Tags Paginated
```
GET /v2/<name>/tags/list?n=<integer>&last=<integer>
GET /v2/<name>/tags/list?n=<integer>&last=<last tag value from previous response>
```
Return a portion of the tags for the specified repository.
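
A sketch of walking the paginated tag list with the n and last parameters described above, assuming a registry at localhost:5000 and a hello-world repository (both placeholders):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
)

type tagList struct {
	Name string   `json:"name"`
	Tags []string `json:"tags"`
}

func main() {
	const pageSize = 100
	base := "http://localhost:5000/v2/hello-world/tags/list" // placeholder registry/repo
	last := ""

	for {
		q := url.Values{"n": {strconv.Itoa(pageSize)}}
		if last != "" {
			q.Set("last", last)
		}
		resp, err := http.Get(base + "?" + q.Encode())
		if err != nil {
			panic(err)
		}
		var page tagList
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			panic(err)
		}
		for _, t := range page.Tags {
			fmt.Println(t)
		}
		// A short (or empty) page means there is nothing left to fetch.
		if len(page.Tags) < pageSize {
			break
		}
		last = page.Tags[len(page.Tags)-1]
	}
}
```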
@ -5471,7 +5471,7 @@ The following headers will be returned with the response:
##### Catalog Fetch Paginated
```
GET /v2/_catalog?n=<integer>&last=<integer>
GET /v2/_catalog?n=<integer>&last=<last repository value from previous response>
```
Return the specified portion of repositories.

View file

@ -6,6 +6,13 @@ keywords: registry, on-prem, images, tags, repository, distribution, api, advanc
# Image Manifest Version 2, Schema 1
> **Deprecated**
>
> Image Manifest v2, Schema 1 is deprecated. Use [Image Manifest v2, Schema 2](manifest-v2-2.md),
> or the [OCI Image Specification](https://github.com/opencontainers/image-spec).
> Image Manifest v2, Schema 1 should not be used for purposes other than backward
> compatibility.
This document outlines the format of the V2 image manifest. The image
manifest described herein was introduced in the Docker daemon in the [v1.3.0
release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453).

View file

@ -124,8 +124,8 @@ image manifest based on the Content-Type returned in the HTTP response.
"manifests": [
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 7143,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
"size": 7143,
"platform": {
"architecture": "ppc64le",
"os": "linux"
@ -133,8 +133,8 @@ image manifest based on the Content-Type returned in the HTTP response.
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
"size": 7682,
"platform": {
"architecture": "amd64",
"os": "linux",
@ -232,24 +232,24 @@ image. It's the direct replacement for the schema-1 manifest.
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 7023,
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7",
"size": 7023
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 32654,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
"size": 32654
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 16724,
"digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
"digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
"size": 16724
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 73109,
"digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
"digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
"size": 73109
}
]
}

go.mod (12 changed lines)
View file

@ -22,7 +22,7 @@ require (
github.com/opencontainers/image-spec v1.0.2
github.com/prometheus/client_golang v1.12.1 // indirect; updated to latest
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.0.0
github.com/spf13/cobra v1.6.1
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c
@ -53,7 +53,7 @@ require (
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/kr/pretty v0.1.0 // indirect
github.com/kr/text v0.1.0 // indirect
@ -62,13 +62,13 @@ require (
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/spf13/pflag v1.0.3 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 // indirect
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f // indirect
go.opencensus.io v0.22.4 // indirect
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c // indirect; updated for CVE-2022-27664
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/net v0.7.0 // indirect; updated for CVE-2022-27664, CVE-2022-41717
golang.org/x/sys v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
google.golang.org/appengine v1.6.6 // indirect
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 // indirect
google.golang.org/grpc v1.31.0 // indirect

go.sum (81 changed lines)
View file

@ -52,7 +52,6 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@ -60,7 +59,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/aws/aws-sdk-go v1.43.16 h1:Y7wBby44f+tINqJjw5fLH3vA+gFq4uMITIKqditwM14=
github.com/aws/aws-sdk-go v1.43.16/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -78,7 +76,6 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0Bsq
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@ -87,19 +84,12 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
@ -114,8 +104,6 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@ -129,12 +117,10 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
@ -195,23 +181,17 @@ github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@ -222,7 +202,6 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -232,10 +211,8 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ=
@ -249,19 +226,16 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
@ -273,8 +247,6 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
@ -282,33 +254,23 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@ -316,10 +278,6 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@ -329,16 +287,12 @@ github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMzt
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@ -381,14 +335,12 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@ -412,8 +364,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c h1:yKufUcDwucU5urd+50/Opbt4AYpqthk7wHpHok8f1lo=
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -432,7 +384,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -471,8 +422,8 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -481,12 +432,12 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@ -590,7 +541,6 @@ google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 h1:PDIOdWxZ8eRizhK
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
@ -621,8 +571,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -631,6 +579,7 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View file

@ -7,9 +7,7 @@ import (
"github.com/distribution/distribution/v3/health"
)
var (
updater = health.NewStatusUpdater()
)
var updater = health.NewStatusUpdater()
// DownHandler registers a manual_http_status that always returns an Error
func DownHandler(w http.ResponseWriter, r *http.Request) {

View file

@ -13,29 +13,29 @@
// particularly useful for checks that verify upstream connectivity or
// database status, since they might take a long time to return/timeout.
//
// Installing
// # Installing
//
// To install health, just import it in your application:
//
// import "github.com/distribution/distribution/v3/health"
// import "github.com/distribution/distribution/v3/health"
//
// You can also (optionally) import "health/api" that will add two convenience
// endpoints: "/debug/health/down" and "/debug/health/up". These endpoints add
// "manual" checks that allow the service to quickly be brought in/out of
// rotation.
//
// import _ "github.com/distribution/distribution/v3/health/api"
// import _ "github.com/distribution/distribution/v3/health/api"
//
// # curl localhost:5001/debug/health
// {}
// # curl -X POST localhost:5001/debug/health/down
// # curl localhost:5001/debug/health
// {"manual_http_status":"Manual Check"}
// # curl localhost:5001/debug/health
// {}
// # curl -X POST localhost:5001/debug/health/down
// # curl localhost:5001/debug/health
// {"manual_http_status":"Manual Check"}
//
// After importing these packages to your main application, you can start
// registering checks.
//
// Registering Checks
// # Registering Checks
//
// The recommended way of registering checks is using a periodic Check.
// PeriodicChecks run on a certain schedule and asynchronously update the
@ -45,22 +45,22 @@
// A trivial example of a check that runs every 5 seconds and shuts down our
// server if the current minute is even, could be added as follows:
//
// func currentMinuteEvenCheck() error {
// m := time.Now().Minute()
// if m%2 == 0 {
// return errors.New("Current minute is even!")
// }
// return nil
// }
// func currentMinuteEvenCheck() error {
// m := time.Now().Minute()
// if m%2 == 0 {
// return errors.New("Current minute is even!")
// }
// return nil
// }
//
// health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5)
// health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5)
//
// Alternatively, you can also make use of "RegisterPeriodicThresholdFunc" to
// implement the exact same check, but add a threshold of failures after which
// the check will be unhealthy. This is particularly useful for flaky Checks,
// ensuring some stability of the service when handling them.
//
// health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4)
// health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4)
//
// The lowest-level way to interact with the health package is calling
// "Register" directly. Register allows you to pass in an arbitrary string and
@ -72,7 +72,7 @@
// Assuming you wish to register a method called "currentMinuteEvenCheck()
// error" you could do that by doing:
//
// health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck))
// health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck))
//
// CheckFunc is a convenience type that implements Checker.
//
@ -80,11 +80,11 @@
// and the convenience method RegisterFunc. An example that makes the status
// endpoint always return an error:
//
// health.RegisterFunc("my_check", func() error {
// return Errors.new("This is an error!")
// }))
//	health.RegisterFunc("my_check", func() error {
//		return errors.New("This is an error!")
//	})
//
// Examples
// # Examples
//
// You could also use the health checker mechanism to ensure your application
// only comes up if certain conditions are met, or to allow the developer to
@ -92,35 +92,35 @@
// database connectivity and immediately takes the server out of rotation on
// err:
//
// updater = health.NewStatusUpdater()
// health.RegisterFunc("database_check", func() error {
// return updater.Check()
// }))
// updater = health.NewStatusUpdater()
// health.RegisterFunc("database_check", func() error {
// return updater.Check()
//	})
//
// conn, err := Connect(...) // database call here
// if err != nil {
// updater.Update(errors.New("Error connecting to the database: " + err.Error()))
// }
// conn, err := Connect(...) // database call here
// if err != nil {
// updater.Update(errors.New("Error connecting to the database: " + err.Error()))
// }
//
// You can also use the predefined Checkers that come included with the health
// package. First, import the checks:
//
// import "github.com/distribution/distribution/v3/health/checks
//	import "github.com/distribution/distribution/v3/health/checks"
//
// After that you can make use of any of the provided checks. An example of
// using a `FileChecker` to take the application out of rotation if a certain
// file exists can be done as follows:
//
// health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5))
// health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5))
//
// After registering the check, it is trivial to take an application out of
// rotation from the console:
//
// # curl localhost:5001/debug/health
// {}
// # touch /tmp/disable
// # curl localhost:5001/debug/health
// {"fileChecker":"file exists"}
// # curl localhost:5001/debug/health
// {}
// # touch /tmp/disable
// # curl localhost:5001/debug/health
// {"fileChecker":"file exists"}
//
// FileChecker only accepts absolute or relative file paths. It does not work
// properly with a tilde (~). You should make sure that the application has
@ -132,5 +132,5 @@
// "HTTPChecker", but ensure that you only mark the test unhealthy if there
// are a minimum of two failures in a row:
//
// health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2))
// health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2))
package health
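The registration flow described in the package comment above can be collected into one runnable program. The following is a hedged sketch, not part of this change: it assumes the signatures shown in the comment (health.Register, health.RegisterFunc, health.PeriodicChecker, checks.FileChecker) and assumes, as the curl examples imply, that the package serves /debug/health on http.DefaultServeMux; the port, file path, and check names are illustrative only.

package main

import (
	"errors"
	"log"
	"net/http"
	"time"

	"github.com/distribution/distribution/v3/health"
	"github.com/distribution/distribution/v3/health/checks"

	// Optional: adds the manual /debug/health/down and /debug/health/up endpoints.
	_ "github.com/distribution/distribution/v3/health/api"
)

func main() {
	// Take the service out of rotation while /tmp/disable exists,
	// re-checking every five seconds (mirrors the FileChecker example above).
	health.Register("fileChecker",
		health.PeriodicChecker(checks.FileChecker("/tmp/disable"), 5*time.Second))

	// A plain function registered as a check; this one always fails.
	health.RegisterFunc("my_check", func() error {
		return errors.New("this is an error")
	})

	// Serve the debug endpoints on the port used in the curl examples above.
	log.Fatal(http.ListenAndServe("localhost:5001", nil))
}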

View file

@ -12,14 +12,14 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
var expectedManifestListSerialization = []byte(`{
const expectedManifestListSerialization = `{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
"manifests": [
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 985,
"digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
"size": 985,
"platform": {
"architecture": "amd64",
"os": "linux",
@ -30,23 +30,23 @@ var expectedManifestListSerialization = []byte(`{
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 2392,
"digest": "sha256:6346340964309634683409684360934680934608934608934608934068934608",
"size": 2392,
"platform": {
"architecture": "sun4m",
"os": "sunos"
}
}
]
}`)
}`
func makeTestManifestList(t *testing.T, mediaType string) ([]ManifestDescriptor, *DeserializedManifestList) {
manifestDescriptors := []ManifestDescriptor{
{
Descriptor: distribution.Descriptor{
MediaType: "application/vnd.docker.distribution.manifest.v2+json",
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
Size: 985,
MediaType: "application/vnd.docker.distribution.manifest.v2+json",
},
Platform: PlatformSpec{
Architecture: "amd64",
@ -56,9 +56,9 @@ func makeTestManifestList(t *testing.T, mediaType string) ([]ManifestDescriptor,
},
{
Descriptor: distribution.Descriptor{
MediaType: "application/vnd.docker.distribution.manifest.v2+json",
Digest: "sha256:6346340964309634683409684360934680934608934608934608934068934608",
Size: 2392,
MediaType: "application/vnd.docker.distribution.manifest.v2+json",
},
Platform: PlatformSpec{
Architecture: "sun4m",
@ -85,17 +85,17 @@ func TestManifestList(t *testing.T) {
// Check that the canonical field is the same as json.MarshalIndent
// with these parameters.
p, err := json.MarshalIndent(&deserialized.ManifestList, "", " ")
expected, err := json.MarshalIndent(&deserialized.ManifestList, "", " ")
if err != nil {
t.Fatalf("error marshaling manifest list: %v", err)
}
if !bytes.Equal(p, canonical) {
t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p))
if !bytes.Equal(expected, canonical) {
t.Fatalf("manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n", string(expected), string(canonical))
}
// Check that the canonical field has the expected value.
if !bytes.Equal(expectedManifestListSerialization, canonical) {
t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestListSerialization))
if !bytes.Equal([]byte(expectedManifestListSerialization), canonical) {
t.Fatalf("manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n", expectedManifestListSerialization, string(canonical))
}
var unmarshalled DeserializedManifestList
@ -136,14 +136,14 @@ func TestManifestList(t *testing.T) {
// Requires changes to distribution/distribution/manifest/manifestlist.ManifestList and .ManifestDescriptor
// and associated serialization APIs in manifestlist.go. Or split the OCI index and
// docker manifest list implementations, which would require a lot of refactoring.
var expectedOCIImageIndexSerialization = []byte(`{
const expectedOCIImageIndexSerialization = `{
"schemaVersion": 2,
"mediaType": "application/vnd.oci.image.index.v1+json",
"manifests": [
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 985,
"digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
"size": 985,
"platform": {
"architecture": "amd64",
"os": "linux",
@ -154,8 +154,8 @@ var expectedOCIImageIndexSerialization = []byte(`{
},
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 985,
"digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
"size": 985,
"annotations": {
"platform": "none"
},
@ -166,8 +166,8 @@ var expectedOCIImageIndexSerialization = []byte(`{
},
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 2392,
"digest": "sha256:6346340964309634683409684360934680934608934608934608934068934608",
"size": 2392,
"annotations": {
"what": "for"
},
@ -177,15 +177,15 @@ var expectedOCIImageIndexSerialization = []byte(`{
}
}
]
}`)
}`
func makeTestOCIImageIndex(t *testing.T, mediaType string) ([]ManifestDescriptor, *DeserializedManifestList) {
manifestDescriptors := []ManifestDescriptor{
{
Descriptor: distribution.Descriptor{
MediaType: "application/vnd.oci.image.manifest.v1+json",
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
Size: 985,
MediaType: "application/vnd.oci.image.manifest.v1+json",
},
Platform: PlatformSpec{
Architecture: "amd64",
@ -195,17 +195,17 @@ func makeTestOCIImageIndex(t *testing.T, mediaType string) ([]ManifestDescriptor
},
{
Descriptor: distribution.Descriptor{
MediaType: "application/vnd.oci.image.manifest.v1+json",
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
Size: 985,
MediaType: "application/vnd.oci.image.manifest.v1+json",
Annotations: map[string]string{"platform": "none"},
},
},
{
Descriptor: distribution.Descriptor{
MediaType: "application/vnd.oci.image.manifest.v1+json",
Digest: "sha256:6346340964309634683409684360934680934608934608934608934068934608",
Size: 2392,
MediaType: "application/vnd.oci.image.manifest.v1+json",
Annotations: map[string]string{"what": "for"},
},
Platform: PlatformSpec{
@ -234,17 +234,17 @@ func TestOCIImageIndex(t *testing.T) {
// Check that the canonical field is the same as json.MarshalIndent
// with these parameters.
p, err := json.MarshalIndent(&deserialized.ManifestList, "", " ")
expected, err := json.MarshalIndent(&deserialized.ManifestList, "", " ")
if err != nil {
t.Fatalf("error marshaling manifest list: %v", err)
}
if !bytes.Equal(p, canonical) {
t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p))
if !bytes.Equal(expected, canonical) {
t.Fatalf("manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n", string(expected), string(canonical))
}
// Check that the canonical field has the expected value.
if !bytes.Equal(expectedOCIImageIndexSerialization, canonical) {
t.Fatalf("manifest bytes not equal to expected: %q != %q", string(canonical), string(expectedOCIImageIndexSerialization))
if !bytes.Equal([]byte(expectedOCIImageIndexSerialization), canonical) {
t.Fatalf("manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n", expectedOCIImageIndexSerialization, string(canonical))
}
var unmarshalled DeserializedManifestList

View file

@ -12,6 +12,7 @@ import (
type mockBlobService struct {
descriptors map[digest.Digest]distribution.Descriptor
distribution.BlobService
}
func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
@ -21,14 +22,6 @@ func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distri
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
panic("not implemented")
}
func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
panic("not implemented")
}
func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
d := distribution.Descriptor{
Digest: digest.FromBytes(p),
@ -39,14 +32,6 @@ func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte)
return d, nil
}
func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
panic("not implemented")
}
func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
panic("not implemented")
}
func TestBuilder(t *testing.T) {
imgJSON := []byte(`{
"created": "2015-10-31T22:22:56.015925234Z",
@ -107,20 +92,20 @@ func TestBuilder(t *testing.T) {
descriptors := []distribution.Descriptor{
{
MediaType: v1.MediaTypeImageLayerGzip,
Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
Size: 5312,
MediaType: v1.MediaTypeImageLayerGzip,
Annotations: map[string]string{"apple": "orange", "lettuce": "wrap"},
},
{
MediaType: v1.MediaTypeImageLayerGzip,
Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
Size: 235231,
MediaType: v1.MediaTypeImageLayerGzip,
},
{
MediaType: v1.MediaTypeImageLayerGzip,
Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
Size: 639152,
MediaType: v1.MediaTypeImageLayerGzip,
},
}
annotations := map[string]string{"hot": "potato"}
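The panic("not implemented") stubs removed above are no longer needed because the mock now embeds the distribution.BlobService interface: methods a test never calls are promoted from the nil embedded value and would only panic if actually used, so the mock implements just Stat and Put. A generic sketch of the same pattern follows; the Store interface and names here are invented for illustration and are not part of this repository.

package main

import "fmt"

// Store stands in for a wide interface such as distribution.BlobService.
type Store interface {
	Get(key string) (string, error)
	Put(key, value string) error
	Delete(key string) error
}

// mockStore embeds Store, so it satisfies the full interface while
// implementing only the methods the test actually exercises.
type mockStore struct {
	data map[string]string
	Store
}

func (m *mockStore) Get(key string) (string, error) {
	v, ok := m.data[key]
	if !ok {
		return "", fmt.Errorf("unknown key %q", key)
	}
	return v, nil
}

func (m *mockStore) Put(key, value string) error {
	m.data[key] = value
	return nil
}

func main() {
	var s Store = &mockStore{data: map[string]string{}}
	_ = s.Put("a", "1")
	v, _ := s.Get("a")
	fmt.Println(v)
	// s.Delete("a") would panic with a nil pointer dereference, since the
	// embedded Store is nil; no hand-written panic stub is required.
}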

View file

@ -11,14 +11,12 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// packages version of the manifest.
SchemaVersion = manifest.Versioned{
SchemaVersion: 2, // historical value here.. does not pertain to OCI or docker version
MediaType: v1.MediaTypeImageManifest,
}
)
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
var SchemaVersion = manifest.Versioned{
SchemaVersion: 2, // historical value here.. does not pertain to OCI or docker version
MediaType: v1.MediaTypeImageManifest,
}
func init() {
ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
@ -95,17 +93,17 @@ func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
copy(m.canonical, b)
// Unmarshal canonical JSON into Manifest object
var manifest Manifest
if err := json.Unmarshal(m.canonical, &manifest); err != nil {
var mfst Manifest
if err := json.Unmarshal(m.canonical, &mfst); err != nil {
return err
}
if manifest.MediaType != "" && manifest.MediaType != v1.MediaTypeImageManifest {
if mfst.MediaType != "" && mfst.MediaType != v1.MediaTypeImageManifest {
return fmt.Errorf("if present, mediaType in manifest should be '%s' not '%s'",
v1.MediaTypeImageManifest, manifest.MediaType)
v1.MediaTypeImageManifest, mfst.MediaType)
}
m.Manifest = manifest
m.Manifest = mfst
return nil
}
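A brief sketch of how the validation in the UnmarshalJSON hunk above surfaces to callers: a payload that declares a non-OCI media type is rejected with the "if present, mediaType ..." error constructed there. The payload below is illustrative only; the import path is assumed from the package layout.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/distribution/distribution/v3/manifest/ocischema"
)

func main() {
	// mediaType is present but is the Docker schema 2 type, not the OCI one,
	// so unmarshaling into DeserializedManifest fails.
	payload := []byte(`{
	  "schemaVersion": 2,
	  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
	  "config": {},
	  "layers": []
	}`)

	var m ocischema.DeserializedManifest
	if err := json.Unmarshal(payload, &m); err != nil {
		fmt.Println("rejected:", err)
	}
}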

View file

@ -13,13 +13,13 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
var expectedManifestSerialization = []byte(`{
const expectedManifestSerialization = `{
"schemaVersion": 2,
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 985,
"digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
"size": 985,
"annotations": {
"apple": "orange"
}
@ -27,8 +27,8 @@ var expectedManifestSerialization = []byte(`{
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 153263,
"digest": "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b",
"size": 153263,
"annotations": {
"lettuce": "wrap"
}
@ -37,7 +37,7 @@ var expectedManifestSerialization = []byte(`{
"annotations": {
"hot": "potato"
}
}`)
}`
func makeTestManifest(mediaType string) Manifest {
return Manifest{
@ -46,16 +46,16 @@ func makeTestManifest(mediaType string) Manifest {
MediaType: mediaType,
},
Config: distribution.Descriptor{
MediaType: v1.MediaTypeImageConfig,
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
Size: 985,
MediaType: v1.MediaTypeImageConfig,
Annotations: map[string]string{"apple": "orange"},
},
Layers: []distribution.Descriptor{
{
MediaType: v1.MediaTypeImageLayerGzip,
Digest: "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b",
Size: 153263,
MediaType: v1.MediaTypeImageLayerGzip,
Annotations: map[string]string{"lettuce": "wrap"},
},
},
@ -64,9 +64,9 @@ func makeTestManifest(mediaType string) Manifest {
}
func TestManifest(t *testing.T) {
manifest := makeTestManifest(v1.MediaTypeImageManifest)
mfst := makeTestManifest(v1.MediaTypeImageManifest)
deserialized, err := FromStruct(manifest)
deserialized, err := FromStruct(mfst)
if err != nil {
t.Fatalf("error creating DeserializedManifest: %v", err)
}
@ -79,17 +79,17 @@ func TestManifest(t *testing.T) {
// Check that the canonical field is the same as json.MarshalIndent
// with these parameters.
p, err := json.MarshalIndent(&manifest, "", " ")
expected, err := json.MarshalIndent(&mfst, "", " ")
if err != nil {
t.Fatalf("error marshaling manifest: %v", err)
}
if !bytes.Equal(p, canonical) {
t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p))
if !bytes.Equal(expected, canonical) {
t.Fatalf("manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n", string(expected), string(canonical))
}
// Check that canonical field matches expected value.
if !bytes.Equal(expectedManifestSerialization, canonical) {
t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestSerialization))
if !bytes.Equal([]byte(expectedManifestSerialization), canonical) {
t.Fatalf("manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n", expectedManifestSerialization, string(canonical))
}
var unmarshalled DeserializedManifest
@ -143,9 +143,9 @@ func TestManifest(t *testing.T) {
}
func mediaTypeTest(t *testing.T, mediaType string, shouldError bool) {
manifest := makeTestManifest(mediaType)
mfst := makeTestManifest(mediaType)
deserialized, err := FromStruct(manifest)
deserialized, err := FromStruct(mfst)
if err != nil {
t.Fatalf("error creating DeserializedManifest: %v", err)
}
@ -186,7 +186,7 @@ func TestMediaTypes(t *testing.T) {
}
func TestValidateManifest(t *testing.T) {
manifest := Manifest{
mfst := Manifest{
Config: distribution.Descriptor{Size: 1},
Layers: []distribution.Descriptor{{Size: 2}},
}
@ -196,7 +196,7 @@ func TestValidateManifest(t *testing.T) {
},
}
t.Run("valid", func(t *testing.T) {
b, err := json.Marshal(manifest)
b, err := json.Marshal(mfst)
if err != nil {
t.Fatal("unexpected error marshaling manifest", err)
}

View file

@ -52,6 +52,10 @@ type configManifestBuilder struct {
// schema version from an image configuration and a set of descriptors.
// It takes a BlobService so that it can add an empty tar to the blob store
// if the resulting manifest needs empty layers.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015,
// use Docker Image Manifest v2, Schema 2, or the OCI Image Specification v1.
// This package should not be used for purposes other than backward compatibility.
func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder {
return &configManifestBuilder{
bs: bs,
@ -61,7 +65,10 @@ func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKe
}
}
// Build produces a final manifest from the given references
// Build produces a final manifest from the given references.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) {
type imageRootFS struct {
Type string `json:"type"`
@ -132,7 +139,7 @@ func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Mani
layerCounter++
}
v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex()
v1ID := digest.FromBytes([]byte(blobsum.Encoded() + " " + parent)).Encoded()
if i == 0 && img.RootFS.BaseLayer != "" {
// windows-only baselayer setup
@ -140,18 +147,18 @@ func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Mani
parent = fmt.Sprintf("%x", baseID[:32])
}
v1Compatibility := v1Compatibility{
v1Compat := v1Compatibility{
ID: v1ID,
Parent: parent,
Comment: h.Comment,
Created: h.Created,
Author: h.Author,
}
v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
v1Compat.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
if h.EmptyLayer {
v1Compatibility.ThrowAway = true
v1Compat.ThrowAway = true
}
jsonBytes, err := json.Marshal(&v1Compatibility)
jsonBytes, err := json.Marshal(&v1Compat)
if err != nil {
return nil, err
}
@ -178,11 +185,11 @@ func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Mani
}
fsLayerList[0] = FSLayer{BlobSum: blobsum}
dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON)))
dgst := digest.FromBytes([]byte(blobsum.Encoded() + " " + parent + " " + string(mb.configJSON)))
// Top-level v1compatibility string should be a modified version of the
// image config.
transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer)
transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Encoded(), parent, latestHistory.EmptyLayer)
if err != nil {
return nil, err
}
@ -238,7 +245,10 @@ func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, e
return descriptor.Digest, nil
}
// AppendReference adds a reference to the current ManifestBuilder
// AppendReference adds a reference to the current ManifestBuilder.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error {
descriptor := d.Descriptor()
@ -250,12 +260,18 @@ func (mb *configManifestBuilder) AppendReference(d distribution.Describable) err
return nil
}
// References returns the current references added to this builder
// References returns the current references added to this builder.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func (mb *configManifestBuilder) References() []distribution.Descriptor {
return mb.descriptors
}
// MakeV1ConfigFromConfig creates an legacy V1 image config from image config JSON
// MakeV1ConfigFromConfig creates a legacy V1 image config from image config JSON.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
// Top-level v1compatibility string should be a modified version of the
// image config.

View file

@ -17,6 +17,7 @@ import (
type mockBlobService struct {
descriptors map[digest.Digest]distribution.Descriptor
distribution.BlobService
}
func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
@ -26,32 +27,16 @@ func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distri
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
panic("not implemented")
}
func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
panic("not implemented")
}
func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
d := distribution.Descriptor{
MediaType: mediaType,
Digest: digest.FromBytes(p),
Size: int64(len(p)),
MediaType: mediaType,
}
bs.descriptors[d.Digest] = d
return d, nil
}
func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
panic("not implemented")
}
func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
panic("not implemented")
}
func TestEmptyTar(t *testing.T) {
// Confirm that gzippedEmptyTar expands to 1024 NULL bytes.
var decompressed [2048]byte

View file

@ -1,3 +1,9 @@
// Package schema1 provides definitions for the deprecated Docker Image
// Manifest v2, Schema 1 specification.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification. This package should not be used for purposes
// other than backward compatibility.
package schema1
import (
@ -10,23 +16,33 @@ import (
"github.com/opencontainers/go-digest"
)
const (
// MediaTypeManifest specifies the mediaType for the current version. Note
// that for schema version 1, the the media is optionally "application/json".
MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
// MediaTypeSignedManifest specifies the mediatype for current SignedManifest version
MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
// MediaTypeManifestLayer specifies the media type for manifest layers
MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
)
// MediaTypeManifest specifies the mediaType for the current version. Note
// that for schema version 1, the media type is optionally "application/json".
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
const MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
var (
// SchemaVersion provides a pre-initialized version structure for this
// packages version of the manifest.
SchemaVersion = manifest.Versioned{
SchemaVersion: 1,
}
)
// MediaTypeSignedManifest specifies the media type for the current SignedManifest version.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
const MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
// MediaTypeManifestLayer specifies the media type for manifest layers
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
const MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
var SchemaVersion = manifest.Versioned{
SchemaVersion: 1,
}
func init() {
schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
@ -37,9 +53,9 @@ func init() {
}
desc := distribution.Descriptor{
MediaType: MediaTypeSignedManifest,
Digest: digest.FromBytes(sm.Canonical),
Size: int64(len(sm.Canonical)),
MediaType: MediaTypeSignedManifest,
}
return sm, desc, err
}
@ -57,13 +73,19 @@ func init() {
}
}
// FSLayer is a container struct for BlobSums defined in an image manifest
// FSLayer is a container struct for BlobSums defined in an image manifest.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
type FSLayer struct {
// BlobSum is the tarsum of the referenced filesystem image layer
BlobSum digest.Digest `json:"blobSum"`
}
// History stores unstructured v1 compatibility information
// History stores unstructured v1 compatibility information.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
type History struct {
// V1Compatibility is the raw v1 compatibility information
V1Compatibility string `json:"v1Compatibility"`
@ -71,6 +93,9 @@ type History struct {
// Manifest provides the base accessible fields for working with V2 image
// format in the registry.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
type Manifest struct {
manifest.Versioned
@ -93,6 +118,9 @@ type Manifest struct {
// SignedManifest provides an envelope for a signed image manifest, including
// the format sensitive raw bytes.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
type SignedManifest struct {
Manifest
@ -107,6 +135,9 @@ type SignedManifest struct {
}
// UnmarshalJSON populates a new SignedManifest struct from JSON data.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
sm.all = make([]byte, len(b))
// store manifest and signatures in all
@ -128,17 +159,20 @@ func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
copy(sm.Canonical, bytes)
// Unmarshal canonical JSON into Manifest object
var manifest Manifest
if err := json.Unmarshal(sm.Canonical, &manifest); err != nil {
var mfst Manifest
if err := json.Unmarshal(sm.Canonical, &mfst); err != nil {
return err
}
sm.Manifest = manifest
sm.Manifest = mfst
return nil
}
// References returns the descriptors of this manifests references
// References returns the descriptors of this manifest's references.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func (sm SignedManifest) References() []distribution.Descriptor {
dependencies := make([]distribution.Descriptor, len(sm.FSLayers))
for i, fsLayer := range sm.FSLayers {
@ -149,13 +183,15 @@ func (sm SignedManifest) References() []distribution.Descriptor {
}
return dependencies
}
// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner
// contents. Applications requiring a marshaled signed manifest should simply
// use Raw directly, since the content produced by json.Marshal will be
// compacted and will fail signature checks.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
if len(sm.all) > 0 {
return sm.all, nil
@ -166,6 +202,9 @@ func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
}
// Payload returns the signed content of the signed manifest.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func (sm SignedManifest) Payload() (string, []byte, error) {
return MediaTypeSignedManifest, sm.all, nil
}
@ -173,6 +212,9 @@ func (sm SignedManifest) Payload() (string, []byte, error) {
// Signatures returns the signatures as provided by
// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
// signatures.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func (sm *SignedManifest) Signatures() ([][]byte, error) {
jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures")
if err != nil {

View file

@ -21,13 +21,13 @@ func TestManifestMarshaling(t *testing.T) {
// Check that the all field is the same as json.MarshalIndent with these
// parameters.
p, err := json.MarshalIndent(env.signed, "", " ")
expected, err := json.MarshalIndent(env.signed, "", " ")
if err != nil {
t.Fatalf("error marshaling manifest: %v", err)
}
if !bytes.Equal(p, env.signed.all) {
t.Fatalf("manifest bytes not equal: %q != %q", string(env.signed.all), string(p))
if !bytes.Equal(expected, env.signed.all) {
t.Fatalf("manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n", string(expected), string(env.signed.all))
}
}
@ -42,7 +42,6 @@ func TestManifestUnmarshaling(t *testing.T) {
if !reflect.DeepEqual(&signed, env.signed) {
t.Fatalf("manifests are different after unmarshaling: %v != %v", signed, env.signed)
}
}
func TestManifestVerification(t *testing.T) {

View file

@ -21,6 +21,9 @@ type referenceManifestBuilder struct {
// NewReferenceManifestBuilder is used to build new manifests for the current
// schema version using schema1 dependencies.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder {
tag := ""
if tagged, isTagged := ref.(reference.Tagged); isTagged {
@ -54,7 +57,10 @@ func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Man
return Sign(&m, mb.pk)
}
// AppendReference adds a reference to the current ManifestBuilder
// AppendReference adds a reference to the current ManifestBuilder.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error {
r, ok := d.(Reference)
if !ok {
@ -65,10 +71,12 @@ func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable)
mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...)
mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...)
return nil
}
// References returns the current references added to this builder
// References returns the current references added to this builder.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func (mb *referenceManifestBuilder) References() []distribution.Descriptor {
refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers))
for i := range mb.Manifest.FSLayers {
@ -80,15 +88,21 @@ func (mb *referenceManifestBuilder) References() []distribution.Descriptor {
return refs
}
// Reference describes a manifest v2, schema version 1 dependency.
// Reference describes a Manifest v2, schema version 1 dependency.
// An FSLayer associated with a history entry.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
type Reference struct {
Digest digest.Digest
Size int64 // if we know it, set it for the descriptor.
History History
}
// Descriptor describes a reference
// Descriptor describes a reference.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func (r Reference) Descriptor() distribution.Descriptor {
return distribution.Descriptor{
MediaType: MediaTypeManifestLayer,

View file

@ -10,6 +10,9 @@ import (
// Sign signs the manifest with the provided private key, returning a
// SignedManifest. This typically won't be used within the registry, except
// for testing.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) {
p, err := json.MarshalIndent(m, "", " ")
if err != nil {
@ -40,6 +43,9 @@ func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) {
// SignWithChain signs the manifest with the given private key and x509 chain.
// The public key of the first element in the chain must be the public key
// corresponding with the sign key.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) {
p, err := json.MarshalIndent(m, "", " ")
if err != nil {

View file

@ -9,6 +9,9 @@ import (
// Verify verifies the signature of the signed manifest returning the public
// keys used during signing.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
if err != nil {
@ -22,6 +25,9 @@ func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
// VerifyChains verifies the signature of the signed manifest against the
// certificate pool returning the list of verified chains. Signatures without
// an x509 chain are not checked.
//
// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015.
// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification.
func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) {
js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
if err != nil {

View file

@ -11,6 +11,7 @@ import (
type mockBlobService struct {
descriptors map[digest.Digest]distribution.Descriptor
distribution.BlobService
}
func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
@ -20,32 +21,16 @@ func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distri
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
panic("not implemented")
}
func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
panic("not implemented")
}
func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
d := distribution.Descriptor{
MediaType: "application/octet-stream",
Digest: digest.FromBytes(p),
Size: int64(len(p)),
MediaType: "application/octet-stream",
}
bs.descriptors[d.Digest] = d
return d, nil
}
func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
panic("not implemented")
}
func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
panic("not implemented")
}
func TestBuilder(t *testing.T) {
imgJSON := []byte(`{
"architecture": "amd64",
@ -149,19 +134,19 @@ func TestBuilder(t *testing.T) {
descriptors := []distribution.Descriptor{
{
MediaType: MediaTypeLayer,
Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
Size: 5312,
MediaType: MediaTypeLayer,
},
{
MediaType: MediaTypeLayer,
Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
Size: 235231,
MediaType: MediaTypeLayer,
},
{
MediaType: MediaTypeLayer,
Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
Size: 639152,
MediaType: MediaTypeLayer,
},
}

View file

@ -33,14 +33,12 @@ const (
MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// packages version of the manifest.
SchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: MediaTypeManifest,
}
)
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
var SchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: MediaTypeManifest,
}
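
As context for the consolidated SchemaVersion declaration above, here is a minimal, hedged sketch of how a caller might build and serialize a schema2 manifest around it; the schema2 import path and the descriptor values are illustrative assumptions, not part of this change.

package main

import (
	"fmt"

	"github.com/distribution/distribution/v3"
	"github.com/distribution/distribution/v3/manifest/schema2"
)

func main() {
	// Versioned is pre-filled from SchemaVersion; callers only supply the
	// config and layer descriptors (the digests and sizes here are made up).
	m := schema2.Manifest{
		Versioned: schema2.SchemaVersion,
		Config: distribution.Descriptor{
			MediaType: schema2.MediaTypeImageConfig,
			Digest:    "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
			Size:      985,
		},
		Layers: []distribution.Descriptor{{
			MediaType: schema2.MediaTypeLayer,
			Digest:    "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b",
			Size:      153263,
		}},
	}
	dm, err := schema2.FromStruct(m)
	if err != nil {
		panic(err)
	}
	_, canonical, _ := dm.Payload()
	fmt.Println(string(canonical))
}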
func init() {
schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
@ -111,18 +109,17 @@ func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
copy(m.canonical, b)
// Unmarshal canonical JSON into Manifest object
var manifest Manifest
if err := json.Unmarshal(m.canonical, &manifest); err != nil {
var mfst Manifest
if err := json.Unmarshal(m.canonical, &mfst); err != nil {
return err
}
if manifest.MediaType != MediaTypeManifest {
if mfst.MediaType != MediaTypeManifest {
return fmt.Errorf("mediaType in manifest should be '%s' not '%s'",
MediaTypeManifest, manifest.MediaType)
MediaTypeManifest, mfst.MediaType)
}
m.Manifest = manifest
m.Manifest = mfst
return nil
}

View file

@ -10,22 +10,22 @@ import (
"github.com/distribution/distribution/v3/manifest"
)
var expectedManifestSerialization = []byte(`{
const expectedManifestSerialization = `{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 985,
"digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b"
"digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
"size": 985
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 153263,
"digest": "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b"
"digest": "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b",
"size": 153263
}
]
}`)
}`
func makeTestManifest(mediaType string) Manifest {
return Manifest{
@ -34,24 +34,24 @@ func makeTestManifest(mediaType string) Manifest {
MediaType: mediaType,
},
Config: distribution.Descriptor{
MediaType: MediaTypeImageConfig,
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
Size: 985,
MediaType: MediaTypeImageConfig,
},
Layers: []distribution.Descriptor{
{
MediaType: MediaTypeLayer,
Digest: "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b",
Size: 153263,
MediaType: MediaTypeLayer,
},
},
}
}
func TestManifest(t *testing.T) {
manifest := makeTestManifest(MediaTypeManifest)
mfst := makeTestManifest(MediaTypeManifest)
deserialized, err := FromStruct(manifest)
deserialized, err := FromStruct(mfst)
if err != nil {
t.Fatalf("error creating DeserializedManifest: %v", err)
}
@ -64,17 +64,17 @@ func TestManifest(t *testing.T) {
// Check that the canonical field is the same as json.MarshalIndent
// with these parameters.
p, err := json.MarshalIndent(&manifest, "", " ")
expected, err := json.MarshalIndent(&mfst, "", " ")
if err != nil {
t.Fatalf("error marshaling manifest: %v", err)
}
if !bytes.Equal(p, canonical) {
t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p))
if !bytes.Equal(expected, canonical) {
t.Fatalf("manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n", string(expected), string(canonical))
}
// Check that canonical field matches expected value.
if !bytes.Equal(expectedManifestSerialization, canonical) {
t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestSerialization))
if !bytes.Equal([]byte(expectedManifestSerialization), canonical) {
t.Fatalf("manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n", expectedManifestSerialization, string(canonical))
}
var unmarshalled DeserializedManifest
@ -119,9 +119,9 @@ func TestManifest(t *testing.T) {
}
func mediaTypeTest(t *testing.T, mediaType string, shouldError bool) {
manifest := makeTestManifest(mediaType)
mfst := makeTestManifest(mediaType)
deserialized, err := FromStruct(manifest)
deserialized, err := FromStruct(mfst)
if err != nil {
t.Fatalf("error creating DeserializedManifest: %v", err)
}

View file

@ -150,9 +150,9 @@ func (b *bridge) createManifestEvent(action string, repo reference.Named, sm dis
}
event.Target.MediaType = mt
event.Target.Length = desc.Size
event.Target.Size = desc.Size
event.Target.Digest = desc.Digest
event.Target.Size = desc.Size
event.Target.Length = desc.Size
if b.includeReferences {
event.Target.References = append(event.Target.References, manifest.References()...)
}

View file

@ -233,7 +233,6 @@ func checkCommon(t *testing.T, event events.Event) {
if event.(Event).Target.Repository != repo {
t.Fatalf("unexpected repository: %q != %q", event.(Event).Target.Repository, repo)
}
}
type testSinkFn func(event events.Event) error

View file

@ -143,9 +143,7 @@ type SourceRecord struct {
InstanceID string `json:"instanceID,omitempty"`
}
var (
// ErrSinkClosed is returned if a write is issued to a sink that has been
// closed. If encountered, the error should be considered terminal and
// retries will not be successful.
ErrSinkClosed = fmt.Errorf("sink: closed")
)
// ErrSinkClosed is returned if a write is issued to a sink that has been
// closed. If encountered, the error should be considered terminal and
// retries will not be successful.
var ErrSinkClosed = fmt.Errorf("sink: closed")
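
A brief, hedged sketch of the intended handling: a write that fails with ErrSinkClosed should be treated as terminal. The sink shape used here (a Write method taking an events.Event) is an assumption for illustration, not an interface introduced by this change; the snippet is meant as an in-package fragment.

// Assumes, within this package:
//
//	import (
//		"errors"
//
//		events "github.com/docker/go-events"
//	)
type writableSink interface {
	Write(event events.Event) error
}

// writeOnce reports whether a failed write may be retried.
func writeOnce(s writableSink, ev events.Event) (retryable bool, err error) {
	if err := s.Write(ev); err != nil {
		if errors.Is(err, ErrSinkClosed) {
			return false, err // terminal: the sink has been closed
		}
		return true, err // other errors may be transient
	}
	return false, nil
}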

View file

@ -13,7 +13,7 @@ import (
// envelope has changed. If this code fails, the revision of the protocol may
// need to be incremented.
func TestEventEnvelopeJSONFormat(t *testing.T) {
var expected = strings.TrimSpace(`
expected := strings.TrimSpace(`
{
"events": [
{
@ -22,8 +22,8 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
"action": "push",
"target": {
"mediaType": "application/vnd.docker.distribution.manifest.v1+prettyjws",
"size": 1,
"digest": "sha256:0123456789abcdef0",
"size": 1,
"length": 1,
"repository": "library/test",
"url": "http://example.com/v2/library/test/manifests/latest"
@ -48,8 +48,8 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
"action": "push",
"target": {
"mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
"size": 2,
"digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
"size": 2,
"length": 2,
"repository": "library/test",
"url": "http://example.com/v2/library/test/manifests/latest"
@ -74,8 +74,8 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
"action": "push",
"target": {
"mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
"size": 3,
"digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6",
"size": 3,
"length": 3,
"repository": "library/test",
"url": "http://example.com/v2/library/test/manifests/latest"
@ -114,30 +114,30 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
prototype.Request.UserAgent = "test/0.1"
prototype.Source.Addr = "hostname.local:port"
var manifestPush = prototype
manifestPush := prototype
manifestPush.ID = "asdf-asdf-asdf-asdf-0"
manifestPush.Target.Digest = "sha256:0123456789abcdef0"
manifestPush.Target.Length = 1
manifestPush.Target.Size = 1
manifestPush.Target.MediaType = schema1.MediaTypeSignedManifest
manifestPush.Target.Digest = "sha256:0123456789abcdef0"
manifestPush.Target.Size = 1
manifestPush.Target.Length = 1
manifestPush.Target.Repository = "library/test"
manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest"
var layerPush0 = prototype
layerPush0 := prototype
layerPush0.ID = "asdf-asdf-asdf-asdf-1"
layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
layerPush0.Target.Length = 2
layerPush0.Target.Size = 2
layerPush0.Target.MediaType = layerMediaType
layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
layerPush0.Target.Size = 2
layerPush0.Target.Length = 2
layerPush0.Target.Repository = "library/test"
layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest"
var layerPush1 = prototype
layerPush1 := prototype
layerPush1.ID = "asdf-asdf-asdf-asdf-2"
layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6"
layerPush1.Target.Length = 3
layerPush1.Target.Size = 3
layerPush1.Target.MediaType = layerMediaType
layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6"
layerPush1.Target.Size = 3
layerPush1.Target.Length = 3
layerPush1.Target.Repository = "library/test"
layerPush1.Target.URL = "http://example.com/v2/library/test/manifests/latest"

View file

@ -135,7 +135,7 @@ type headerRoundTripper struct {
}
func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
var nreq = *req
nreq := *req
nreq.Header = make(http.Header)
merge := func(headers http.Header) {

View file

@ -197,7 +197,6 @@ func TestHTTPSink(t *testing.T) {
if err := sink.Close(); err == nil {
t.Fatalf("second close should have returned error: %v", err)
}
}
func createTestEvent(action, repo, typ string) Event {

View file

@ -2,6 +2,7 @@ package notifications
import (
"context"
"io"
"net/http"
"github.com/distribution/distribution/v3"
@ -147,7 +148,7 @@ func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([]
return p, err
}
func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error) {
rc, err := bsl.BlobStore.Open(ctx, dgst)
if err == nil {
if desc, err := bsl.Stat(ctx, dgst); err != nil {

View file

@ -3,13 +3,12 @@ package notifications
import (
"reflect"
"sync"
"testing"
"time"
events "github.com/docker/go-events"
"github.com/sirupsen/logrus"
"testing"
)
func TestEventQueue(t *testing.T) {

View file

@ -1,12 +0,0 @@
// +build gofuzz
package reference
// fuzzParseNormalizedNamed implements a fuzzer
// that targets ParseNormalizedNamed
// Export before building the fuzzer.
// nolint:deadcode
func fuzzParseNormalizedNamed(data []byte) int {
_, _ = ParseNormalizedNamed(string(data))
return 1
}

reference/fuzz_test.go Normal file (14 lines added)
View file

@ -0,0 +1,14 @@
package reference
import (
"testing"
)
// fuzzParseNormalizedNamed implements a fuzzer
// that targets ParseNormalizedNamed
// nolint:deadcode
func FuzzParseNormalizedNamed(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
_, _ = ParseNormalizedNamed(data)
})
}
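
As a hedged aside, seed inputs can be registered with f.Add before f.Fuzz if a starting corpus is wanted; the variant below is illustrative only (the function name and seed strings are not part of this change).

func FuzzParseNormalizedNamedSeeded(f *testing.F) {
	// Seed the corpus with a few representative references (arbitrary picks).
	for _, seed := range []string{
		"ubuntu",
		"docker.io/library/busybox:latest",
		"example.com:5000/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000",
	} {
		f.Add(seed)
	}
	f.Fuzz(func(t *testing.T, data string) {
		// The target must not panic; parse errors are expected and ignored.
		_, _ = ParseNormalizedNamed(data)
	})
}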

View file

@ -32,7 +32,7 @@ func FamiliarString(ref Reference) string {
}
// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
// See [path.Match] for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
matched, err := path.Match(pattern, FamiliarString(ref))
if namedRef, isNamed := ref.(Named); isNamed && !matched {
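
A hedged usage sketch for FamiliarMatch: the pattern is matched against the familiar (shortened) form of the reference, so a short pattern such as "ubuntu:*" matches the fully normalized name. The import path is assumed from the module path and is not stated in this change.

package main

import (
	"fmt"

	"github.com/distribution/distribution/v3/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("ubuntu:latest")
	if err != nil {
		panic(err)
	}
	// FamiliarString(ref) is "ubuntu:latest" rather than the canonical
	// "docker.io/library/ubuntu:latest", so this pattern matches.
	ok, err := reference.FamiliarMatch("ubuntu:*", ref)
	fmt.Println(ok, err) // true <nil>
}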

View file

@ -4,15 +4,39 @@ import (
"fmt"
"strings"
"github.com/distribution/distribution/v3/digestset"
"github.com/opencontainers/go-digest"
)
var (
const (
// legacyDefaultDomain is the legacy domain for Docker Hub (which was
// originally named "the Docker Index"). This domain is still used for
// authentication and image search, which were part of the "v1" Docker
// registry specification.
//
// This domain will continue to be supported, but there are plans to consolidate
// legacy domains to new "canonical" domains. Once those domains are decided
// on, we must update the normalization functions, but preserve compatibility
// with existing installs, clients, and user configuration.
legacyDefaultDomain = "index.docker.io"
defaultDomain = "docker.io"
officialRepoName = "library"
defaultTag = "latest"
// defaultDomain is the default domain used for images on Docker Hub.
// It is used to normalize "familiar" names to canonical names, for example,
// to convert "ubuntu" to "docker.io/library/ubuntu:latest".
//
// Note that the actual domain of Docker Hub's registry is registry-1.docker.io.
// This domain will continue to be supported, but there are plans to consolidate
// legacy domains to new "canonical" domains. Once those domains are decided
// on, we must update the normalization functions, but preserve compatibility
// with existing installs, clients, and user configuration.
defaultDomain = "docker.io"
// officialRepoPrefix is the namespace used for official images on Docker Hub.
// It is used to normalize "familiar" names to canonical names, for example,
// to convert "ubuntu" to "docker.io/library/ubuntu:latest".
officialRepoPrefix = "library/"
// defaultTag is the default tag if no tag is provided.
defaultTag = "latest"
)
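
A small, hedged sketch of the normalization these constants drive, using the exported helpers from this package (the import path is assumed from the module path):

package main

import (
	"fmt"

	"github.com/distribution/distribution/v3/reference"
)

func main() {
	// "ubuntu" has no domain and no "library/" prefix, so both are added.
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // docker.io/library/ubuntu

	// TagNameOnly applies the default tag when none was given.
	tagged := reference.TagNameOnly(named)
	fmt.Println(tagged.String()) // docker.io/library/ubuntu:latest

	// FamiliarString goes the other way, back to the short form.
	fmt.Println(reference.FamiliarString(tagged)) // ubuntu:latest
}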
// normalizedNamed represents a name which has been
@ -34,14 +58,14 @@ func ParseNormalizedNamed(s string) (Named, error) {
return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
}
domain, remainder := splitDockerDomain(s)
var remoteName string
var remote string
if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
remoteName = remainder[:tagSep]
remote = remainder[:tagSep]
} else {
remoteName = remainder
remote = remainder
}
if strings.ToLower(remoteName) != remoteName {
return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remoteName)
if strings.ToLower(remote) != remote {
return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remote)
}
ref, err := Parse(domain + "/" + remainder)
@ -55,41 +79,53 @@ func ParseNormalizedNamed(s string) (Named, error) {
return named, nil
}
// ParseDockerRef normalizes the image reference following the docker convention. This is added
// mainly for backward compatibility.
// The reference returned can only be either tagged or digested. For reference contains both tag
// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@
// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
// namedTaggedDigested is a reference that has both a tag and a digest.
type namedTaggedDigested interface {
NamedTagged
Digested
}
// ParseDockerRef normalizes the image reference following the docker convention,
// which allows for references to contain both a tag and a digest. It returns a
// reference that is either tagged or digested. For references containing both
// a tag and a digest, it returns a digested reference. For example, the following
// reference:
//
// docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
//
// Is returned as a digested reference (with the ":latest" tag removed):
//
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
//
// References that are already "tagged" or "digested" are returned unmodified:
//
// // Already a digested reference
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
//
// // Already a named reference
// docker.io/library/busybox:latest
func ParseDockerRef(ref string) (Named, error) {
named, err := ParseNormalizedNamed(ref)
if err != nil {
return nil, err
}
if _, ok := named.(NamedTagged); ok {
if canonical, ok := named.(Canonical); ok {
// The reference is both tagged and digested, only
// return digested.
newNamed, err := WithName(canonical.Name())
if err != nil {
return nil, err
}
newCanonical, err := WithDigest(newNamed, canonical.Digest())
if err != nil {
return nil, err
}
return newCanonical, nil
if canonical, ok := named.(namedTaggedDigested); ok {
// The reference is both tagged and digested; only return digested.
newNamed, err := WithName(canonical.Name())
if err != nil {
return nil, err
}
return WithDigest(newNamed, canonical.Digest())
}
return TagNameOnly(named), nil
}
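
A hedged usage sketch of ParseDockerRef, reusing the digest from the example in the comment above (the import path is assumed from the module path):

package main

import (
	"fmt"

	"github.com/distribution/distribution/v3/reference"
)

func main() {
	// A reference carrying both a tag and a digest collapses to the digested form.
	ref, err := reference.ParseDockerRef("docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String())
	// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa

	// A bare name is normalized and given the default tag instead.
	ref, _ = reference.ParseDockerRef("busybox")
	fmt.Println(ref.String()) // docker.io/library/busybox:latest
}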
// splitDockerDomain splits a repository name to domain and remotename string.
// splitDockerDomain splits a repository name into domain and remote-name.
// If no valid domain is found, the default domain is used. The repository
// name must already be validated before calling this function.
func splitDockerDomain(name string) (domain, remainder string) {
i := strings.IndexRune(name, '/')
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost" && strings.ToLower(name[:i]) == name[:i]) {
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != localhost && strings.ToLower(name[:i]) == name[:i]) {
domain, remainder = defaultDomain, name
} else {
domain, remainder = name[:i], name[i+1:]
@ -98,7 +134,7 @@ func splitDockerDomain(name string) (domain, remainder string) {
domain = defaultDomain
}
if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
remainder = officialRepoName + "/" + remainder
remainder = officialRepoPrefix + remainder
}
return
}
@ -118,8 +154,15 @@ func familiarizeName(named namedRepository) repository {
if repo.domain == defaultDomain {
repo.domain = ""
// Handle official repositories which have the pattern "library/<official repo name>"
if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
repo.path = split[1]
if strings.HasPrefix(repo.path, officialRepoPrefix) {
// TODO(thaJeztah): this check may be too strict, as it assumes the
// "library/" namespace does not have nested namespaces. While this
// is true (currently), technically it would be possible for Docker
// Hub to use those (e.g. "library/distros/ubuntu:latest").
// See https://github.com/distribution/distribution/pull/3769#issuecomment-1302031785.
if remainder := strings.TrimPrefix(repo.path, officialRepoPrefix); !strings.ContainsRune(remainder, '/') {
repo.path = remainder
}
}
}
return repo
@ -179,20 +222,3 @@ func ParseAnyReference(ref string) (Reference, error) {
return ParseNormalizedNamed(ref)
}
// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
dgst, err := ds.Lookup(ref)
if err == nil {
return digestReference(dgst), nil
}
} else {
if dgst, err := digest.Parse(ref); err == nil {
return digestReference(dgst), nil
}
}
return ParseNormalizedNamed(ref)
}

View file

@ -4,11 +4,11 @@ import (
"strconv"
"testing"
"github.com/distribution/distribution/v3/digestset"
"github.com/opencontainers/go-digest"
)
func TestValidateReferenceName(t *testing.T) {
t.Parallel()
validRepoNames := []string{
"docker/docker",
"library/debian",
@ -71,6 +71,7 @@ func TestValidateReferenceName(t *testing.T) {
}
func TestValidateRemoteName(t *testing.T) {
t.Parallel()
validRepositoryNames := []string{
// Sanity check.
"docker/docker",
@ -84,7 +85,7 @@ func TestValidateRemoteName(t *testing.T) {
// Allow multiple hyphens as well.
"docker---rules/docker",
//Username doc and image name docker being tested.
// Username doc and image name docker being tested.
"doc/docker",
// single character names are now allowed.
@ -129,7 +130,7 @@ func TestValidateRemoteName(t *testing.T) {
// No repository.
"docker/",
//namespace too long
// namespace too long
"this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker",
}
for _, repositoryName := range invalidRepositoryNames {
@ -140,6 +141,7 @@ func TestValidateRemoteName(t *testing.T) {
}
func TestParseRepositoryInfo(t *testing.T) {
t.Parallel()
type tcase struct {
RemoteName, FamiliarName, FullName, AmbiguousName, Domain string
}
@ -293,6 +295,7 @@ func TestParseRepositoryInfo(t *testing.T) {
}
func TestParseReferenceWithTagAndDigest(t *testing.T) {
t.Parallel()
shortRef := "busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"
ref, err := ParseNormalizedNamed(shortRef)
if err != nil {
@ -314,6 +317,7 @@ func TestParseReferenceWithTagAndDigest(t *testing.T) {
}
func TestInvalidReferenceComponents(t *testing.T) {
t.Parallel()
if _, err := ParseNormalizedNamed("-foo"); err == nil {
t.Fatal("Expected WithName to detect invalid name")
}
@ -356,11 +360,11 @@ func equalReference(r1, r2 Reference) bool {
}
func TestParseAnyReference(t *testing.T) {
t.Parallel()
tcases := []struct {
Reference string
Equivalent string
Expected Reference
Digests []digest.Digest
}{
{
Reference: "redis",
@ -416,62 +420,16 @@ func TestParseAnyReference(t *testing.T) {
Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9",
Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9",
},
{
Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9",
Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
Digests: []digest.Digest{
digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
},
},
{
Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9",
Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9",
Digests: []digest.Digest{
digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
},
},
{
Reference: "dbcc1c",
Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
Digests: []digest.Digest{
digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
},
},
{
Reference: "dbcc1",
Equivalent: "docker.io/library/dbcc1",
Digests: []digest.Digest{
digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
},
},
{
Reference: "dbcc1c",
Equivalent: "docker.io/library/dbcc1c",
Digests: []digest.Digest{
digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
},
},
}
for _, tcase := range tcases {
var ref Reference
var err error
if len(tcase.Digests) == 0 {
ref, err = ParseAnyReference(tcase.Reference)
} else {
ds := digestset.NewSet()
for _, dgst := range tcase.Digests {
if err := ds.Add(dgst); err != nil {
t.Fatalf("Error adding digest %s: %v", dgst.String(), err)
}
}
ref, err = ParseAnyReferenceWithSet(tcase.Reference, ds)
}
ref, err = ParseAnyReference(tcase.Reference)
if err != nil {
t.Fatalf("Error parsing reference %s: %v", tcase.Reference, err)
}
@ -493,6 +451,7 @@ func TestParseAnyReference(t *testing.T) {
}
func TestNormalizedSplitHostname(t *testing.T) {
t.Parallel()
testcases := []struct {
input string
domain string
@ -575,6 +534,7 @@ func TestNormalizedSplitHostname(t *testing.T) {
}
func TestMatchError(t *testing.T) {
t.Parallel()
named, err := ParseAnyReference("foo")
if err != nil {
t.Fatal(err)
@ -586,6 +546,7 @@ func TestMatchError(t *testing.T) {
}
func TestMatch(t *testing.T) {
t.Parallel()
matchCases := []struct {
reference string
pattern string
@ -653,6 +614,7 @@ func TestMatch(t *testing.T) {
}
func TestParseDockerRef(t *testing.T) {
t.Parallel()
testcases := []struct {
name string
input string
@ -716,6 +678,7 @@ func TestParseDockerRef(t *testing.T) {
}
for _, test := range testcases {
t.Run(test.name, func(t *testing.T) {
t.Parallel()
normalized, err := ParseDockerRef(test.input)
if err != nil {
t.Fatal(err)

View file

@ -3,15 +3,16 @@
//
// Grammar
//
// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]*
// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] remote-name
// domain := host [':' port-number]
// host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A
// domain-name := domain-component ['.' domain-component]*
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/
// path (or "remote-name") := path-component ['/' path-component]*
// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/
@ -23,7 +24,6 @@
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
//
// identifier := /[a-f0-9]{64}/
// short-identifier := /[a-f0-9]{6,64}/
package reference
import (
@ -147,7 +147,7 @@ type namedRepository interface {
Path() string
}
// Domain returns the domain part of the Named reference
// Domain returns the domain part of the [Named] reference.
func Domain(named Named) string {
if r, ok := named.(namedRepository); ok {
return r.Domain()
@ -156,7 +156,7 @@ func Domain(named Named) string {
return domain
}
// Path returns the name without the domain part of the Named reference
// Path returns the name without the domain part of the [Named] reference.
func Path(named Named) (name string) {
if r, ok := named.(namedRepository); ok {
return r.Path()
@ -177,7 +177,8 @@ func splitDomain(name string) (string, string) {
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
// DEPRECATED: Use Domain or Path
//
// Deprecated: Use [Domain] or [Path].
func SplitHostname(named Named) (string, string) {
if r, ok := named.(namedRepository); ok {
return r.Domain(), r.Path()
@ -187,7 +188,6 @@ func SplitHostname(named Named) (string, string) {
// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
matches := ReferenceRegexp.FindStringSubmatch(s)
if matches == nil {
@ -239,7 +239,6 @@ func Parse(s string) (Reference, error) {
// the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) {
named, err := ParseNormalizedNamed(s)
if err != nil {
@ -322,11 +321,13 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
// TrimNamed removes any tag or digest from the named reference.
func TrimNamed(ref Named) Named {
domain, path := SplitHostname(ref)
return repository{
domain: domain,
path: path,
repo := repository{}
if r, ok := ref.(namedRepository); ok {
repo.domain, repo.path = r.Domain(), r.Path()
} else {
repo.domain, repo.path = splitDomain(ref.Name())
}
return repo
}
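
A hedged usage sketch for TrimNamed (import path assumed from the module path):

package main

import (
	"fmt"

	"github.com/distribution/distribution/v3/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("docker.io/library/ubuntu:latest")
	if err != nil {
		panic(err)
	}
	// TrimNamed drops the tag (and any digest), keeping only domain and path.
	fmt.Println(reference.TrimNamed(named).String()) // docker.io/library/ubuntu
}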
func getBestReferenceType(ref reference) Reference {

View file

@ -4,7 +4,6 @@ import (
_ "crypto/sha256"
_ "crypto/sha512"
"encoding/json"
"strconv"
"strings"
"testing"
@ -12,6 +11,7 @@ import (
)
func TestReferenceParse(t *testing.T) {
t.Parallel()
// referenceTestcases is a unified set of testcases for
// testing the parsing of references
referenceTestcases := []struct {
@ -103,10 +103,10 @@ func TestReferenceParse(t *testing.T) {
},
// FIXME "Uppercase" is incorrectly handled as a domain-name here, therefore passes.
// See https://github.com/distribution/distribution/pull/1778, and https://github.com/docker/docker/pull/20175
//{
// {
// input: "Uppercase/lowercase:tag",
// err: ErrNameContainsUppercase,
//},
// },
{
input: "test:5000/Uppercase/lowercase:tag",
err: ErrNameContainsUppercase,
@ -268,71 +268,70 @@ func TestReferenceParse(t *testing.T) {
},
}
for _, testcase := range referenceTestcases {
failf := func(format string, v ...interface{}) {
t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
t.Fail()
}
repo, err := Parse(testcase.input)
if testcase.err != nil {
if err == nil {
failf("missing expected error: %v", testcase.err)
} else if testcase.err != err {
failf("mismatched error: got %v, expected %v", err, testcase.err)
}
continue
} else if err != nil {
failf("unexpected parse error: %v", err)
continue
}
if repo.String() != testcase.input {
failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input)
}
if named, ok := repo.(Named); ok {
if named.Name() != testcase.repository {
failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository)
}
domain, _ := SplitHostname(named)
if domain != testcase.domain {
failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
}
} else if testcase.repository != "" || testcase.domain != "" {
failf("expected named type, got %T", repo)
}
tagged, ok := repo.(Tagged)
if testcase.tag != "" {
if ok {
if tagged.Tag() != testcase.tag {
failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag)
testcase := testcase
t.Run(testcase.input, func(t *testing.T) {
t.Parallel()
repo, err := Parse(testcase.input)
if testcase.err != nil {
if err == nil {
t.Errorf("missing expected error: %v", testcase.err)
} else if testcase.err != err {
t.Errorf("mismatched error: got %v, expected %v", err, testcase.err)
}
} else {
failf("expected tagged type, got %T", repo)
return
} else if err != nil {
t.Errorf("unexpected parse error: %v", err)
return
}
if repo.String() != testcase.input {
t.Errorf("mismatched repo: got %q, expected %q", repo.String(), testcase.input)
}
} else if ok {
failf("unexpected tagged type")
}
digested, ok := repo.(Digested)
if testcase.digest != "" {
if ok {
if digested.Digest().String() != testcase.digest {
failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest)
if named, ok := repo.(Named); ok {
if named.Name() != testcase.repository {
t.Errorf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository)
}
} else {
failf("expected digested type, got %T", repo)
domain, _ := SplitHostname(named)
if domain != testcase.domain {
t.Errorf("unexpected domain: got %q, expected %q", domain, testcase.domain)
}
} else if testcase.repository != "" || testcase.domain != "" {
t.Errorf("expected named type, got %T", repo)
}
} else if ok {
failf("unexpected digested type")
}
tagged, ok := repo.(Tagged)
if testcase.tag != "" {
if ok {
if tagged.Tag() != testcase.tag {
t.Errorf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag)
}
} else {
t.Errorf("expected tagged type, got %T", repo)
}
} else if ok {
t.Errorf("unexpected tagged type")
}
digested, ok := repo.(Digested)
if testcase.digest != "" {
if ok {
if digested.Digest().String() != testcase.digest {
t.Errorf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest)
}
} else {
t.Errorf("expected digested type, got %T", repo)
}
} else if ok {
t.Errorf("unexpected digested type")
}
})
}
}
// TestWithNameFailure tests cases where WithName should fail. Cases where it
// should succeed are covered by TestSplitHostname, below.
func TestWithNameFailure(t *testing.T) {
t.Parallel()
testcases := []struct {
input string
err error
@ -363,19 +362,19 @@ func TestWithNameFailure(t *testing.T) {
},
}
for _, testcase := range testcases {
failf := func(format string, v ...interface{}) {
t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
t.Fail()
}
_, err := WithName(testcase.input)
if err == nil {
failf("no error parsing name. expected: %s", testcase.err)
}
testcase := testcase
t.Run(testcase.input, func(t *testing.T) {
t.Parallel()
_, err := WithName(testcase.input)
if err == nil {
t.Errorf("no error parsing name. expected: %s", testcase.err)
}
})
}
}
func TestSplitHostname(t *testing.T) {
t.Parallel()
testcases := []struct {
input string
domain string
@ -413,22 +412,21 @@ func TestSplitHostname(t *testing.T) {
},
}
for _, testcase := range testcases {
failf := func(format string, v ...interface{}) {
t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
t.Fail()
}
named, err := WithName(testcase.input)
if err != nil {
failf("error parsing name: %s", err)
}
domain, name := SplitHostname(named)
if domain != testcase.domain {
failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
}
if name != testcase.name {
failf("unexpected name: got %q, expected %q", name, testcase.name)
}
testcase := testcase
t.Run(testcase.input, func(t *testing.T) {
t.Parallel()
named, err := WithName(testcase.input)
if err != nil {
t.Errorf("error parsing name: %s", err)
}
domain, name := SplitHostname(named)
if domain != testcase.domain {
t.Errorf("unexpected domain: got %q, expected %q", domain, testcase.domain)
}
if name != testcase.name {
t.Errorf("unexpected name: got %q, expected %q", name, testcase.name)
}
})
}
}
@ -438,6 +436,7 @@ type serializationType struct {
}
func TestSerialization(t *testing.T) {
t.Parallel()
testcases := []struct {
description string
input string
@ -469,99 +468,98 @@ func TestSerialization(t *testing.T) {
},
}
for _, testcase := range testcases {
failf := func(format string, v ...interface{}) {
t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
t.Fail()
}
m := map[string]string{
"Description": testcase.description,
"Field": testcase.input,
}
b, err := json.Marshal(m)
if err != nil {
failf("error marshalling: %v", err)
}
t := serializationType{}
if err := json.Unmarshal(b, &t); err != nil {
if testcase.err == nil {
failf("error unmarshalling: %v", err)
testcase := testcase
t.Run(testcase.description, func(t *testing.T) {
t.Parallel()
m := map[string]string{
"Description": testcase.description,
"Field": testcase.input,
}
if err != testcase.err {
failf("wrong error, expected %v, got %v", testcase.err, err)
b, err := json.Marshal(m)
if err != nil {
t.Errorf("error marshalling: %v", err)
}
st := serializationType{}
continue
} else if testcase.err != nil {
failf("expected error unmarshalling: %v", testcase.err)
}
if t.Description != testcase.description {
failf("wrong description, expected %q, got %q", testcase.description, t.Description)
}
ref := t.Field.Reference()
if named, ok := ref.(Named); ok {
if named.Name() != testcase.name {
failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name)
}
} else if testcase.name != "" {
failf("expected named type, got %T", ref)
}
tagged, ok := ref.(Tagged)
if testcase.tag != "" {
if ok {
if tagged.Tag() != testcase.tag {
failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag)
if err := json.Unmarshal(b, &st); err != nil {
if testcase.err == nil {
t.Errorf("error unmarshalling: %v", err)
}
} else {
failf("expected tagged type, got %T", ref)
}
} else if ok {
failf("unexpected tagged type")
}
digested, ok := ref.(Digested)
if testcase.digest != "" {
if ok {
if digested.Digest().String() != testcase.digest {
failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest)
if err != testcase.err {
t.Errorf("wrong error, expected %v, got %v", testcase.err, err)
}
} else {
failf("expected digested type, got %T", ref)
return
} else if testcase.err != nil {
t.Errorf("expected error unmarshalling: %v", testcase.err)
}
} else if ok {
failf("unexpected digested type")
}
t = serializationType{
Description: testcase.description,
Field: AsField(ref),
}
if st.Description != testcase.description {
t.Errorf("wrong description, expected %q, got %q", testcase.description, st.Description)
}
b2, err := json.Marshal(t)
if err != nil {
failf("error marshing serialization type: %v", err)
}
ref := st.Field.Reference()
if string(b) != string(b2) {
failf("unexpected serialized value: expected %q, got %q", string(b), string(b2))
}
if named, ok := ref.(Named); ok {
if named.Name() != testcase.name {
t.Errorf("unexpected repository: got %q, expected %q", named.Name(), testcase.name)
}
} else if testcase.name != "" {
t.Errorf("expected named type, got %T", ref)
}
// Ensure t.Field is not implementing "Reference" directly, getting
// around the Reference type system
var fieldInterface interface{} = t.Field
if _, ok := fieldInterface.(Reference); ok {
failf("field should not implement Reference interface")
}
tagged, ok := ref.(Tagged)
if testcase.tag != "" {
if ok {
if tagged.Tag() != testcase.tag {
t.Errorf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag)
}
} else {
t.Errorf("expected tagged type, got %T", ref)
}
} else if ok {
t.Errorf("unexpected tagged type")
}
digested, ok := ref.(Digested)
if testcase.digest != "" {
if ok {
if digested.Digest().String() != testcase.digest {
t.Errorf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest)
}
} else {
t.Errorf("expected digested type, got %T", ref)
}
} else if ok {
t.Errorf("unexpected digested type")
}
st = serializationType{
Description: testcase.description,
Field: AsField(ref),
}
b2, err := json.Marshal(st)
if err != nil {
t.Errorf("error marshing serialization type: %v", err)
}
if string(b) != string(b2) {
t.Errorf("unexpected serialized value: expected %q, got %q", string(b), string(b2))
}
// Ensure st.Field is not implementing "Reference" directly, getting
// around the Reference type system
var fieldInterface interface{} = st.Field
if _, ok := fieldInterface.(Reference); ok {
t.Errorf("field should not implement Reference interface")
}
})
}
}
func TestWithTag(t *testing.T) {
t.Parallel()
testcases := []struct {
name string
digest digest.Digest
@ -596,34 +594,34 @@ func TestWithTag(t *testing.T) {
},
}
for _, testcase := range testcases {
failf := func(format string, v ...interface{}) {
t.Logf(strconv.Quote(testcase.name)+": "+format, v...)
t.Fail()
}
named, err := WithName(testcase.name)
if err != nil {
failf("error parsing name: %s", err)
}
if testcase.digest != "" {
canonical, err := WithDigest(named, testcase.digest)
testcase := testcase
t.Run(testcase.combined, func(t *testing.T) {
t.Parallel()
named, err := WithName(testcase.name)
if err != nil {
failf("error adding digest")
t.Errorf("error parsing name: %s", err)
}
if testcase.digest != "" {
canonical, err := WithDigest(named, testcase.digest)
if err != nil {
t.Errorf("error adding digest")
}
named = canonical
}
named = canonical
}
tagged, err := WithTag(named, testcase.tag)
if err != nil {
failf("WithTag failed: %s", err)
}
if tagged.String() != testcase.combined {
failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined)
}
tagged, err := WithTag(named, testcase.tag)
if err != nil {
t.Errorf("WithTag failed: %s", err)
}
if tagged.String() != testcase.combined {
t.Errorf("unexpected: got %q, expected %q", tagged.String(), testcase.combined)
}
})
}
}
func TestWithDigest(t *testing.T) {
t.Parallel()
testcases := []struct {
name string
digest digest.Digest
@ -653,33 +651,33 @@ func TestWithDigest(t *testing.T) {
},
}
for _, testcase := range testcases {
failf := func(format string, v ...interface{}) {
t.Logf(strconv.Quote(testcase.name)+": "+format, v...)
t.Fail()
}
named, err := WithName(testcase.name)
if err != nil {
failf("error parsing name: %s", err)
}
if testcase.tag != "" {
tagged, err := WithTag(named, testcase.tag)
testcase := testcase
t.Run(testcase.combined, func(t *testing.T) {
t.Parallel()
named, err := WithName(testcase.name)
if err != nil {
failf("error adding tag")
t.Errorf("error parsing name: %s", err)
}
named = tagged
}
digested, err := WithDigest(named, testcase.digest)
if err != nil {
failf("WithDigest failed: %s", err)
}
if digested.String() != testcase.combined {
failf("unexpected: got %q, expected %q", digested.String(), testcase.combined)
}
if testcase.tag != "" {
tagged, err := WithTag(named, testcase.tag)
if err != nil {
t.Errorf("error adding tag")
}
named = tagged
}
digested, err := WithDigest(named, testcase.digest)
if err != nil {
t.Errorf("WithDigest failed: %s", err)
}
if digested.String() != testcase.combined {
t.Errorf("unexpected: got %q, expected %q", digested.String(), testcase.combined)
}
})
}
}
func TestParseNamed(t *testing.T) {
t.Parallel()
testcases := []struct {
input string
domain string
@ -724,31 +722,30 @@ func TestParseNamed(t *testing.T) {
},
}
for _, testcase := range testcases {
failf := func(format string, v ...interface{}) {
t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
t.Fail()
}
testcase := testcase
t.Run(testcase.input, func(t *testing.T) {
t.Parallel()
named, err := ParseNamed(testcase.input)
if err != nil && testcase.err == nil {
t.Errorf("error parsing name: %s", err)
return
} else if err == nil && testcase.err != nil {
t.Errorf("parsing succeeded: expected error %v", testcase.err)
return
} else if err != testcase.err {
t.Errorf("unexpected error %v, expected %v", err, testcase.err)
return
} else if err != nil {
return
}
named, err := ParseNamed(testcase.input)
if err != nil && testcase.err == nil {
failf("error parsing name: %s", err)
continue
} else if err == nil && testcase.err != nil {
failf("parsing succeeded: expected error %v", testcase.err)
continue
} else if err != testcase.err {
failf("unexpected error %v, expected %v", err, testcase.err)
continue
} else if err != nil {
continue
}
domain, name := SplitHostname(named)
if domain != testcase.domain {
failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
}
if name != testcase.name {
failf("unexpected name: got %q, expected %q", name, testcase.name)
}
domain, name := SplitHostname(named)
if domain != testcase.domain {
t.Errorf("unexpected domain: got %q, expected %q", domain, testcase.domain)
}
if name != testcase.name {
t.Errorf("unexpected name: got %q, expected %q", name, testcase.name)
}
})
}
}

View file

@ -1,48 +1,102 @@
package reference
import "regexp"
import (
"regexp"
"strings"
)
var (
// alphaNumeric defines the alpha numeric atom, typically a
// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:<encoded>").
var DigestRegexp = regexp.MustCompile(digestPat)
// DomainRegexp matches hostname or IP-addresses, optionally including a port
// number. It defines the structure of potential domain components that may be
// part of image names. This is purposely a subset of what is allowed by DNS to
// ensure backwards compatibility with Docker image names. It may be a subset of
// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between
// square brackets (excluding zone identifiers as defined by [RFC 6874] or special
// addresses such as IPv4-Mapped).
//
// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874.
var DomainRegexp = regexp.MustCompile(domainAndPort)
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
var IdentifierRegexp = regexp.MustCompile(identifier)
// NameRegexp is the format for the name component of references, including
// an optional domain and port, but without tag or digest suffix.
var NameRegexp = regexp.MustCompile(namePat)
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
var ReferenceRegexp = regexp.MustCompile(referencePat)
// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go].
//
// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28
var TagRegexp = regexp.MustCompile(tag)
const (
// alphanumeric defines the alphanumeric atom, typically a
// component of names. This only allows lower case characters and digits.
alphaNumeric = `[a-z0-9]+`
alphanumeric = `[a-z0-9]+`
// separator defines the separators allowed to be embedded in name
// components. This allow one period, one or two underscore and multiple
// components. This allows one period, one or two underscore and multiple
// dashes. Repeated dashes and underscores are intentionally treated
// differently. In order to support valid hostnames as name components,
// supporting repeated dash was added. Additionally double underscore is
// now allowed as a separator to loosen the restriction for previously
// supported names.
separator = `(?:[._]|__|[-]*)`
separator = `(?:[._]|__|[-]+)`
// nameComponent restricts registry path component names to start
// with at least one letter or number, with following parts able to be
// separated by one period, one or two underscore and multiple dashes.
nameComponent = expression(
alphaNumeric,
optional(repeated(separator, alphaNumeric)))
// localhost is treated as a special value for domain-name. Any other
// domain-name without a "." or a ":port" is considered a path component.
localhost = `localhost`
// domainNameComponent restricts the registry domain component of a
// repository name to start with a component as defined by DomainRegexp.
domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`
// optionalPort matches an optional port-number including the port separator
// (e.g. ":80").
optionalPort = `(?::[0-9]+)?`
// tag matches valid tag names. From docker/docker:graph/tags.go.
tag = `[\w][\w.-]{0,127}`
// digestPat matches well-formed digests, including algorithm (e.g. "sha256:<encoded>").
//
// TODO(thaJeztah): this should follow the same rules as https://pkg.go.dev/github.com/opencontainers/go-digest@v1.0.0#DigestRegexp
// so that go-digest defines the canonical format. Note that the go-digest is
// more relaxed:
// - it allows multiple algorithms (e.g. "sha256+b64:<encoded>") to allow
// future expansion of supported algorithms.
// - it allows the "<encoded>" value to use urlsafe base64 encoding as defined
// in [rfc4648, section 5].
//
// [rfc4648, section 5]: https://www.rfc-editor.org/rfc/rfc4648#section-5.
digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`
// identifier is the format for a content addressable identifier using sha256.
// These identifiers are like digests without the algorithm, since sha256 is used.
identifier = `([a-f0-9]{64})`
// ipv6address are enclosed between square brackets and may be represented
// in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format
// are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as
// IPv4-Mapped are deliberately excluded.
ipv6address = expression(
literal(`[`), `(?:[a-fA-F0-9:]+)`, literal(`]`),
)
ipv6address = `\[(?:[a-fA-F0-9:]+)\]`
)
var (
// domainName defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names. This includes IPv4 addresses on decimal format.
domainName = expression(
domainNameComponent,
optional(repeated(literal(`.`), domainNameComponent)),
)
domainName = domainNameComponent + anyTimes(`\.`+domainNameComponent)
// host defines the structure of potential domains based on the URI
// Host subcomponent on rfc3986. It may be a subset of DNS domain name,
@ -53,129 +107,57 @@ var (
// allowed by the URI Host subcomponent on rfc3986 to ensure backwards
// compatibility with Docker image names.
domain = expression(
host,
optional(literal(`:`), `[0-9]+`))
domainAndPort = host + optionalPort
// DomainRegexp defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names.
DomainRegexp = regexp.MustCompile(domain)
tag = `[\w][\w.-]{0,127}`
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
TagRegexp = regexp.MustCompile(tag)
anchoredTag = anchored(tag)
// anchoredTagRegexp matches valid tag names, anchored at the start and
// end of the matched string.
anchoredTagRegexp = regexp.MustCompile(anchoredTag)
anchoredTagRegexp = regexp.MustCompile(anchored(tag))
digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`
// DigestRegexp matches valid digests.
DigestRegexp = regexp.MustCompile(digestPat)
anchoredDigest = anchored(digestPat)
// anchoredDigestRegexp matches valid digests, anchored at the start and
// end of the matched string.
anchoredDigestRegexp = regexp.MustCompile(anchoredDigest)
anchoredDigestRegexp = regexp.MustCompile(anchored(digestPat))
namePat = expression(
optional(domain, literal(`/`)),
nameComponent,
optional(repeated(literal(`/`), nameComponent)))
// NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the domain and name part omitting
// the separating forward slash from either.
NameRegexp = regexp.MustCompile(namePat)
// pathComponent restricts path-components to start with an alphanumeric
// character, with following parts able to be separated by a separator
// (one period, one or two underscore and multiple dashes).
pathComponent = alphanumeric + anyTimes(separator+alphanumeric)
// remoteName matches the remote-name of a repository. It consists of one
// or more forward slash (/) delimited path-components:
//
// pathComponent[[/pathComponent] ...] // e.g., "library/ubuntu"
remoteName = pathComponent + anyTimes(`/`+pathComponent)
namePat = optional(domainAndPort+`/`) + remoteName
anchoredName = anchored(
optional(capture(domain), literal(`/`)),
capture(nameComponent,
optional(repeated(literal(`/`), nameComponent))))
// anchoredNameRegexp is used to parse a name value, capturing the
// domain and trailing components.
anchoredNameRegexp = regexp.MustCompile(anchoredName)
anchoredNameRegexp = regexp.MustCompile(anchored(optional(capture(domainAndPort), `/`), capture(remoteName)))
referencePat = anchored(capture(namePat),
optional(literal(":"), capture(tag)),
optional(literal("@"), capture(digestPat)))
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
ReferenceRegexp = regexp.MustCompile(referencePat)
referencePat = anchored(capture(namePat), optional(`:`, capture(tag)), optional(`@`, capture(digestPat)))
identifier = `([a-f0-9]{64})`
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
IdentifierRegexp = regexp.MustCompile(identifier)
shortIdentifier = `([a-f0-9]{6,64})`
// ShortIdentifierRegexp is the format used to represent a prefix
// of an identifier. A prefix may be used to match a sha256 identifier
// within a list of trusted identifiers.
ShortIdentifierRegexp = regexp.MustCompile(shortIdentifier)
anchoredIdentifier = anchored(identifier)
// anchoredIdentifierRegexp is used to check or match an
// identifier value, anchored at start and end of string.
anchoredIdentifierRegexp = regexp.MustCompile(anchoredIdentifier)
anchoredShortIdentifier = anchored(shortIdentifier)
// anchoredShortIdentifierRegexp is used to check if a value
// is a possible identifier prefix, anchored at start and end
// of string.
anchoredShortIdentifierRegexp = regexp.MustCompile(anchoredShortIdentifier)
anchoredIdentifierRegexp = regexp.MustCompile(anchored(identifier))
)
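
A hedged sketch of matching against the exported ReferenceRegexp, which (as documented above) is anchored and captures the name, tag, and digest in that order; the reference string and import path are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/distribution/distribution/v3/reference"
)

func main() {
	m := reference.ReferenceRegexp.FindStringSubmatch(
		"registry.example.com:5000/foo/bar:1.0@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if m == nil {
		panic("no match")
	}
	fmt.Println(m[1]) // registry.example.com:5000/foo/bar
	fmt.Println(m[2]) // 1.0
	fmt.Println(m[3]) // sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
}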
// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) string {
re := regexp.MustCompile(regexp.QuoteMeta(s))
if _, complete := re.LiteralPrefix(); !complete {
panic("must be a literal")
}
return re.String()
}
// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...string) string {
var s string
for _, re := range res {
s += re
}
return s
}
// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...string) string {
return group(expression(res...)) + `?`
return `(?:` + strings.Join(res, "") + `)?`
}
// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...string) string {
return group(expression(res...)) + `+`
}
// group wraps the regexp in a non-capturing group.
func group(res ...string) string {
return `(?:` + expression(res...) + `)`
// anyTimes wraps the expression in a non-capturing group that can occur
// any number of times.
func anyTimes(res ...string) string {
return `(?:` + strings.Join(res, "") + `)*`
}
// capture wraps the expression in a capturing group.
func capture(res ...string) string {
return `(` + expression(res...) + `)`
return `(` + strings.Join(res, "") + `)`
}
// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...string) string {
return `^` + expression(res...) + `$`
return `^` + strings.Join(res, "") + `$`
}
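
To make the helper semantics concrete, an in-package illustration of what a composed pattern expands to; the pattern itself is an invented example, not one used by the package.

// anchored(capture(`[a-z]+`), optional(`:`, capture(`[a-z]+`))) expands to:
//
//	^([a-z]+)(?::([a-z]+))?$
//
// i.e. an anchored "key[:value]" matcher with two capturing groups.
var exampleKeyValueRegexp = regexp.MustCompile(
	anchored(capture(`[a-z]+`), optional(`:`, capture(`[a-z]+`))),
)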

View file

@ -13,6 +13,7 @@ type regexpMatch struct {
}
func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) {
t.Helper()
matches := r.FindStringSubmatch(m.input)
if m.match && matches != nil {
if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input {
@ -34,7 +35,11 @@ func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) {
}
func TestDomainRegexp(t *testing.T) {
hostcases := []regexpMatch{
t.Parallel()
hostcases := []struct {
input string
match bool
}{
{
input: "test.com",
match: true,
@ -157,12 +162,20 @@ func TestDomainRegexp(t *testing.T) {
},
}
r := regexp.MustCompile(`^` + DomainRegexp.String() + `$`)
for i := range hostcases {
checkRegexp(t, r, hostcases[i])
for _, tc := range hostcases {
tc := tc
t.Run(tc.input, func(t *testing.T) {
t.Parallel()
match := r.MatchString(tc.input)
if match != tc.match {
t.Errorf("Expected match=%t, got %t", tc.match, match)
}
})
}
}
func TestFullNameRegexp(t *testing.T) {
t.Parallel()
if anchoredNameRegexp.NumSubexp() != 2 {
t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2",
anchoredNameRegexp, anchoredNameRegexp.NumSubexp())
@ -452,12 +465,17 @@ func TestFullNameRegexp(t *testing.T) {
match: false,
},
}
for i := range testcases {
checkRegexp(t, anchoredNameRegexp, testcases[i])
for _, tc := range testcases {
tc := tc
t.Run(tc.input, func(t *testing.T) {
t.Parallel()
checkRegexp(t, anchoredNameRegexp, tc)
})
}
}
func TestReferenceRegexp(t *testing.T) {
t.Parallel()
if ReferenceRegexp.NumSubexp() != 3 {
t.Fatalf("anchored name regexp should have three submatches: %v, %v != 3",
ReferenceRegexp, ReferenceRegexp.NumSubexp())
@ -522,14 +540,21 @@ func TestReferenceRegexp(t *testing.T) {
},
}
for i := range testcases {
checkRegexp(t, ReferenceRegexp, testcases[i])
for _, tc := range testcases {
tc := tc
t.Run(tc.input, func(t *testing.T) {
t.Parallel()
checkRegexp(t, ReferenceRegexp, tc)
})
}
}
func TestIdentifierRegexp(t *testing.T) {
fullCases := []regexpMatch{
t.Parallel()
fullCases := []struct {
input string
match bool
}{
{
input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821",
match: true,
@ -551,43 +576,14 @@ func TestIdentifierRegexp(t *testing.T) {
match: false,
},
}
shortCases := []regexpMatch{
{
input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821",
match: true,
},
{
input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C",
match: false,
},
{
input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf",
match: true,
},
{
input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821",
match: false,
},
{
input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482",
match: false,
},
{
input: "da304",
match: false,
},
{
input: "da304e",
match: true,
},
}
for i := range fullCases {
checkRegexp(t, anchoredIdentifierRegexp, fullCases[i])
}
for i := range shortCases {
checkRegexp(t, anchoredShortIdentifierRegexp, shortCases[i])
for _, tc := range fullCases {
tc := tc
t.Run(tc.input, func(t *testing.T) {
t.Parallel()
match := anchoredIdentifierRegexp.MatchString(tc.input)
if match != tc.match {
t.Errorf("Expected match=%t, got %t", tc.match, match)
}
})
}
}

reference/sort.go Normal file
View file

@ -0,0 +1,75 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reference
import (
"sort"
)
// Sort sorts string references preferring higher information references.
//
// The precedence is as follows:
//
// 1. [Named] + [Tagged] + [Digested] (e.g., "docker.io/library/busybox:latest@sha256:<digest>")
// 2. [Named] + [Tagged] (e.g., "docker.io/library/busybox:latest")
// 3. [Named] + [Digested] (e.g., "docker.io/library/busybox@sha256:<digest>")
// 4. [Named] (e.g., "docker.io/library/busybox")
// 5. [Digested] (e.g., "docker.io@sha256:<digest>")
// 6. Parse error
func Sort(references []string) []string {
var prefs []Reference
var bad []string
for _, ref := range references {
pref, err := ParseAnyReference(ref)
if err != nil {
bad = append(bad, ref)
} else {
prefs = append(prefs, pref)
}
}
sort.Slice(prefs, func(a, b int) bool {
ar := refRank(prefs[a])
br := refRank(prefs[b])
if ar == br {
return prefs[a].String() < prefs[b].String()
}
return ar < br
})
sort.Strings(bad)
var refs []string
for _, pref := range prefs {
refs = append(refs, pref.String())
}
return append(refs, bad...)
}
func refRank(ref Reference) uint8 {
if _, ok := ref.(Named); ok {
if _, ok = ref.(Tagged); ok {
if _, ok = ref.(Digested); ok {
return 1
}
return 2
}
if _, ok = ref.(Digested); ok {
return 3
}
return 4
}
return 5
}
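A minimal usage sketch of Sort inside the reference package (the names are placeholders and the 64-hex digest is a dummy value used only so the string parses):

	refs := []string{
		"docker.io/library/busybox",
		"docker.io/library/busybox:latest@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
		"docker.io/library/busybox:latest",
	}
	sorted := Sort(refs)
	// sorted[0]: the tagged+digested reference (Named+Tagged+Digested)
	// sorted[1]: "docker.io/library/busybox:latest" (Named+Tagged)
	// sorted[2]: "docker.io/library/busybox" (Named)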

reference/sort_test.go Normal file
View file

@ -0,0 +1,84 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reference
import (
"io"
"math/rand"
"testing"
"github.com/opencontainers/go-digest"
)
func TestReferenceSorting(t *testing.T) {
t.Parallel()
digested := func(seed int64) string {
b, err := io.ReadAll(io.LimitReader(rand.New(rand.NewSource(seed)), 64))
if err != nil {
panic(err)
}
return digest.FromBytes(b).String()
}
// Add a z. prefix so these references sort lexically after "sha256:" digests
r1 := func(name, tag string, seed int64) string {
return "z.containerd.io/" + name + ":" + tag + "@" + digested(seed)
}
r2 := func(name, tag string) string {
return "z.containerd.io/" + name + ":" + tag
}
r3 := func(name string, seed int64) string {
return "z.containerd.io/" + name + "@" + digested(seed)
}
for i, tc := range []struct {
unsorted []string
expected []string
}{
{
unsorted: []string{r2("name", "latest"), r3("name", 1), r1("name", "latest", 1)},
expected: []string{r1("name", "latest", 1), r2("name", "latest"), r3("name", 1)},
},
{
unsorted: []string{"can't parse this:latest", r3("name", 1), r2("name", "latest")},
expected: []string{r2("name", "latest"), r3("name", 1), "can't parse this:latest"},
},
{
unsorted: []string{digested(1), r3("name", 1), r2("name", "latest")},
expected: []string{r2("name", "latest"), r3("name", 1), digested(1)},
},
{
unsorted: []string{r2("name", "tag2"), r2("name", "tag3"), r2("name", "tag1")},
expected: []string{r2("name", "tag1"), r2("name", "tag2"), r2("name", "tag3")},
},
{
unsorted: []string{r2("name-2", "tag"), r2("name-3", "tag"), r2("name-1", "tag")},
expected: []string{r2("name-1", "tag"), r2("name-2", "tag"), r2("name-3", "tag")},
},
} {
sorted := Sort(tc.unsorted)
if len(sorted) != len(tc.expected) {
t.Errorf("[%d]: Mismatched sized, got %d, expected %d", i, len(sorted), len(tc.expected))
continue
}
for j := range sorted {
if sorted[j] != tc.expected[j] {
t.Errorf("[%d]: Wrong value at %d, got %q, expected %q", i, j, sorted[j], tc.expected[j])
break
}
}
}
}

View file

@ -86,7 +86,6 @@ func TestErrorCodes(t *testing.T) {
t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString)
}
}
}
func TestErrorsManagement(t *testing.T) {
@ -99,7 +98,6 @@ func TestErrorsManagement(t *testing.T) {
errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data"))
p, err := json.Marshal(errs)
if err != nil {
t.Fatalf("error marashaling errors: %v", err)
}
@ -181,5 +179,4 @@ func TestErrorsManagement(t *testing.T) {
if e2.Detail != `stuff2` {
t.Fatalf("e2 had wrong detail: %q", e2.Detail)
}
}

View file

@ -75,8 +75,10 @@ var (
})
)
var nextCode = 1000
var registerLock sync.Mutex
var (
nextCode = 1000
registerLock sync.Mutex
)
// Register will make the passed-in error known to the environment and
// return a new ErrorCode

View file

@ -262,7 +262,6 @@ type RouteDescriptor struct {
// MethodDescriptor provides a description of the requests that may be
// conducted with the target method.
type MethodDescriptor struct {
// Method is an HTTP method, such as GET, PUT or POST.
Method string

View file

@ -1,12 +0,0 @@
// +build gofuzz
package v2
// FuzzParseForwardedHeader implements a fuzzer
// that targets parseForwardedHeader
// Export before building
// nolint:deadcode
func fuzzParseForwardedHeader(data []byte) int {
_, _, _ = parseForwardedHeader(string(data))
return 1
}

View file

@ -0,0 +1,13 @@
package v2
import (
"testing"
)
// FuzzParseForwardedHeader implements a fuzzer
// that targets parseForwardedHeader
func FuzzParseForwardedHeader(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
_, _, _ = parseForwardedHeader(data)
})
}
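With Go's native fuzzing support (Go 1.18+), this target can be exercised from the package directory with, for example, go test -fuzz=FuzzParseForwardedHeader (an illustrative invocation; adjust the fuzz time and package path as needed).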

View file

@ -265,7 +265,6 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee
u := server.URL + testcase.RequestURI
resp, err := http.Get(u)
if err != nil {
t.Fatalf("error issuing get request: %v", err)
}
@ -316,7 +315,6 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee
resp.Body.Close()
}
}
// -------------- START LICENSED CODE --------------

View file

@ -80,8 +80,8 @@ func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
// comma-separated list of hosts, to which each proxy appends the
// requested host. We want to grab the first from this comma-separated
// list.
hosts := strings.SplitN(forwardedHost, ",", 2)
host = strings.TrimSpace(hosts[0])
host, _, _ = strings.Cut(forwardedHost, ",")
host = strings.TrimSpace(host)
}
}
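A tiny sketch of the Cut-and-trim step above on an illustrative X-Forwarded-Host value produced by two proxies (assumes the strings package):

	host, _, _ := strings.Cut("registry.example.com, proxy.internal", ",")
	host = strings.TrimSpace(host)
	// host == "registry.example.com"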

View file

@ -8,28 +8,27 @@
// An implementation registers its access controller by name with a constructor
// which accepts an options map for configuring the access controller.
//
// options := map[string]interface{}{"sillySecret": "whysosilly?"}
// accessController, _ := auth.GetAccessController("silly", options)
// options := map[string]interface{}{"sillySecret": "whysosilly?"}
// accessController, _ := auth.GetAccessController("silly", options)
//
// This `accessController` can then be used in a request handler like so:
//
// func updateOrder(w http.ResponseWriter, r *http.Request) {
// orderNumber := r.FormValue("orderNumber")
// resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
// access := auth.Access{Resource: resource, Action: "update"}
// func updateOrder(w http.ResponseWriter, r *http.Request) {
// orderNumber := r.FormValue("orderNumber")
// resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
// access := auth.Access{Resource: resource, Action: "update"}
//
// if ctx, err := accessController.Authorized(ctx, access); err != nil {
// if challenge, ok := err.(auth.Challenge) {
// // Let the challenge write the response.
// challenge.SetHeaders(r, w)
// w.WriteHeader(http.StatusUnauthorized)
// return
// } else {
// // Some other error.
// }
// if ctx, err := accessController.Authorized(ctx, access); err != nil {
// if challenge, ok := err.(auth.Challenge) {
// // Let the challenge write the response.
// challenge.SetHeaders(r, w)
// w.WriteHeader(http.StatusUnauthorized)
// return
// } else {
// // Some other error.
// }
// }
//
// }
// }
package auth
import (

View file

@ -128,10 +128,10 @@ func createHtpasswdFile(path string) error {
return err
}
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
return err
}
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600)
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0o600)
if err != nil {
return fmt.Errorf("failed to open htpasswd path %s", err)
}

View file

@ -2,7 +2,7 @@ package htpasswd
import (
"bytes"
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"os"
@ -21,7 +21,7 @@ func TestBasicAccessController(t *testing.T) {
MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2
DeokMan:공주님`
tempFile, err := ioutil.TempFile("", "htpasswd-test")
tempFile, err := os.CreateTemp("", "htpasswd-test")
if err != nil {
t.Fatal("could not create temporary htpasswd file")
}
@ -42,7 +42,7 @@ func TestBasicAccessController(t *testing.T) {
tempFile.Close()
var userNumber = 0
userNumber := 0
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := context.WithRequest(ctx, r)
@ -76,7 +76,6 @@ func TestBasicAccessController(t *testing.T) {
req, _ := http.NewRequest(http.MethodGet, server.URL, nil)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("unexpected error during GET: %v", err)
}
@ -120,11 +119,10 @@ func TestBasicAccessController(t *testing.T) {
}
}
}
}
func TestCreateHtpasswdFile(t *testing.T) {
tempFile, err := ioutil.TempFile("", "htpasswd-test")
tempFile, err := os.CreateTemp("", "htpasswd-test")
if err != nil {
t.Fatalf("could not create temporary htpasswd file %v", err)
}
@ -137,7 +135,7 @@ func TestCreateHtpasswdFile(t *testing.T) {
if _, err := newAccessController(options); err != nil {
t.Fatalf("error creating access controller %v", err)
}
content, err := ioutil.ReadAll(tempFile)
content, err := io.ReadAll(tempFile)
if err != nil {
t.Fatalf("failed to read file %v", err)
}
@ -152,7 +150,7 @@ func TestCreateHtpasswdFile(t *testing.T) {
if _, err := newAccessController(options); err != nil {
t.Fatalf("error creating access controller %v", err)
}
content, err = ioutil.ReadFile(tempFile.Name())
content, err = os.ReadFile(tempFile.Name())
if err != nil {
t.Fatalf("failed to read file %v", err)
}

View file

@ -8,7 +8,6 @@ import (
)
func TestParseHTPasswd(t *testing.T) {
for _, tc := range []struct {
desc string
input string
@ -81,5 +80,4 @@ asdf
t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries)
}
}
}

View file

@ -70,7 +70,6 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut
ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, auth.UserNameKey, auth.UserKey))
return ctx, nil
}
type challenge struct {

View file

@ -7,7 +7,7 @@ import (
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
"os"
"strings"
@ -187,7 +187,7 @@ func newAccessController(options map[string]interface{}) (auth.AccessController,
}
defer fp.Close()
rawCertBundle, err := ioutil.ReadAll(fp)
rawCertBundle, err := io.ReadAll(fp)
if err != nil {
return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err)
}
@ -247,15 +247,12 @@ func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.
return nil, err
}
parts := strings.Split(req.Header.Get("Authorization"), " ")
if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" {
prefix, rawToken, ok := strings.Cut(req.Header.Get("Authorization"), " ")
if !ok || rawToken == "" || !strings.EqualFold(prefix, "bearer") {
challenge.err = ErrTokenRequired
return nil, challenge
}
rawToken := parts[1]
token, err := NewToken(rawToken)
if err != nil {
challenge.err = err

View file

@ -83,7 +83,9 @@ type VerifyOptions struct {
// NewToken parses the given raw token string
// and constructs an unverified JSON Web Token.
func NewToken(rawToken string) (*Token, error) {
parts := strings.Split(rawToken, TokenSeparator)
// We expect 3 parts, but limit the split to 4 to detect cases where
// the token contains too many (or too few) separators.
parts := strings.SplitN(rawToken, TokenSeparator, 4)
if len(parts) != 3 {
return nil, ErrMalformedToken
}
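A quick illustration of why the split is capped at 4 (assuming TokenSeparator is the usual "."): the cap is enough to detect a part count other than 3 without splitting the entire remainder.

	strings.SplitN("hdr.payload.sig", ".", 4)         // ["hdr", "payload", "sig"]            -> 3 parts, accepted
	strings.SplitN("hdr.payload.sig.extra.x", ".", 4) // ["hdr", "payload", "sig", "extra.x"] -> 4 parts, ErrMalformedToken
	strings.SplitN("hdr.payload", ".", 4)             // ["hdr", "payload"]                   -> 2 parts, ErrMalformedToken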
@ -185,13 +187,15 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error {
// VerifySigningKey attempts to get the key which was used to sign this token.
// The token header should contain either of these 3 fields:
// `x5c` - The x509 certificate chain for the signing key. Needs to be
// verified.
// `jwk` - The JSON Web Key representation of the signing key.
// May contain its own `x5c` field which needs to be verified.
// `kid` - The unique identifier for the key. This library interprets it
// as a libtrust fingerprint. The key itself can be looked up in
// the trustedKeys field of the given verify options.
//
// `x5c` - The x509 certificate chain for the signing key. Needs to be
// verified.
// `jwk` - The JSON Web Key representation of the signing key.
// May contain its own `x5c` field which needs to be verified.
// `kid` - The unique identifier for the key. This library interprets it
// as a libtrust fingerprint. The key itself can be looked up in
// the trustedKeys field of the given verify options.
//
// Each of these methods are tried in that order of preference until the
// signing key is found or an error is returned.
func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) {

View file

@ -8,7 +8,6 @@ import (
"encoding/json"
"encoding/pem"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
@ -285,7 +284,7 @@ func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err er
return "", err
}
tempFile, err := ioutil.TempFile("", "rootCertBundle")
tempFile, err := os.CreateTemp("", "rootCertBundle")
if err != nil {
return "", err
}
@ -307,10 +306,10 @@ func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err er
// TestAccessController tests complete integration of the token auth package.
// It starts by mocking the options for a token auth accessController which
// it creates. It then tries a few mock requests:
// - don't supply a token; should error with challenge
// - supply an invalid token; should error with challenge
// - supply a token with insufficient access; should error with challenge
// - supply a valid token; should not error
// - don't supply a token; should error with challenge
// - supply an invalid token; should error with challenge
// - supply a token with insufficient access; should error with challenge
// - supply a valid token; should not error
func TestAccessController(t *testing.T) {
// Make 2 keys; only the first is to be a trusted root key.
rootKeys, err := makeRootKeys(2)
@ -493,7 +492,7 @@ func TestNewAccessControllerPemBlock(t *testing.T) {
defer os.Remove(rootCertBundleFilename)
// Add something other than a certificate to the rootcertbundle
file, err := os.OpenFile(rootCertBundleFilename, os.O_WRONLY|os.O_APPEND, 0666)
file, err := os.OpenFile(rootCertBundleFilename, os.O_WRONLY|os.O_APPEND, 0o666)
if err != nil {
t.Fatal(err)
}

View file

@ -38,7 +38,6 @@ func TestAuthChallengeParse(t *testing.T) {
if expected := "he\"llo"; challenge.Parameters["slashed"] != expected {
t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected)
}
}
func TestAuthChallengeNormalization(t *testing.T) {
@ -49,7 +48,6 @@ func TestAuthChallengeNormalization(t *testing.T) {
}
func testAuthChallengeNormalization(t *testing.T, host string) {
scm := NewSimpleManager()
url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))
@ -85,7 +83,6 @@ func testAuthChallengeNormalization(t *testing.T, host string) {
}
func testAuthChallengeConcurrent(t *testing.T, host string) {
scm := NewSimpleManager()
url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))

View file

@ -50,7 +50,6 @@ func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Re
func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) {
h := testutil.NewHandler(rrm)
wrapper := &testAuthenticationWrapper{
headers: http.Header(map[string][]string{
"X-API-Version": {"registry/2.0"},
"X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"},

View file

@ -5,7 +5,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"time"
@ -38,7 +37,7 @@ func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
}
func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
req, err := http.NewRequestWithContext(hbu.ctx, http.MethodPatch, hbu.location, ioutil.NopCloser(r))
req, err := http.NewRequestWithContext(hbu.ctx, http.MethodPatch, hbu.location, io.NopCloser(r))
if err != nil {
return 0, err
}

View file

@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/distribution/distribution/v3/registry/api/errcode"
@ -40,7 +39,7 @@ func (e *UnexpectedHTTPResponseError) Error() string {
func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
var errors errcode.Errors
body, err := ioutil.ReadAll(r)
body, err := io.ReadAll(r)
if err != nil {
return err
}

View file

@ -7,9 +7,9 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
@ -139,16 +139,14 @@ func NewRepository(name reference.Named, baseURL string, transport http.RoundTri
return nil, err
}
client := &http.Client{
Transport: transport,
CheckRedirect: checkHTTPRedirect,
// TODO(dmcgowan): create cookie jar
}
return &repository{
client: client,
ub: ub,
name: name,
client: &http.Client{
Transport: transport,
CheckRedirect: checkHTTPRedirect,
// TODO(dmcgowan): create cookie jar
},
ub: ub,
name: name,
}, nil
}
@ -163,16 +161,15 @@ func (r *repository) Named() reference.Named {
}
func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
statter := &blobStatter{
return &blobs{
name: r.name,
ub: r.ub,
client: r.client,
}
return &blobs{
name: r.name,
ub: r.ub,
client: r.client,
statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(memory.UnlimitedSize), statter),
statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(memory.UnlimitedSize), &blobStatter{
name: r.name,
ub: r.ub,
client: r.client,
}),
}
}
@ -227,7 +224,7 @@ func (t *tags) All(ctx context.Context) ([]string, error) {
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
b, err := ioutil.ReadAll(resp.Body)
b, err := io.ReadAll(resp.Body)
if err != nil {
return tags, err
}
@ -240,8 +237,8 @@ func (t *tags) All(ctx context.Context) ([]string, error) {
}
tags = append(tags, tagsResponse.Tags...)
if link := resp.Header.Get("Link"); link != "" {
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
linkURL, err := url.Parse(linkURLStr)
firstLink, _, _ := strings.Cut(link, ";")
linkURL, err := url.Parse(strings.Trim(firstLink, "<>"))
if err != nil {
return tags, err
}
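For orientation, a sketch of the Link-header handling above on an illustrative value (assumes the strings and net/url packages):

	link := `</v2/library/busybox/tags/list?last=v9&n=100>; rel="next"`
	firstLink, _, _ := strings.Cut(link, ";")
	linkURL, _ := url.Parse(strings.Trim(firstLink, "<>"))
	// linkURL.Path == "/v2/library/busybox/tags/list"
	// linkURL.RawQuery == "last=v9&n=100"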
@ -268,7 +265,7 @@ func descriptorFromResponse(response *http.Response) (distribution.Descriptor, e
digestHeader := headers.Get("Docker-Content-Digest")
if digestHeader == "" {
data, err := ioutil.ReadAll(response.Body)
data, err := io.ReadAll(response.Body)
if err != nil {
return distribution.Descriptor{}, err
}
@ -296,7 +293,6 @@ func descriptorFromResponse(response *http.Response) (distribution.Descriptor, e
desc.Size = length
return desc, nil
}
// Get issues a HEAD request for a Manifest against its named endpoint in order
@ -528,8 +524,7 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
}
}
mt := resp.Header.Get("Content-Type")
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
@ -667,7 +662,6 @@ func sanitizeLocation(location, base string) (string, error) {
func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
return bs.statter.Stat(ctx, dgst)
}
func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
@ -677,10 +671,10 @@ func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
}
defer reader.Close()
return ioutil.ReadAll(reader)
return io.ReadAll(reader)
}
func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error) {
ref, err := reference.WithDigest(bs.name, dgst)
if err != nil {
return nil, err
@ -690,13 +684,12 @@ func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.Rea
return nil, err
}
return transport.NewHTTPReadSeeker(ctx, bs.client, blobURL,
func(resp *http.Response) error {
if resp.StatusCode == http.StatusNotFound {
return distribution.ErrBlobUnknown
}
return HandleErrorResponse(resp)
}), nil
return transport.NewHTTPReadSeeker(ctx, bs.client, blobURL, func(resp *http.Response) error {
if resp.StatusCode == http.StatusNotFound {
return distribution.ErrBlobUnknown
}
return HandleErrorResponse(resp)
}), nil
}
func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
@ -812,8 +805,8 @@ func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateO
// TODO(dmcgowan): Check for invalid UUID
uuid := resp.Header.Get("Docker-Upload-UUID")
if uuid == "" {
parts := strings.Split(resp.Header.Get("Location"), "/")
uuid = parts[len(parts)-1]
// uuid is expected to be the last path element
_, uuid = path.Split(resp.Header.Get("Location"))
}
if uuid == "" {
return nil, errors.New("cannot retrieve docker upload UUID")
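A sketch of how path.Split recovers the upload UUID as the last path element of a Location header without a query string (the URL and UUID are illustrative):

	loc := "https://registry.example.com/v2/library/busybox/blobs/uploads/0f0b2b57-9a8d-4f43-9f6c-1f3e3f3a6c30"
	_, uuid := path.Split(loc)
	// uuid == "0f0b2b57-9a8d-4f43-9f6c-1f3e3f3a6c30"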

View file

@ -6,7 +6,6 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
@ -126,7 +125,7 @@ func TestBlobServeBlob(t *testing.T) {
t.Errorf("Error serving blob: %s", err.Error())
}
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
t.Errorf("Error reading response body: %s", err.Error())
}
@ -175,7 +174,7 @@ func TestBlobServeBlobHEAD(t *testing.T) {
t.Errorf("Error serving blob: %s", err.Error())
}
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
t.Errorf("Error reading response body: %s", err.Error())
}
@ -319,7 +318,6 @@ func TestBlobDelete(t *testing.T) {
if err != nil {
t.Errorf("Error deleting blob: %s", err.Error())
}
}
func TestBlobFetch(t *testing.T) {
@ -399,7 +397,6 @@ func TestBlobExistsNoContentLength(t *testing.T) {
if !strings.Contains(err.Error(), "missing content-length heade") {
t.Fatalf("Expected missing content-length error message")
}
}
func TestBlobExists(t *testing.T) {
@ -986,7 +983,6 @@ func addTestManifestWithEtag(repo reference.Named, reference string, content []b
"Content-Type": {schema1.MediaTypeSignedManifest},
}),
}
}
*m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag})
}
@ -1535,6 +1531,7 @@ func TestObtainsManifestForTagWithoutHeaders(t *testing.T) {
t.Fatalf("Unexpected digest")
}
}
func TestManifestTagsPaginated(t *testing.T) {
s := httptest.NewServer(http.NotFoundHandler())
defer s.Close()

View file

@ -20,17 +20,17 @@ var (
)
// ReadSeekCloser combines io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
io.ReadSeeker
io.Closer
}
//
// Deprecated: use [io.ReadSeekCloser].
type ReadSeekCloser = io.ReadSeekCloser
// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
// request. When seeking and starting a read from a non-zero offset,
// a "Range" header will be added which sets the offset.
//
// TODO(dmcgowan): Move this into a separate utility package
func NewHTTPReadSeeker(ctx context.Context, client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
return &httpReadSeeker{
func NewHTTPReadSeeker(ctx context.Context, client *http.Client, url string, errorHandler func(*http.Response) error) *HTTPReadSeeker {
return &HTTPReadSeeker{
ctx: ctx,
client: client,
url: url,
@ -38,7 +38,8 @@ func NewHTTPReadSeeker(ctx context.Context, client *http.Client, url string, err
}
}
type httpReadSeeker struct {
// HTTPReadSeeker implements an [io.ReadSeekCloser].
type HTTPReadSeeker struct {
ctx context.Context
client *http.Client
url string
@ -63,7 +64,7 @@ type httpReadSeeker struct {
err error
}
func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
func (hrs *HTTPReadSeeker) Read(p []byte) (n int, err error) {
if hrs.err != nil {
return 0, hrs.err
}
@ -92,7 +93,7 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
return n, err
}
func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
func (hrs *HTTPReadSeeker) Seek(offset int64, whence int) (int64, error) {
if hrs.err != nil {
return 0, hrs.err
}
@ -135,7 +136,7 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
return hrs.seekOffset, err
}
func (hrs *httpReadSeeker) Close() error {
func (hrs *HTTPReadSeeker) Close() error {
if hrs.err != nil {
return hrs.err
}
@ -152,7 +153,7 @@ func (hrs *httpReadSeeker) Close() error {
return nil
}
func (hrs *httpReadSeeker) reset() {
func (hrs *HTTPReadSeeker) reset() {
if hrs.err != nil {
return
}
@ -162,7 +163,7 @@ func (hrs *httpReadSeeker) reset() {
}
}
func (hrs *httpReadSeeker) reader() (io.Reader, error) {
func (hrs *HTTPReadSeeker) reader() (io.Reader, error) {
if hrs.err != nil {
return nil, hrs.err
}

View file

@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/http/httputil"
@ -69,7 +68,7 @@ func TestCheckAPI(t *testing.T) {
"Content-Length": []string{"2"},
})
p, err := ioutil.ReadAll(resp.Body)
p, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("unexpected error reading response body: %v", err)
}
@ -87,7 +86,8 @@ func TestCatalogAPI(t *testing.T) {
values := url.Values{
"last": []string{""},
"n": []string{strconv.Itoa(chunkLen)}}
"n": []string{strconv.Itoa(chunkLen)},
}
catalogURL, err := env.builder.BuildCatalogURL(values)
if err != nil {
@ -453,7 +453,6 @@ func TestBlobAPI(t *testing.T) {
defer env2.Shutdown()
args = makeBlobArgs(t)
testBlobAPI(t, env2, args)
}
func TestBlobDelete(t *testing.T) {
@ -1110,7 +1109,7 @@ const (
func (factory *storageManifestErrDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
// Initialize the mock driver
var errGenericStorage = errors.New("generic storage error")
errGenericStorage := errors.New("generic storage error")
return &mockErrorDriver{
returnErrs: []mockErrorMapping{
{
@ -1346,7 +1345,6 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name
for i := range unsignedManifest.FSLayers {
rs, dgst, err := testutil.CreateRandomTarFile()
if err != nil {
t.Fatalf("error creating random layer %d: %v", i, err)
}
@ -1450,7 +1448,6 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name
sm2, err := schema1.Sign(&fetchedManifestByDigest.Manifest, env.pk)
if err != nil {
t.Fatal(err)
}
// Re-push with a few different Content-Types. The official schema1
@ -1684,7 +1681,6 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name
for i := range manifest.Layers {
rs, dgst, err := testutil.CreateRandomTarFile()
if err != nil {
t.Fatalf("error creating random layer %d: %v", i, err)
}
@ -1860,7 +1856,7 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name
}
defer resp.Body.Close()
manifestBytes, err := ioutil.ReadAll(resp.Body)
manifestBytes, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("error reading response body: %v", err)
}
@ -2094,7 +2090,7 @@ func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs)
}
defer resp.Body.Close()
manifestBytes, err := ioutil.ReadAll(resp.Body)
manifestBytes, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("error reading response body: %v", err)
}
@ -2279,7 +2275,6 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) {
if len(tagsResponse.Tags) != 0 {
t.Fatalf("expected 0 tags in response: %v", tagsResponse.Tags)
}
}
type testEnv struct {
@ -2308,7 +2303,6 @@ func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv {
config.Compatibility.Schema1.Enabled = true
return newTestEnvWithConfig(t, &config)
}
func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv {
@ -2334,7 +2328,6 @@ func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *te
app := NewApp(ctx, config)
server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
builder, err := v2.NewURLBuilderFromString(server.URL+config.HTTP.Prefix, false)
if err != nil {
t.Fatalf("error creating url builder: %v", err)
}
@ -2620,7 +2613,7 @@ func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus
// expected error codes, returning the error structure, the json slice and a
// count of the errors by code.
func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...errcode.ErrorCode) (errcode.Errors, []byte, map[errcode.ErrorCode]int) {
p, err := ioutil.ReadAll(resp.Body)
p, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("unexpected error reading body %s: %v", msg, err)
}
@ -2832,7 +2825,6 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) {
blobURL, _ := env.builder.BuildBlobURL(ref)
resp, _ = httpDelete(blobURL)
checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode)
}
func TestProxyManifestGetByTag(t *testing.T) {

View file

@ -645,13 +645,6 @@ func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx = dcontext.WithLogger(ctx, dcontext.GetRequestLogger(ctx))
r = r.WithContext(ctx)
defer func() {
status, ok := ctx.Value("http.response.status").(int)
if ok && status >= 200 && status <= 399 {
dcontext.GetResponseLogger(r.Context()).Infof("response completed")
}
}()
// Set a header with the Docker Distribution API Version for all responses.
w.Header().Add("Docker-Distribution-API-Version", "registry/2.0")
app.router.ServeHTTP(w, r)
@ -678,6 +671,18 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
context := app.context(w, r)
defer func() {
// Automated error response handling here. Handlers may return their
// own errors if they need different behavior (such as range errors
// for layer upload).
if context.Errors.Len() > 0 {
_ = errcode.ServeJSON(w, context.Errors)
app.logError(context, context.Errors)
} else if status, ok := context.Value("http.response.status").(int); ok && status >= 200 && status <= 399 {
dcontext.GetResponseLogger(context).Infof("response completed")
}
}()
if err := app.authorized(w, r, context); err != nil {
dcontext.GetLogger(context).Warnf("error authorizing context: %v", err)
return
@ -703,7 +708,6 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
return
}
repository, err := app.registry.Repository(context, nameRef)
if err != nil {
dcontext.GetLogger(context).Errorf("error resolving repository: %v", err)
@ -741,16 +745,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
}
dispatch(context, r).ServeHTTP(w, r)
// Automated error response handling here. Handlers may return their
// own errors if they need different behavior (such as range errors
// for layer upload).
if context.Errors.Len() > 0 {
if err := errcode.ServeJSON(w, context.Errors); err != nil {
dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
}
app.logError(context, context.Errors)
}
})
}
@ -983,7 +978,6 @@ func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespac
registry = rmw
}
return registry, nil
}
// applyRepoMiddleware wraps a repository with the configured middlewares

View file

@ -120,13 +120,11 @@ func TestAppDispatcher(t *testing.T) {
app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars)))
route := router.GetRoute(testcase.endpoint).Host(serverURL.Host)
u, err := route.URL(testcase.vars...)
if err != nil {
t.Fatal(err)
}
resp, err := http.Get(u.String())
if err != nil {
t.Fatal(err)
}
@ -275,5 +273,4 @@ func TestAppendAccessRecords(t *testing.T) {
if ok := reflect.DeepEqual(result, expectedResult); !ok {
t.Fatalf("Actual access record differs from expected")
}
}

View file

@ -79,7 +79,6 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req
blobs := buh.Repository.Blobs(buh)
upload, err := blobs.Create(buh, options...)
if err != nil {
if ebm, ok := err.(distribution.ErrBlobMounted); ok {
if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil {
@ -219,7 +218,6 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht
// really set the mediatype. For now, we can let the backend take care
// of this.
})
if err != nil {
switch err := err.(type) {
case distribution.ErrBlobInvalidDigest:

View file

@ -34,7 +34,7 @@ type catalogAPIResponse struct {
}
func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) {
var moreEntries = true
moreEntries := true
q := r.URL.Query()
lastEntry := q.Get("last")

View file

@ -1,7 +1,6 @@
package handlers
import (
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
@ -17,7 +16,7 @@ import (
func TestFileHealthCheck(t *testing.T) {
interval := time.Second
tmpfile, err := ioutil.TempFile(os.TempDir(), "healthcheck")
tmpfile, err := os.CreateTemp(os.TempDir(), "healthcheck")
if err != nil {
t.Fatalf("could not create temporary file: %v", err)
}

View file

@ -31,7 +31,7 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
func copyFullPayload(ctx context.Context, responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, limit int64, action string) error {
// Get a channel that tells us if the client disconnects
clientClosed := r.Context().Done()
var body = r.Body
body := r.Body
if limit > 0 {
body = http.MaxBytesReader(responseWriter, body, limit)
}
@ -68,19 +68,18 @@ func copyFullPayload(ctx context.Context, responseWriter http.ResponseWriter, r
return nil
}
func parseContentRange(cr string) (int64, int64, error) {
ranges := strings.Split(cr, "-")
if len(ranges) != 2 {
func parseContentRange(cr string) (start int64, end int64, err error) {
rStart, rEnd, ok := strings.Cut(cr, "-")
if !ok {
return -1, -1, fmt.Errorf("invalid content range format, %s", cr)
}
start, err := strconv.ParseInt(ranges[0], 10, 64)
start, err = strconv.ParseInt(rStart, 10, 64)
if err != nil {
return -1, -1, err
}
end, err := strconv.ParseInt(ranges[1], 10, 64)
end, err = strconv.ParseInt(rEnd, 10, 64)
if err != nil {
return -1, -1, err
}
return start, end, nil
}
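A minimal usage sketch of parseContentRange (inputs are illustrative):

	start, end, err := parseContentRange("0-1023") // start == 0, end == 1023, err == nil
	_, _, err = parseContentRange("0:1023")        // err: invalid content range format, 0:1023
	_, _, err = parseContentRange("abc-1023")      // err from strconv.ParseInt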

View file

@ -18,11 +18,10 @@ type logHook struct {
// Fire forwards an error to LogHook
func (hook *logHook) Fire(entry *logrus.Entry) error {
addr := strings.Split(hook.Mail.Addr, ":")
if len(addr) != 2 {
host, _, ok := strings.Cut(hook.Mail.Addr, ":")
if !ok || host == "" {
return errors.New("invalid Mail Address")
}
host := addr[0]
subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message)
html := `

View file

@ -479,7 +479,6 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest)
}
return nil
}
// DeleteManifest removes the manifest with the given digest or the tag with the given name from the registry.

View file

@ -12,8 +12,10 @@ import (
// used to register the constructor for different RegistryMiddleware backends.
type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error)
var middlewares map[string]InitFunc
var registryoptions []storage.RegistryOption
var (
middlewares map[string]InitFunc
registryoptions []storage.RegistryOption
)
// Register is used to register an InitFunc for
// a RegistryMiddleware backend with the given name.

View file

@ -213,7 +213,7 @@ func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo reference.Named
return distribution.Descriptor{}, distribution.ErrUnsupported
}
func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error) {
return nil, distribution.ErrUnsupported
}

View file

@ -2,7 +2,7 @@ package proxy
import (
"context"
"io/ioutil"
"io"
"math/rand"
"net/http"
"net/http/httptest"
@ -59,7 +59,7 @@ func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.B
return sbs.blobs.Resume(ctx, id)
}
func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error) {
sbsMu.Lock()
sbs.stats["open"]++
sbsMu.Unlock()
@ -114,6 +114,8 @@ func (te *testEnv) RemoteStats() *map[string]int {
// Populate remote store and record the digests
func makeTestEnv(t *testing.T, name string) *testEnv {
t.Helper()
nameRef, err := reference.WithName(name)
if err != nil {
t.Fatalf("unable to parse reference: %s", err)
@ -121,15 +123,8 @@ func makeTestEnv(t *testing.T, name string) *testEnv {
ctx := context.Background()
truthDir, err := ioutil.TempDir("", "truth")
if err != nil {
t.Fatalf("unable to create tempdir: %s", err)
}
cacheDir, err := ioutil.TempDir("", "cache")
if err != nil {
t.Fatalf("unable to create tempdir: %s", err)
}
truthDir := t.TempDir()
cacheDir := t.TempDir()
localDriver, err := filesystem.FromParameters(map[string]interface{}{
"rootdirectory": truthDir,
@ -221,6 +216,7 @@ func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) {
te.inRemote = inRemote
te.numUnique = numUnique
}
func TestProxyStoreGet(t *testing.T) {
te := makeTestEnv(t, "foo/bar")
@ -253,7 +249,6 @@ func TestProxyStoreGet(t *testing.T) {
if (*remoteStats)["get"] != 1 {
t.Errorf("Unexpected remote get count")
}
}
func TestProxyStoreStat(t *testing.T) {
@ -284,7 +279,6 @@ func TestProxyStoreStat(t *testing.T) {
if te.store.authChallenger.(*mockChallenger).count != len(te.inRemote) {
t.Fatalf("Unexpected auth challenge count, got %#v", te.store.authChallenger)
}
}
func TestProxyStoreServeHighConcurrency(t *testing.T) {

View file

@ -79,7 +79,7 @@ func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, optio
pms.scheduler.AddManifest(repoBlob, repositoryTTL)
// Ensure the manifest blob is cleaned up
//pms.scheduler.AddBlob(blobRef, repositoryTTL)
// pms.scheduler.AddBlob(blobRef, repositoryTTL)
}

Some files were not shown because too many files have changed in this diff.